migrated to boltdb from sqlite + session files

This commit is contained in:
2025-08-31 20:58:54 +02:00
parent 4c35b0b41f
commit 658d43f9ea
15 changed files with 448 additions and 400 deletions

View File

@@ -2,31 +2,58 @@ package kv
import (
"encoding/gob"
"encoding/json"
"errors"
"log/slog"
"os"
"path/filepath"
"runtime"
"sync"
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
bolt "go.etcd.io/bbolt"
)
var memDbEvents = make(chan downloaders.Downloader, runtime.NumCPU())
var (
bucket = []byte("downloads")
memDbEvents = make(chan downloaders.Downloader, runtime.NumCPU())
)
// In-Memory Thread-Safe Key-Value Storage with optional persistence.
// Reads and writes go to table under mu; db is the bolt database used
// to snapshot/restore that in-memory state.
type Store struct {
	db    *bolt.DB                          // backing bolt database (persistence layer)
	table map[string]downloaders.Downloader // live in-memory view, keyed by process id
	mu    sync.RWMutex                      // guards table
}
func NewStore() *Store {
return &Store{
// NewStore ensures the "downloads" bucket exists in db and returns a Store
// that snapshots its in-memory table to bolt every snapshotInterval.
//
// NOTE(review): the snapshot goroutine has no stop signal, so it (and its
// ticker) live for the whole process. If Store ever needs a Close, plumb a
// context/done channel through here.
func NewStore(db *bolt.DB, snapshotInterval time.Duration) (*Store, error) {
	// Init bucket before any read/write runs against it.
	err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(bucket)
		return err
	})
	if err != nil {
		return nil, err
	}

	s := &Store{
		db:    db,
		table: make(map[string]downloaders.Downloader),
	}

	// Periodic persistence loop.
	go func() {
		ticker := time.NewTicker(snapshotInterval)
		defer ticker.Stop()
		for range ticker.C {
			// Previously the error was silently discarded; surface it so
			// persistence failures are visible in the logs.
			if err := s.Snapshot(); err != nil {
				slog.Error("snapshotting downloads state", "err", err)
			}
		}
	}()

	// err is known to be nil here; return nil explicitly instead of reusing it.
	return s, nil
}
// Get a process pointer given its id
@@ -108,25 +135,25 @@ func (m *Store) Persist() error {
// Restore a persisted state
func (m *Store) Restore(mq *queue.MessageQueue) {
sf := filepath.Join(config.Instance().SessionFilePath, "session.dat")
fd, err := os.Open(sf)
if err != nil {
return
}
var session Session
if err := gob.NewDecoder(fd).Decode(&session); err != nil {
return
}
m.mu.Lock()
defer m.mu.Unlock()
for _, snap := range session.Processes {
var restored downloaders.Downloader
var snapshot []internal.ProcessSnapshot
m.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.ForEach(func(k, v []byte) error {
var snap internal.ProcessSnapshot
if err := json.Unmarshal(v, &snap); err != nil {
return err
}
snapshot = append(snapshot, snap)
return nil
})
})
for _, snap := range snapshot {
var restored downloaders.Downloader
if snap.DownloaderName == "generic" {
d := downloaders.NewGenericDownload("", []string{})
err := d.RestoreFromSnapshot(&snap)
@@ -134,9 +161,7 @@ func (m *Store) Restore(mq *queue.MessageQueue) {
continue
}
restored = d
m.table[snap.Id] = restored
if !restored.(*downloaders.GenericDownloader).DownloaderBase.Completed {
mq.Publish(restored)
}
@@ -152,3 +177,23 @@ func (m *Store) EventListener() {
}
}
}
// Snapshot persists every download currently tracked by the store into
// the "downloads" bolt bucket, JSON-encoded and keyed by process id.
func (m *Store) Snapshot() error {
	slog.Debug("snapshotting downloads state")

	processes := m.All()

	return m.db.Update(func(tx *bolt.Tx) error {
		downloads := tx.Bucket(bucket)

		for _, proc := range *processes {
			encoded, err := json.Marshal(proc)
			if err != nil {
				return err
			}

			if err := downloads.Put([]byte(proc.Id), encoded); err != nil {
				return err
			}
		}

		return nil
	})
}

View File

@@ -35,11 +35,11 @@ type LiveStream struct {
waitTime time.Duration
liveDate time.Time
mq *queue.MessageQueue
db *kv.Store
mq *queue.MessageQueue
store *kv.Store
}
func New(url string, done chan *LiveStream, mq *queue.MessageQueue, db *kv.Store) *LiveStream {
func New(url string, done chan *LiveStream, mq *queue.MessageQueue, store *kv.Store) *LiveStream {
return &LiveStream{
url: url,
done: done,
@@ -47,7 +47,7 @@ func New(url string, done chan *LiveStream, mq *queue.MessageQueue, db *kv.Store
waitTime: time.Second * 0,
waitTimeChan: make(chan time.Duration),
mq: mq,
db: db,
store: store,
}
}
@@ -94,7 +94,7 @@ func (l *LiveStream) Start() error {
//TODO: add pipes
d := downloaders.NewLiveStreamDownloader(l.url, []pipes.Pipe{})
l.db.Set(d)
l.store.Set(d)
l.mq.Publish(d)
return nil

View File

@@ -1,28 +1,26 @@
package livestream
import (
"encoding/gob"
"log/slog"
"maps"
"os"
"path/filepath"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
bolt "go.etcd.io/bbolt"
)
var bucket = []byte("livestreams")
type Monitor struct {
db *kv.Store // where the just started livestream will be published
db *bolt.DB
store *kv.Store // where the just started livestream will be published
mq *queue.MessageQueue // where the just started livestream will be published
streams map[string]*LiveStream // keeps track of the livestreams
done chan *LiveStream // to signal individual processes completition
}
func NewMonitor(mq *queue.MessageQueue, db *kv.Store) *Monitor {
func NewMonitor(mq *queue.MessageQueue, store *kv.Store, db *bolt.DB) *Monitor {
return &Monitor{
mq: mq,
db: db,
store: store,
streams: make(map[string]*LiveStream),
done: make(chan *LiveStream),
}
@@ -32,14 +30,24 @@ func NewMonitor(mq *queue.MessageQueue, db *kv.Store) *Monitor {
// Schedule reaps finished livestreams: each stream signalled on done is
// removed from the in-memory map and its persisted entry is deleted from
// the bolt bucket so it is not resumed on the next restart.
func (m *Monitor) Schedule() {
	for l := range m.done {
		delete(m.streams, l.url)

		err := m.db.Update(func(tx *bolt.Tx) error {
			b := tx.Bucket(bucket)
			return b.Delete([]byte(l.url))
		})
		if err != nil {
			// Previously ignored: a failed delete means the finished stream
			// would be restored again after a restart.
			slog.Error("deleting livestream from bucket", "url", l.url, "err", err)
		}
	}
}
// Add starts monitoring url: it spawns the livestream worker, registers it
// in the in-memory map and persists the url in the bolt bucket so the
// stream is resumed after a restart.
func (m *Monitor) Add(url string) {
	ls := New(url, m.done, m.mq, m.store)

	go ls.Start()

	m.streams[url] = ls

	// The stored value is empty: the key alone marks a monitored stream.
	err := m.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		return b.Put([]byte(url), []byte{})
	})
	if err != nil {
		// Previously ignored: on failure the stream silently would not be
		// restored after a restart.
		slog.Error("persisting livestream", "url", url, "err", err)
	}
}
func (m *Monitor) Remove(url string) error {
@@ -59,11 +67,6 @@ func (m *Monitor) Status() LiveStreamStatus {
status := make(LiveStreamStatus)
for k, v := range m.streams {
// wt, ok := <-v.WaitTime()
// if !ok {
// continue
// }
status[k] = Status{
Status: v.status,
WaitTime: v.waitTime,
@@ -74,46 +77,13 @@ func (m *Monitor) Status() LiveStreamStatus {
return status
}
// Persist writes the set of currently monitored livestream urls,
// gob-encoded, to livestreams.dat in the configured session directory.
func (m *Monitor) Persist() error {
	path := filepath.Join(config.Instance().SessionFilePath, "livestreams.dat")

	fd, err := os.Create(path)
	if err != nil {
		return err
	}
	defer fd.Close()

	slog.Debug("persisting livestream monitor state")

	var urls []string
	for url := range maps.Keys(m.streams) {
		urls = append(urls, url)
	}

	return gob.NewEncoder(fd).Encode(urls)
}
// Restore a saved state and resume the monitored livestreams
//
// NOTE(review): this body fuses the pre-migration (gob session file) and
// post-migration (bolt bucket) implementations of Restore — everything
// after the first `return nil` below is unreachable as written. Only the
// bolt-backed tail should survive; the gob-based code above it was the
// version deleted by this commit.
func (m *Monitor) Restore() error {
	fd, err := os.Open(filepath.Join(config.Instance().SessionFilePath, "livestreams.dat"))
	if err != nil {
		return err
	}
	defer fd.Close()
	var toRestore []string
	if err := gob.NewDecoder(fd).Decode(&toRestore); err != nil {
		return err
	}
	for _, url := range toRestore {
		m.Add(url)
	}
	slog.Debug("restored livestream monitor state")
	return nil
	// Unreachable (diff artifact): bolt-backed restore — re-Add every url
	// persisted in the livestreams bucket.
	return m.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		return b.ForEach(func(k, v []byte) error {
			m.Add(string(k))
			return nil
		})
	})
}

View File

@@ -0,0 +1,95 @@
package pipeline
import (
"encoding/json"
"fmt"
bolt "go.etcd.io/bbolt"
)
var bucket = []byte("pipelines")
// Step is a single stage of a processing pipeline,
// e.g. a "transcoder" or a "filewriter".
type Step struct {
	Type       string   `json:"type"`                  // step kind, e.g. "transcoder", "filewriter"
	FFmpegArgs []string `json:"ffmpeg_args,omitempty"` // arguments passed to ffmpeg
	Path       string   `json:"path,omitempty"`        // output path, only used by filewriter steps
}
// Pipeline is a named, ordered sequence of processing steps,
// persisted in bolt keyed by ID.
type Pipeline struct {
	ID    string `json:"id"`
	Name  string `json:"name"`
	Steps []Step `json:"steps"`
}
// Store persists pipelines in a dedicated bolt database
// (see NewStore for the bucket initialization).
type Store struct {
	db *bolt.DB
}
// NewStore opens (or creates) the bolt database at path and ensures the
// "pipelines" bucket exists. The returned Store owns the database handle.
func NewStore(path string) (*Store, error) {
	db, err := bolt.Open(path, 0600, nil)
	if err != nil {
		return nil, err
	}
	// init bucket
	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(bucket)
		return err
	})
	if err != nil {
		// BUG FIX: close the freshly opened database before bailing out —
		// returning without closing leaked the handle and bolt's exclusive
		// file lock, preventing any later open of the same path.
		db.Close()
		return nil, err
	}
	return &Store{db: db}, nil
}
// Save serializes p to JSON and upserts it into the pipelines bucket,
// keyed by its ID.
func (s *Store) Save(p Pipeline) error {
	encoded, err := json.Marshal(p)
	if err != nil {
		return err
	}

	return s.db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(bucket).Put([]byte(p.ID), encoded)
	})
}
// Get loads the pipeline stored under id, returning an error when no such
// pipeline exists or when the stored value cannot be decoded.
func (s *Store) Get(id string) (*Pipeline, error) {
	var pipeline Pipeline

	if err := s.db.View(func(tx *bolt.Tx) error {
		raw := tx.Bucket(bucket).Get([]byte(id))
		if raw == nil {
			return fmt.Errorf("pipeline %s not found", id)
		}
		return json.Unmarshal(raw, &pipeline)
	}); err != nil {
		return nil, err
	}

	return &pipeline, nil
}
// List returns every pipeline stored in the bucket, decoding each entry
// from JSON. Iteration stops at the first undecodable entry.
func (s *Store) List() ([]Pipeline, error) {
	var pipelines []Pipeline

	if err := s.db.View(func(tx *bolt.Tx) error {
		return tx.Bucket(bucket).ForEach(func(_, raw []byte) error {
			var p Pipeline
			if err := json.Unmarshal(raw, &p); err != nil {
				return err
			}
			pipelines = append(pipelines, p)
			return nil
		})
	}); err != nil {
		return nil, err
	}

	return pipelines, nil
}