refactoring: config struct & pipelines
@@ -63,7 +63,7 @@ func (g *GenericDownloader) Start() error {
 	g.Params = argsSanitizer(g.Params)

 	out := internal.DownloadOutput{
-		Path:     config.Instance().DownloadPath,
+		Path:     config.Instance().Paths.DownloadPath,
 		Filename: "%(title)s.%(ext)s",
 	}
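The recurring change in this commit is the move from flat config fields to nested ones: `config.Instance().DownloadPath` becomes `config.Instance().Paths.DownloadPath`, and later `QueueSize` becomes `Server.QueueSize`. A rough sketch of the config shape those call sites imply follows; the sub-struct type names and the singleton wiring are assumptions, not taken from this diff.

package config

import "sync"

// Grouping inferred from the call sites in this diff; anything beyond
// DownloadPath, DownloaderPath and QueueSize is hypothetical.
type PathsConfig struct {
	DownloadPath   string
	DownloaderPath string
}

type ServerConfig struct {
	QueueSize int
}

type Config struct {
	Paths  PathsConfig
	Server ServerConfig
}

var (
	instance Config
	once     sync.Once
)

// Instance returns the process-wide configuration singleton.
func Instance() *Config {
	once.Do(func() { /* the real package loads values from file/flags here */ })
	return &instance
}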
@@ -101,7 +101,7 @@ func (g *GenericDownloader) Start() error {

 	slog.Info("requesting download", slog.String("url", g.URL), slog.Any("params", params))

-	cmd := exec.Command(config.Instance().DownloaderPath, params...)
+	cmd := exec.Command(config.Instance().Paths.DownloaderPath, params...)
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

 	stdout, err := cmd.StdoutPipe()
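Beyond the path rename, note that every `exec.Command` call here keeps `SysProcAttr{Setpgid: true}`. Putting the child in its own process group is what lets the server later kill yt-dlp together with anything it spawns (for example an ffmpeg post-processor). A minimal sketch of that pattern; the helper name is ours, not something from this repo.

package sketch

import (
	"os/exec"
	"syscall"
)

// killGroup terminates a command started with Setpgid: true, including any
// children it forked. Hypothetical helper, shown only to illustrate why the
// downloaders set SysProcAttr (Unix only).
func killGroup(cmd *exec.Cmd) error {
	if cmd.Process == nil {
		return nil
	}
	// A negative PID signals the whole process group.
	return syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)
}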
@@ -57,7 +57,7 @@ func (l *LiveStreamDownloader) Start() error {

 	params := append(baseParams, "-o", "-")

-	cmd := exec.Command(config.Instance().DownloaderPath, params...)
+	cmd := exec.Command(config.Instance().Paths.DownloaderPath, params...)
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

 	// stdout = media stream
@@ -102,11 +102,11 @@ func (l *LiveStreamDownloader) Start() error {
 	if !l.hasFileWriter() {
 		go func() {
 			filepath.Join(
-				config.Instance().DownloadPath,
+				config.Instance().Paths.DownloadPath,
 				fmt.Sprintf("%s (live) %s.mp4", l.Id, time.Now().Format(time.ANSIC)),
 			)

-			defaultPath := filepath.Join(config.Instance().DownloadPath)
+			defaultPath := filepath.Join(config.Instance().Paths.DownloadPath)
 			f, err := os.Create(defaultPath)
 			if err != nil {
 				slog.Error("failed to create fallback file", slog.Any("err", err))
@@ -1,17 +1,13 @@
package kv

import (
	"encoding/gob"
	"encoding/json"
	"errors"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"time"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
@@ -111,28 +107,6 @@ func (m *Store) All() *[]internal.ProcessSnapshot {
 	return &running
 }

-// Persist the database in a single file named "session.dat"
-func (m *Store) Persist() error {
-	running := m.All()
-
-	sf := filepath.Join(config.Instance().SessionFilePath, "session.dat")
-
-	fd, err := os.Create(sf)
-	if err != nil {
-		return errors.Join(errors.New("failed to persist session"), err)
-	}
-
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-	session := Session{Processes: *running}
-
-	if err := gob.NewEncoder(fd).Encode(session); err != nil {
-		return errors.Join(errors.New("failed to persist session"), err)
-	}
-
-	return nil
-}
-
 // Restore a persisted state
 func (m *Store) Restore(mq *queue.MessageQueue) {
 	m.mu.Lock()
@@ -54,13 +54,13 @@ func New(url string, done chan *LiveStream, mq *queue.MessageQueue, store *kv.St
 // Start the livestream monitoring process; on completion it signals on the done channel
 func (l *LiveStream) Start() error {
 	cmd := exec.Command(
-		config.Instance().DownloaderPath,
+		config.Instance().Paths.DownloaderPath,
 		l.url,
 		"--wait-for-video", "30", // wait for the stream to be live and recheck every 30 secs
 		"--no-colors", // no ansi color fuzz
 		"--simulate",
 		"--newline",
-		"--paths", config.Instance().DownloadPath,
+		"--paths", config.Instance().Paths.DownloadPath,
 	)

 	stdout, err := cmd.StdoutPipe()
@@ -10,7 +10,7 @@ import (
 )

 func setupTest() {
-	config.Instance().DownloaderPath = "build/yt-dlp"
+	config.Instance().Paths.DownloaderPath = "build/yt-dlp"
 }

 const URL = "https://www.youtube.com/watch?v=pwoAyLGOysU"
@@ -17,6 +17,11 @@ type Monitor struct {
 }

 func NewMonitor(mq *queue.MessageQueue, store *kv.Store, db *bolt.DB) *Monitor {
+	db.Update(func(tx *bolt.Tx) error {
+		_, err := tx.CreateBucketIfNotExists(bucket)
+		return err
+	})
+
 	return &Monitor{
 		mq: mq,
 		db: db,
@@ -15,7 +15,7 @@ import (
 )

 func DefaultFetcher(url string) (*common.DownloadMetadata, error) {
-	cmd := exec.Command(config.Instance().DownloaderPath, url, "-J")
+	cmd := exec.Command(config.Instance().Paths.DownloaderPath, url, "-J")
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

 	stdout, err := cmd.StdoutPipe()
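DefaultFetcher shells out to yt-dlp with `-J`, which prints a single JSON document describing the video on stdout. A hedged sketch of how that output can be decoded; the struct below is a stand-in, not the repo's `common.DownloadMetadata`, and the helper name is ours.

package sketch

import (
	"encoding/json"
	"os/exec"
)

// videoInfo is a minimal stand-in; yt-dlp's -J output has many more fields.
type videoInfo struct {
	Title     string  `json:"title"`
	Thumbnail string  `json:"thumbnail"`
	Duration  float64 `json:"duration"`
}

// fetchInfo runs the downloader once in dump-JSON mode and decodes the result.
func fetchInfo(downloaderPath, url string) (*videoInfo, error) {
	cmd := exec.Command(downloaderPath, url, "-J")

	out, err := cmd.Output() // -J writes the JSON document to stdout
	if err != nil {
		return nil, err
	}

	var info videoInfo
	if err := json.Unmarshal(out, &info); err != nil {
		return nil, err
	}
	return &info, nil
}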
server/internal/pipeline/rest.go (new file, 92 lines)
@@ -0,0 +1,92 @@
+package pipeline
+
+import (
+	"encoding/json"
+	"net/http"
+
+	"github.com/go-chi/chi/v5"
+	bolt "go.etcd.io/bbolt"
+)
+
+type handler struct {
+	store *Store
+}
+
+func NewRestHandler(db *bolt.DB) *handler {
+	store, _ := NewStore(db)
+	return &handler{
+		store: store,
+	}
+}
+
+func (h *handler) GetPipeline(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+
+	id := chi.URLParam(r, "id")
+
+	p, err := h.store.Get(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	if err := json.NewEncoder(w).Encode(p); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (h *handler) GetAllPipelines(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+
+	p, err := h.store.List()
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	if err := json.NewEncoder(w).Encode(p); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (h *handler) SavePipeline(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+
+	defer r.Body.Close()
+	var req Pipeline
+
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	id, err := h.store.Save(req)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	if err := json.NewEncoder(w).Encode(id); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (h *handler) DeletePipeline(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Content-Type", "application/json")
+
+	id := chi.URLParam(r, "id")
+
+	err := h.store.Delete(id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	if err := json.NewEncoder(w).Encode("ok"); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
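The new rest.go only defines the handlers; wiring them into the server's router is not part of this diff. Assuming the project mounts chi subrouters the way chi normally does, the hookup could look roughly like the sketch below (function and route names are illustrative, not from the repo).

package pipeline

import (
	"github.com/go-chi/chi/v5"
	bolt "go.etcd.io/bbolt"
)

// ApplyRouter is a sketch of how the handlers above might be mounted;
// the route layout and mounting point are assumptions.
func ApplyRouter(db *bolt.DB) func(chi.Router) {
	h := NewRestHandler(db)

	return func(r chi.Router) {
		r.Get("/", h.GetAllPipelines)
		r.Get("/{id}", h.GetPipeline)
		r.Post("/", h.SavePipeline)
		r.Delete("/{id}", h.DeletePipeline)
	}
}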
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"

+	"github.com/google/uuid"
 	bolt "go.etcd.io/bbolt"
 )
@@ -13,6 +14,7 @@ type Step struct {
 	Type       string   `json:"type"`                  // e.g. "transcoder", "filewriter"
 	FFmpegArgs []string `json:"ffmpeg_args,omitempty"` // args passed to ffmpeg
 	Path       string   `json:"path,omitempty"`        // filewriter only
+	Extension  string   `json:"extension,omitempty"`   // filewriter only
 }

 type Pipeline struct {
@@ -25,14 +27,9 @@ type Store struct {
 	db *bolt.DB
 }

-func NewStore(path string) (*Store, error) {
-	db, err := bolt.Open(path, 0600, nil)
-	if err != nil {
-		return nil, err
-	}
-
+func NewStore(db *bolt.DB) (*Store, error) {
 	// init bucket
-	err = db.Update(func(tx *bolt.Tx) error {
+	err := db.Update(func(tx *bolt.Tx) error {
 		_, err := tx.CreateBucketIfNotExists(bucket)
 		return err
 	})
@@ -43,13 +40,17 @@ func NewStore(path string) (*Store, error) {
 	return &Store{db: db}, nil
 }

-func (s *Store) Save(p Pipeline) error {
-	data, err := json.Marshal(p)
-	if err != nil {
-		return err
-	}
-
-	return s.db.Update(func(tx *bolt.Tx) error {
+func (s *Store) Save(p Pipeline) (string, error) {
+	if p.ID == "" {
+		p.ID = uuid.NewString()
+	}
+
+	data, err := json.Marshal(p)
+	if err != nil {
+		return "", err
+	}
+
+	return p.ID, s.db.Update(func(tx *bolt.Tx) error {
 		b := tx.Bucket(bucket)
 		return b.Put([]byte(p.ID), data)
 	})
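With Save now generating the UUID itself and returning it, a caller such as SavePipeline above gets a stable handle back without pre-filling the ID. A small usage sketch, assuming an already-open *bolt.DB; the Steps field on Pipeline and the step values are assumptions, since the Pipeline fields are not shown in this hunk.

package pipeline

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// examplePersist is illustrative only: it builds a two-step pipeline,
// stores it, and prints the generated ID.
func examplePersist(db *bolt.DB) error {
	store, err := NewStore(db)
	if err != nil {
		return err
	}

	p := Pipeline{
		Steps: []Step{
			{Type: "transcoder", FFmpegArgs: []string{"-c:v", "libx264"}},
			{Type: "filewriter", Path: "/downloads", Extension: "mp4"},
		},
	}

	id, err := store.Save(p) // the ID is generated when left empty
	if err != nil {
		return err
	}

	fmt.Println("saved pipeline", id)
	return nil
}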
@@ -93,3 +94,10 @@ func (s *Store) List() ([]Pipeline, error) {

 	return result, nil
 }
+
+func (s *Store) Delete(id string) error {
+	return s.db.Update(func(tx *bolt.Tx) error {
+		b := tx.Bucket(bucket)
+		return b.Delete([]byte(id))
+	})
+}
@@ -5,101 +5,119 @@ import (
 	"errors"
 	"log/slog"

-	evbus "github.com/asaskevich/EventBus"
 	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
 	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
 	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/metadata"
-	"golang.org/x/sync/semaphore"
 )

 const queueName = "process:pending"

 type MessageQueue struct {
-	concurrency int
-	eventBus    evbus.Bus
+	concurrency   int
+	downloadQueue chan downloaders.Downloader
+	metadataQueue chan downloaders.Downloader
+	ctx           context.Context
+	cancel        context.CancelFunc
 }

 // Creates a new message queue.
 // By default it will be created with a size equal to the number of logical
 // CPU cores -1.
 // The queue size can be set via the qs flag.
 func NewMessageQueue() (*MessageQueue, error) {
-	qs := config.Instance().QueueSize
+	qs := config.Instance().Server.QueueSize
 	if qs <= 0 {
 		return nil, errors.New("invalid queue size")
 	}

+	ctx, cancel := context.WithCancel(context.Background())
+
 	return &MessageQueue{
-		concurrency: qs,
-		eventBus:    evbus.New(),
+		concurrency:   qs,
+		downloadQueue: make(chan downloaders.Downloader, qs*2),
+		metadataQueue: make(chan downloaders.Downloader, qs*4),
+		ctx:           ctx,
+		cancel:        cancel,
 	}, nil
 }

-// Publish a message to the queue and set the task to a peding state.
-func (m *MessageQueue) Publish(p downloaders.Downloader) {
-	// needs to have an id set before
-	p.SetPending(true)
-
-	m.eventBus.Publish(queueName, p)
+// Publish download job
+func (m *MessageQueue) Publish(d downloaders.Downloader) {
+	d.SetPending(true)
+
+	select {
+	case m.downloadQueue <- d:
+		slog.Info("published download", slog.String("id", d.GetId()))
+	case <-m.ctx.Done():
+		slog.Warn("queue stopped, dropping download", slog.String("id", d.GetId()))
+	}
 }

+// Workers: download + metadata
 func (m *MessageQueue) SetupConsumers() {
-	go m.downloadConsumer()
-	go m.metadataSubscriber()
+	// N parallel workers for downloadQueue
+	for i := 0; i < m.concurrency; i++ {
+		go m.downloadWorker(i)
+	}
+
+	// 1 serial worker for metadata
+	go m.metadataWorker()
 }

-// Setup the consumer listener which subscribes to the changes to the producer
-// channel and triggers the "download" action.
-func (m *MessageQueue) downloadConsumer() {
-	sem := semaphore.NewWeighted(int64(m.concurrency))
-
-	m.eventBus.SubscribeAsync(queueName, func(p downloaders.Downloader) {
-		sem.Acquire(context.Background(), 1)
-		defer sem.Release(1)
-
-		slog.Info("received process from event bus",
-			slog.String("bus", queueName),
-			slog.String("consumer", "downloadConsumer"),
-			slog.String("id", p.GetId()),
-		)
-
-		if !p.IsCompleted() {
-			slog.Info("started process",
-				slog.String("bus", queueName),
-				slog.String("id", p.GetId()),
-			)
-			p.Start()
-		}
-	}, false)
-}
-
-// Setup the metadata consumer listener which subscribes to the changes to the
-// producer channel and adds metadata to each download.
-func (m *MessageQueue) metadataSubscriber() {
-	// How many concurrent metadata fetcher jobs are spawned
-	// Since there's ongoing downloads, 1 job at time seems a good compromise
-	sem := semaphore.NewWeighted(1)
-
-	m.eventBus.SubscribeAsync(queueName, func(p downloaders.Downloader) {
-		sem.Acquire(context.Background(), 1)
-		defer sem.Release(1)
-
-		slog.Info("received process from event bus",
-			slog.String("bus", queueName),
-			slog.String("consumer", "metadataConsumer"),
-			slog.String("id", p.GetId()),
-		)
-
-		if p.IsCompleted() {
-			slog.Warn("proccess has an illegal state",
-				slog.String("id", p.GetId()),
-				slog.String("status", "completed"),
-			)
-		}
-
-		p.SetMetadata(metadata.DefaultFetcher)
-
-	}, false)
-}
+// Download worker
+func (m *MessageQueue) downloadWorker(workerId int) {
+	for {
+		select {
+		case <-m.ctx.Done():
+			return
+		case p := <-m.downloadQueue:
+			if p == nil {
+				continue
+			}
+			if p.IsCompleted() {
+				continue
+			}
+
+			slog.Info("download worker started",
+				slog.Int("worker", workerId),
+				slog.String("id", p.GetId()),
+			)
+
+			p.Start()
+
+			// after the download starts successfully we pass it to the metadata queue
+			select {
+			case m.metadataQueue <- p:
+				slog.Info("queued for metadata", slog.String("id", p.GetId()))
+			case <-m.ctx.Done():
+				return
+			}
+		}
+	}
+}
+
+func (m *MessageQueue) metadataWorker() {
+	for {
+		select {
+		case <-m.ctx.Done():
+			return
+		case p := <-m.metadataQueue:
+			if p == nil {
+				continue
+			}
+
+			slog.Info("metadata worker started",
+				slog.String("id", p.GetId()),
+			)
+
+			if p.IsCompleted() {
+				slog.Warn("metadata skipped, illegal state",
+					slog.String("id", p.GetId()),
+				)
+				continue
+			}
+
+			p.SetMetadata(metadata.DefaultFetcher)
+		}
+	}
+}
+
+func (m *MessageQueue) Stop() {
+	m.cancel()
+	close(m.downloadQueue)
+	close(m.metadataQueue)
+}
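The queue now has an explicit lifecycle: construct it, start the workers, publish downloads, and call Stop on shutdown (which cancels the context and closes both channels). A usage sketch under the assumption that some constructor for a concrete downloaders.Downloader exists; `newDownload` is a stand-in, not a function from the repo.

package sketch

import (
	"log"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
)

// run shows the intended order of operations around the refactored queue.
func run(newDownload func(url string) downloaders.Downloader) error {
	mq, err := queue.NewMessageQueue() // size comes from config.Instance().Server.QueueSize
	if err != nil {
		return err
	}

	mq.SetupConsumers() // N download workers + 1 serial metadata worker

	mq.Publish(newDownload("https://www.youtube.com/watch?v=pwoAyLGOysU"))
	log.Println("download queued")

	// On server shutdown: cancel the context and close both channels.
	mq.Stop()
	return nil
}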