Compare commits
11 Commits
feat-suppo
...
v3.2.6
| Author | SHA1 | Date | |
|---|---|---|---|
| e4362468f7 | |||
| 6880f60d14 | |||
|
|
5d4aa7e2a3 | ||
|
|
2845196bc7 | ||
|
|
983915f8aa | ||
| ce2fb13ef2 | |||
| 99069fe5f7 | |||
| 761f26b387 | |||
| eec72bb6e2 | |||
| ceb92d066c | |||
|
|
cf74948840 |
1
.deploystack/docker-run.txt
Normal file
1
.deploystack/docker-run.txt
Normal file
@@ -0,0 +1 @@
|
||||
docker run -d -p 3033:3033 -v /downloads:/downloads marcobaobao/yt-dlp-webui
|
||||
@@ -3,6 +3,7 @@
|
||||
result/
|
||||
result
|
||||
dist
|
||||
.pnpm-store/
|
||||
.pnpm-debug.log
|
||||
node_modules
|
||||
.env
|
||||
@@ -20,9 +21,11 @@ cookies.txt
|
||||
__debug*
|
||||
ui/
|
||||
.idea
|
||||
.idea/
|
||||
frontend/.pnp.cjs
|
||||
frontend/.pnp.loader.mjs
|
||||
frontend/.yarn/install-state.gz
|
||||
.db.lock
|
||||
livestreams.dat
|
||||
.git
|
||||
.vite/deps
|
||||
archive.txt
|
||||
|
||||
12
README.md
12
README.md
@@ -28,7 +28,7 @@ docker pull ghcr.io/marcopiovanello/yt-dlp-web-ui:latest
|
||||
## Community stuff
|
||||
Feel free to join :)
|
||||
|
||||
[](https://discord.gg/3Sj9ZZHv)
|
||||
[](https://discord.gg/WRnVWr4y)
|
||||
|
||||
## Some screenshots
|
||||

|
||||
@@ -115,6 +115,16 @@ services:
|
||||
restart: unless-stopped
|
||||
```
|
||||
|
||||
### ⚡ One-Click Deploy
|
||||
|
||||
| Cloud Provider | Deploy Button |
|
||||
|----------------|---------------|
|
||||
| AWS | <a href="https://deploystack.io/deploy/marcopiovanello-yt-dlp-web-ui?provider=aws&language=cfn"><img src="https://raw.githubusercontent.com/deploystackio/deploy-templates/refs/heads/main/.assets/img/aws.svg" height="38"></a> |
|
||||
| DigitalOcean | <a href="https://deploystack.io/deploy/marcopiovanello-yt-dlp-web-ui?provider=do&language=dop"><img src="https://raw.githubusercontent.com/deploystackio/deploy-templates/refs/heads/main/.assets/img/do.svg" height="38"></a> |
|
||||
| Render | <a href="https://deploystack.io/deploy/marcopiovanello-yt-dlp-web-ui?provider=rnd&language=rnd"><img src="https://raw.githubusercontent.com/deploystackio/deploy-templates/refs/heads/main/.assets/img/rnd.svg" height="38"></a> |
|
||||
|
||||
<sub>Generated by <a href="https://deploystack.io/c/marcopiovanello-yt-dlp-web-ui" target="_blank">DeployStack.io</a></sub>
|
||||
|
||||
## [Prebuilt binaries](https://github.com/marcopiovanello/yt-dlp-web-ui/releases) installation
|
||||
|
||||
```sh
|
||||
|
||||
@@ -31,7 +31,7 @@ keys:
|
||||
splashText: No active downloads
|
||||
archiveTitle: Archive
|
||||
clipboardAction: Copied URL to clipboard
|
||||
playlistCheckbox: Download playlist (it will take time, after submitting you may close this window)
|
||||
playlistCheckbox: Download playlist
|
||||
restartAppMessage: Needs a page reload to take effect
|
||||
servedFromReverseProxyCheckbox: Is behind a reverse proxy
|
||||
urlBase: URL base, for reverse proxy support (subdir), defaults to empty
|
||||
@@ -79,4 +79,5 @@ keys:
|
||||
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
|
||||
cronExpressionLabel: 'Cron expression'
|
||||
editButtonLabel: 'Edit'
|
||||
newSubscriptionButton: New subscription
|
||||
newSubscriptionButton: New subscription
|
||||
clearCompletedButton: 'Clear completed'
|
||||
@@ -121,11 +121,14 @@ export const appTitleState = atomWithStorage(
|
||||
export const serverAddressAndPortState = atom((get) => {
|
||||
if (get(servedFromReverseProxySubDirState)) {
|
||||
return `${get(serverAddressState)}/${get(servedFromReverseProxySubDirState)}/`
|
||||
.replaceAll('"', '') // TODO: atomWithStorage put extra double quotes on strings
|
||||
}
|
||||
if (get(servedFromReverseProxyState)) {
|
||||
return `${get(serverAddressState)}`
|
||||
.replaceAll('"', '')
|
||||
}
|
||||
return `${get(serverAddressState)}:${get(serverPortState)}`
|
||||
.replaceAll('"', '')
|
||||
})
|
||||
|
||||
export const serverURL = atom((get) =>
|
||||
@@ -135,14 +138,12 @@ export const serverURL = atom((get) =>
|
||||
export const rpcWebSocketEndpoint = atom((get) => {
|
||||
const proto = window.location.protocol === 'https:' ? 'wss:' : 'ws:'
|
||||
return `${proto}//${get(serverAddressAndPortState)}/rpc/ws`
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
export const rpcHTTPEndpoint = atom((get) => {
|
||||
const proto = window.location.protocol
|
||||
return `${proto}//${get(serverAddressAndPortState)}/rpc/http`
|
||||
}
|
||||
)
|
||||
})
|
||||
|
||||
export const serverSideCookiesState = atom<Promise<string>>(async (get) => await pipe(
|
||||
ffetch<Readonly<{ cookies: string }>>(`${get(serverURL)}/api/v1/cookies`),
|
||||
@@ -180,5 +181,4 @@ export const settingsState = atom<SettingsState>((get) => ({
|
||||
listView: get(listViewState),
|
||||
servedFromReverseProxy: get(servedFromReverseProxyState),
|
||||
appTitle: get(appTitleState)
|
||||
})
|
||||
)
|
||||
}))
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import AddCircleIcon from '@mui/icons-material/AddCircle'
|
||||
import BuildCircleIcon from '@mui/icons-material/BuildCircle'
|
||||
import ClearAllIcon from '@mui/icons-material/ClearAll'
|
||||
import DeleteForeverIcon from '@mui/icons-material/DeleteForever'
|
||||
import FolderZipIcon from '@mui/icons-material/FolderZip'
|
||||
import FormatListBulleted from '@mui/icons-material/FormatListBulleted'
|
||||
@@ -42,6 +43,11 @@ const HomeSpeedDial: React.FC<Props> = ({ onDownloadOpen, onEditorOpen }) => {
|
||||
tooltipTitle={i18n.t('bulkDownload')}
|
||||
onClick={() => window.open(`${serverAddr}/archive/bulk?token=${localStorage.getItem('token')}`)}
|
||||
/>
|
||||
<SpeedDialAction
|
||||
icon={<ClearAllIcon />}
|
||||
tooltipTitle={i18n.t('clearCompletedButton')}
|
||||
onClick={() => client.clearCompleted()}
|
||||
/>
|
||||
<SpeedDialAction
|
||||
icon={<DeleteForeverIcon />}
|
||||
tooltipTitle={i18n.t('abortAllButton')}
|
||||
|
||||
@@ -200,4 +200,11 @@ export class RPCClient {
|
||||
params: []
|
||||
})
|
||||
}
|
||||
|
||||
public clearCompleted() {
|
||||
return this.sendHTTP({
|
||||
method: 'Service.ClearCompleted',
|
||||
params: []
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -13,6 +13,7 @@ export type RPCMethods =
|
||||
| "Service.ProgressLivestream"
|
||||
| "Service.KillLivestream"
|
||||
| "Service.KillAllLivestream"
|
||||
| "Service.ClearCompleted"
|
||||
|
||||
export type RPCRequest = {
|
||||
method: RPCMethods
|
||||
|
||||
2
go.mod
2
go.mod
@@ -1,6 +1,6 @@
|
||||
module github.com/marcopiovanello/yt-dlp-web-ui/v3
|
||||
|
||||
go 1.23
|
||||
go 1.24
|
||||
|
||||
require (
|
||||
github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef
|
||||
|
||||
58
server/archive/utils.go
Normal file
58
server/archive/utils.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
|
||||
)
|
||||
|
||||
// Perform a search on the archive.txt file an determines if a download
|
||||
// has already be done.
|
||||
func DownloadExists(ctx context.Context, url string) (bool, error) {
|
||||
cmd := exec.CommandContext(
|
||||
ctx,
|
||||
config.Instance().DownloaderPath,
|
||||
"--print",
|
||||
"%(extractor)s %(id)s",
|
||||
url,
|
||||
)
|
||||
stdout, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
extractorAndURL := bytes.Trim(stdout, "\n")
|
||||
|
||||
fd, err := os.Open(filepath.Join(config.Instance().Dir(), "archive.txt"))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
scanner := bufio.NewScanner(fd)
|
||||
|
||||
// search linearly for lower memory usage...
|
||||
// the a pre-sorted with hashed values version of the archive.txt file can be loaded in memory
|
||||
// and perform a binary search on it.
|
||||
for scanner.Scan() {
|
||||
if bytes.Equal(scanner.Bytes(), extractorAndURL) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// data, err := io.ReadAll(fd)
|
||||
// if err != nil {
|
||||
// return false, err
|
||||
// }
|
||||
|
||||
// slices.BinarySearchFunc(data, extractorAndURL, func(a []byte, b []byte) int {
|
||||
// return hash(a).Compare(hash(b))
|
||||
// })
|
||||
|
||||
return false, nil
|
||||
}
|
||||
18
server/common/types.go
Normal file
18
server/common/types.go
Normal file
@@ -0,0 +1,18 @@
|
||||
package common
|
||||
|
||||
import "time"
|
||||
|
||||
// DownloadInfo mirrors the subset of fields the server cares about from
// yt-dlp's -J (single-entry JSON) output.
type DownloadInfo struct {
	URL        string `json:"url"`
	Title      string `json:"title"`
	Thumbnail  string `json:"thumbnail"`
	Resolution string `json:"resolution"`
	// NOTE(review): filesize_approx can exceed 2 GiB for long videos, which
	// would overflow int32 — confirm whether this should be int64.
	Size        int32     `json:"filesize_approx"`
	VCodec      string    `json:"vcodec"`
	ACodec      string    `json:"acodec"`
	Extension   string    `json:"ext"`
	OriginalURL string    `json:"original_url"`
	FileName    string    `json:"filename"`
	CreatedAt   time.Time `json:"created_at"` // presumably set server-side, not by yt-dlp — verify against callers
}
|
||||
@@ -2,6 +2,7 @@ package internal
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
type LoadBalancer struct {
|
||||
@@ -9,7 +10,29 @@ type LoadBalancer struct {
|
||||
done chan *Worker
|
||||
}
|
||||
|
||||
func (b *LoadBalancer) Balance(work chan Process) {
|
||||
func NewLoadBalancer(numWorker int) *LoadBalancer {
|
||||
var pool Pool
|
||||
|
||||
doneChan := make(chan *Worker)
|
||||
|
||||
for i := range numWorker {
|
||||
w := &Worker{
|
||||
requests: make(chan *Process, 1),
|
||||
index: i,
|
||||
}
|
||||
go w.Work(doneChan)
|
||||
pool = append(pool, w)
|
||||
|
||||
slog.Info("spawned worker", slog.Int("index", i))
|
||||
}
|
||||
|
||||
return &LoadBalancer{
|
||||
pool: pool,
|
||||
done: doneChan,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LoadBalancer) Balance(work chan *Process) {
|
||||
for {
|
||||
select {
|
||||
case req := <-work:
|
||||
@@ -20,7 +43,7 @@ func (b *LoadBalancer) Balance(work chan Process) {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *LoadBalancer) dispatch(req Process) {
|
||||
func (b *LoadBalancer) dispatch(req *Process) {
|
||||
w := heap.Pop(&b.pool).(*Worker)
|
||||
w.requests <- req
|
||||
w.pending++
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package internal
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
|
||||
)
|
||||
|
||||
// Used to unmarshall yt-dlp progress
|
||||
type ProgressTemplate struct {
|
||||
@@ -29,29 +31,14 @@ type DownloadProgress struct {
|
||||
ETA float64 `json:"eta"`
|
||||
}
|
||||
|
||||
// Used to deser the yt-dlp -J output
|
||||
type DownloadInfo struct {
|
||||
URL string `json:"url"`
|
||||
Title string `json:"title"`
|
||||
Thumbnail string `json:"thumbnail"`
|
||||
Resolution string `json:"resolution"`
|
||||
Size int32 `json:"filesize_approx"`
|
||||
VCodec string `json:"vcodec"`
|
||||
ACodec string `json:"acodec"`
|
||||
Extension string `json:"ext"`
|
||||
OriginalURL string `json:"original_url"`
|
||||
FileName string `json:"filename"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
}
|
||||
|
||||
// struct representing the response sent to the client
|
||||
// as JSON-RPC result field
|
||||
type ProcessResponse struct {
|
||||
Id string `json:"id"`
|
||||
Progress DownloadProgress `json:"progress"`
|
||||
Info DownloadInfo `json:"info"`
|
||||
Output DownloadOutput `json:"output"`
|
||||
Params []string `json:"params"`
|
||||
Id string `json:"id"`
|
||||
Progress DownloadProgress `json:"progress"`
|
||||
Info common.DownloadInfo `json:"info"`
|
||||
Output DownloadOutput `json:"output"`
|
||||
Params []string `json:"params"`
|
||||
}
|
||||
|
||||
// struct representing the current status of the memoryDB
|
||||
|
||||
@@ -141,26 +141,13 @@ func (l *LiveStream) monitorStartTime(r io.Reader) {
|
||||
}
|
||||
}
|
||||
|
||||
const TRIES = 5
|
||||
/*
|
||||
if it's waiting for a livestream, the 5th line will indicate the time to live
|
||||
it's a crude method and not a robust one.
|
||||
scanner.Scan()
|
||||
|
||||
example:
|
||||
[youtube] Extracting URL: https://www.youtube.com/watch?v=IQVbGfVVjgY
|
||||
[youtube] IQVbGfVVjgY: Downloading webpage
|
||||
[youtube] IQVbGfVVjgY: Downloading ios player API JSON
|
||||
[youtube] IQVbGfVVjgY: Downloading web creator player API JSON
|
||||
WARNING: [youtube] This live event will begin in 27 minutes. <- STDERR, ignore
|
||||
[wait] Waiting for 00:27:15 - Press Ctrl+C to try now <- 5th line
|
||||
*/
|
||||
for range TRIES {
|
||||
for !strings.Contains(scanner.Text(), "Waiting for") {
|
||||
scanner.Scan()
|
||||
|
||||
if strings.Contains(scanner.Text(), "Waiting for") {
|
||||
waitTimeScanner()
|
||||
}
|
||||
}
|
||||
|
||||
waitTimeScanner()
|
||||
}
|
||||
|
||||
func (l *LiveStream) WaitTime() <-chan time.Duration {
|
||||
|
||||
@@ -9,15 +9,17 @@ import (
|
||||
)
|
||||
|
||||
func setupTest() {
|
||||
config.Instance().DownloaderPath = "yt-dlp"
|
||||
config.Instance().DownloaderPath = "build/yt-dlp"
|
||||
}
|
||||
|
||||
const URL = "https://www.youtube.com/watch?v=pwoAyLGOysU"
|
||||
|
||||
func TestLivestream(t *testing.T) {
|
||||
setupTest()
|
||||
|
||||
done := make(chan *LiveStream)
|
||||
|
||||
ls := New("https://www.youtube.com/watch?v=LSm1daKezcE", done, &internal.MessageQueue{}, &internal.MemoryDB{})
|
||||
ls := New(URL, done, &internal.MessageQueue{}, &internal.MemoryDB{})
|
||||
go ls.Start()
|
||||
|
||||
time.AfterFunc(time.Second*20, func() {
|
||||
|
||||
@@ -76,7 +76,7 @@ func (m *Monitor) Status() LiveStreamStatus {
|
||||
// Persist the monitor current state to a file.
|
||||
// The file is located in the configured config directory
|
||||
func (m *Monitor) Persist() error {
|
||||
fd, err := os.Create(filepath.Join(config.Instance().Dir(), "livestreams.dat"))
|
||||
fd, err := os.Create(filepath.Join(config.Instance().SessionFilePath, "livestreams.dat"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -95,7 +95,7 @@ func (m *Monitor) Persist() error {
|
||||
|
||||
// Restore a saved state and resume the monitored livestreams
|
||||
func (m *Monitor) Restore() error {
|
||||
fd, err := os.Open(filepath.Join(config.Instance().Dir(), "livestreams.dat"))
|
||||
fd, err := os.Open(filepath.Join(config.Instance().SessionFilePath, "livestreams.dat"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -9,20 +9,18 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/playlist"
|
||||
)
|
||||
|
||||
type metadata struct {
|
||||
Entries []DownloadInfo `json:"entries"`
|
||||
Count int `json:"playlist_count"`
|
||||
PlaylistTitle string `json:"title"`
|
||||
Type string `json:"_type"`
|
||||
}
|
||||
|
||||
func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
|
||||
params := append(req.Params, "--flat-playlist", "-J")
|
||||
urlWithParams := append([]string{req.URL}, params...)
|
||||
|
||||
var (
|
||||
downloader = config.Instance().DownloaderPath
|
||||
cmd = exec.Command(downloader, req.URL, "--flat-playlist", "-J")
|
||||
cmd = exec.Command(downloader, urlWithParams...)
|
||||
)
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
@@ -30,7 +28,7 @@ func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
|
||||
return err
|
||||
}
|
||||
|
||||
var m metadata
|
||||
var m playlist.Metadata
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return err
|
||||
@@ -52,13 +50,21 @@ func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
|
||||
return errors.New("probably not a valid URL")
|
||||
}
|
||||
|
||||
if m.Type == "playlist" {
|
||||
entries := slices.CompactFunc(slices.Compact(m.Entries), func(a DownloadInfo, b DownloadInfo) bool {
|
||||
if m.IsPlaylist() {
|
||||
entries := slices.CompactFunc(slices.Compact(m.Entries), func(a common.DownloadInfo, b common.DownloadInfo) bool {
|
||||
return a.URL == b.URL
|
||||
})
|
||||
|
||||
entries = slices.DeleteFunc(entries, func(e common.DownloadInfo) bool {
|
||||
return strings.Contains(e.URL, "list=")
|
||||
})
|
||||
|
||||
slog.Info("playlist detected", slog.String("url", req.URL), slog.Int("count", len(entries)))
|
||||
|
||||
if err := playlist.ApplyModifiers(&entries, req.Params); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, meta := range entries {
|
||||
// detect playlist title from metadata since each playlist entry will be
|
||||
// treated as an individual download
|
||||
@@ -82,11 +88,13 @@ func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
|
||||
|
||||
proc.Info.URL = meta.URL
|
||||
|
||||
time.Sleep(time.Millisecond)
|
||||
|
||||
db.Set(proc)
|
||||
mq.Publish(proc)
|
||||
|
||||
proc.Info.CreatedAt = meta.CreatedAt
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
proc := &Process{
|
||||
|
||||
@@ -1,16 +1,24 @@
|
||||
package internal
|
||||
|
||||
// Pool implements heap.Interface interface as a standard priority queue
|
||||
type Pool []*Worker
|
||||
|
||||
func (h Pool) Len() int { return len(h) }
|
||||
func (h Pool) Less(i, j int) bool { return h[i].index < h[j].index }
|
||||
func (h Pool) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
||||
func (h *Pool) Push(x any) { *h = append(*h, x.(*Worker)) }
|
||||
func (h Pool) Less(i, j int) bool { return h[i].pending < h[j].pending }
|
||||
|
||||
func (h Pool) Swap(i, j int) {
|
||||
h[i], h[j] = h[j], h[i]
|
||||
h[i].index = i
|
||||
h[j].index = j
|
||||
}
|
||||
|
||||
func (h *Pool) Push(x any) { *h = append(*h, x.(*Worker)) }
|
||||
|
||||
func (h *Pool) Pop() any {
|
||||
old := *h
|
||||
n := len(old)
|
||||
x := old[n-1]
|
||||
old[n-1] = nil
|
||||
*h = old[0 : n-1]
|
||||
return x
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/archiver"
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
|
||||
)
|
||||
|
||||
@@ -50,7 +51,7 @@ type Process struct {
|
||||
Livestream bool
|
||||
AutoRemove bool
|
||||
Params []string
|
||||
Info DownloadInfo
|
||||
Info common.DownloadInfo
|
||||
Progress DownloadProgress
|
||||
Output DownloadOutput
|
||||
proc *os.Process
|
||||
@@ -302,7 +303,7 @@ func (p *Process) GetFileName(o *DownloadOutput) error {
|
||||
|
||||
func (p *Process) SetPending() {
|
||||
// Since video's title isn't available yet, fill in with the URL.
|
||||
p.Info = DownloadInfo{
|
||||
p.Info = common.DownloadInfo{
|
||||
URL: p.Url,
|
||||
Title: p.Url,
|
||||
CreatedAt: time.Now(),
|
||||
@@ -334,7 +335,7 @@ func (p *Process) SetMetadata() error {
|
||||
return err
|
||||
}
|
||||
|
||||
info := DownloadInfo{
|
||||
info := common.DownloadInfo{
|
||||
URL: p.Url,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
package internal
|
||||
|
||||
type Worker struct {
|
||||
requests chan Process // downloads to do
|
||||
pending int // downloads pending
|
||||
index int // index in the heap
|
||||
requests chan *Process // downloads to do
|
||||
pending int // downloads pending
|
||||
index int // index in the heap
|
||||
}
|
||||
|
||||
func (w *Worker) Work(done chan *Worker) {
|
||||
|
||||
86
server/playlist/modifiers.go
Normal file
86
server/playlist/modifiers.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package playlist
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strconv"
|
||||
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
|
||||
)
|
||||
|
||||
/*
|
||||
Applicable modifiers
|
||||
|
||||
full | short | description
|
||||
---------------------------------------------------------------------------------
|
||||
--playlist-start NUMBER | -I NUMBER: | discard first N entries
|
||||
--playlist-end NUMBER | -I :NUMBER | discard last N entries
|
||||
--playlist-reverse | -I ::-1 | self explanatory
|
||||
--max-downloads NUMBER | | stops after N completed downloads
|
||||
*/
|
||||
|
||||
func ApplyModifiers(entries *[]common.DownloadInfo, args []string) error {
|
||||
for i, modifier := range args {
|
||||
switch modifier {
|
||||
case "--playlist-start":
|
||||
return playlistStart(i, modifier, args, entries)
|
||||
|
||||
case "--playlist-end":
|
||||
return playlistEnd(i, modifier, args, entries)
|
||||
|
||||
case "--max-downloads":
|
||||
return maxDownloads(i, modifier, args, entries)
|
||||
|
||||
case "--playlist-reverse":
|
||||
slices.Reverse(*entries)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func playlistStart(i int, modifier string, args []string, entries *[]common.DownloadInfo) error {
|
||||
if !guard(i, len(modifier)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
n, err := strconv.Atoi(args[i+1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*entries = (*entries)[n:]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func playlistEnd(i int, modifier string, args []string, entries *[]common.DownloadInfo) error {
|
||||
if !guard(i, len(modifier)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
n, err := strconv.Atoi(args[i+1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*entries = (*entries)[:n]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func maxDownloads(i int, modifier string, args []string, entries *[]common.DownloadInfo) error {
|
||||
if !guard(i, len(modifier)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
n, err := strconv.Atoi(args[i+1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*entries = (*entries)[0:n]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func guard(i, len int) bool { return i+1 < len-1 }
|
||||
12
server/playlist/types.go
Normal file
12
server/playlist/types.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package playlist
|
||||
|
||||
import "github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
|
||||
|
||||
// Metadata is the shape of yt-dlp's --flat-playlist -J output that the
// server inspects to decide whether a URL points to a playlist.
type Metadata struct {
	Entries       []common.DownloadInfo `json:"entries"`        // one entry per playlist item
	Count         int                   `json:"playlist_count"` // item count reported by yt-dlp
	PlaylistTitle string                `json:"title"`
	Type          string                `json:"_type"` // "playlist" when the URL is a playlist
}

// IsPlaylist reports whether the metadata describes a playlist.
func (m *Metadata) IsPlaylist() bool { return m.Type == "playlist" }
|
||||
@@ -183,6 +183,7 @@ func (s *Service) KillAll(args NoArgs, killed *string) error {
|
||||
}
|
||||
|
||||
slog.Info("succesfully killed process", slog.String("id", proc.Id))
|
||||
proc = nil // gc helper
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -195,6 +196,35 @@ func (s *Service) Clear(args string, killed *string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Removes completed processes
|
||||
func (s *Service) ClearCompleted(cleared *string) error {
|
||||
var (
|
||||
keys = s.db.Keys()
|
||||
removeFunc = func(p *internal.Process) error {
|
||||
defer s.db.Delete(p.Id)
|
||||
|
||||
if p.Progress.Status != internal.StatusCompleted {
|
||||
return nil
|
||||
}
|
||||
|
||||
return p.Kill()
|
||||
}
|
||||
)
|
||||
|
||||
for _, key := range *keys {
|
||||
proc, err := s.db.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := removeFunc(proc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FreeSpace gets the available from package sys util
|
||||
func (s *Service) FreeSpace(args NoArgs, free *uint64) error {
|
||||
freeSpace, err := sys.FreeSpace()
|
||||
|
||||
@@ -53,6 +53,7 @@ func toDB(dto *domain.Subscription) data.Subscription {
|
||||
|
||||
// Delete implements domain.Service.
|
||||
func (s *Service) Delete(ctx context.Context, id string) error {
|
||||
s.runner.StopTask(id)
|
||||
return s.r.Delete(ctx, id)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,9 +7,9 @@ import (
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/archive"
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
|
||||
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/domain"
|
||||
@@ -19,10 +19,12 @@ import (
|
||||
type TaskRunner interface {
|
||||
Submit(subcription *domain.Subscription) error
|
||||
Spawner(ctx context.Context)
|
||||
StopTask(id string) error
|
||||
Recoverer()
|
||||
}
|
||||
|
||||
type taskPair struct {
|
||||
type monitorTask struct {
|
||||
Done chan struct{}
|
||||
Schedule cron.Schedule
|
||||
Subscription *domain.Subscription
|
||||
}
|
||||
@@ -31,21 +33,22 @@ type CronTaskRunner struct {
|
||||
mq *internal.MessageQueue
|
||||
db *internal.MemoryDB
|
||||
|
||||
tasks chan taskPair
|
||||
tasks chan monitorTask
|
||||
errors chan error
|
||||
|
||||
running map[string]*monitorTask
|
||||
}
|
||||
|
||||
func NewCronTaskRunner(mq *internal.MessageQueue, db *internal.MemoryDB) TaskRunner {
|
||||
return &CronTaskRunner{
|
||||
mq: mq,
|
||||
db: db,
|
||||
tasks: make(chan taskPair),
|
||||
errors: make(chan error),
|
||||
mq: mq,
|
||||
db: db,
|
||||
tasks: make(chan monitorTask),
|
||||
errors: make(chan error),
|
||||
running: make(map[string]*monitorTask),
|
||||
}
|
||||
}
|
||||
|
||||
const commandTemplate = "-I1 --flat-playlist --print webpage_url $1"
|
||||
|
||||
var argsSplitterRe = regexp.MustCompile(`(?mi)[^\s"']+|"([^"]*)"|'([^']*)'`)
|
||||
|
||||
func (t *CronTaskRunner) Submit(subcription *domain.Subscription) error {
|
||||
@@ -54,7 +57,8 @@ func (t *CronTaskRunner) Submit(subcription *domain.Subscription) error {
|
||||
return err
|
||||
}
|
||||
|
||||
job := taskPair{
|
||||
job := monitorTask{
|
||||
Done: make(chan struct{}),
|
||||
Schedule: schedule,
|
||||
Subscription: subcription,
|
||||
}
|
||||
@@ -64,54 +68,110 @@ func (t *CronTaskRunner) Submit(subcription *domain.Subscription) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handles the entire lifecycle of a monitor job.
|
||||
func (t *CronTaskRunner) Spawner(ctx context.Context) {
|
||||
for task := range t.tasks {
|
||||
for req := range t.tasks {
|
||||
t.running[req.Subscription.Id] = &req // keep track of the current job
|
||||
|
||||
go func() {
|
||||
ctx, cancel := context.WithCancel(ctx) // inject into the job's context a cancellation singal
|
||||
fetcherEvents := t.doFetch(ctx, &req) // retrieve the channel of events of the job
|
||||
|
||||
for {
|
||||
slog.Info("fetching latest video for channel", slog.String("channel", task.Subscription.URL))
|
||||
|
||||
fetcherParams := strings.Split(strings.Replace(commandTemplate, "$1", task.Subscription.URL, 1), " ")
|
||||
|
||||
cmd := exec.CommandContext(
|
||||
ctx,
|
||||
config.Instance().DownloaderPath,
|
||||
fetcherParams...,
|
||||
)
|
||||
|
||||
stdout, err := cmd.Output()
|
||||
if err != nil {
|
||||
t.errors <- err
|
||||
select {
|
||||
case <-req.Done:
|
||||
slog.Info("stopping cron job and removing schedule", slog.String("url", req.Subscription.URL))
|
||||
cancel()
|
||||
return
|
||||
case <-fetcherEvents:
|
||||
slog.Info("finished monitoring channel", slog.String("url", req.Subscription.URL))
|
||||
}
|
||||
|
||||
latestChannelURL := string(bytes.Trim(stdout, "\n"))
|
||||
|
||||
p := &internal.Process{
|
||||
Url: latestChannelURL,
|
||||
Params: append(argsSplitterRe.FindAllString(task.Subscription.Params, 1), []string{
|
||||
"--download-archive",
|
||||
filepath.Join(config.Instance().Dir(), "archive.txt"),
|
||||
}...),
|
||||
AutoRemove: true,
|
||||
}
|
||||
|
||||
t.db.Set(p)
|
||||
t.mq.Publish(p)
|
||||
|
||||
nextSchedule := time.Until(task.Schedule.Next(time.Now()))
|
||||
|
||||
slog.Info(
|
||||
"cron task runner next schedule",
|
||||
slog.String("url", task.Subscription.URL),
|
||||
slog.Any("duration", nextSchedule),
|
||||
)
|
||||
|
||||
time.Sleep(nextSchedule)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *CronTaskRunner) Recoverer() {
|
||||
panic("Unimplemented")
|
||||
// Stop a currently scheduled job
|
||||
func (t *CronTaskRunner) StopTask(id string) error {
|
||||
task := t.running[id]
|
||||
if task != nil {
|
||||
t.running[id].Done <- struct{}{}
|
||||
delete(t.running, id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start a fetcher and notify on a channel when a fetcher has completed
|
||||
func (t *CronTaskRunner) doFetch(ctx context.Context, req *monitorTask) <-chan struct{} {
|
||||
completed := make(chan struct{})
|
||||
|
||||
// generator func
|
||||
go func() {
|
||||
for {
|
||||
sleepFor := t.fetcher(ctx, req)
|
||||
completed <- struct{}{}
|
||||
|
||||
time.Sleep(sleepFor)
|
||||
}
|
||||
}()
|
||||
|
||||
return completed
|
||||
}
|
||||
|
||||
// Perform the retrieval of the latest video of the channel.
|
||||
// Returns a time.Duration containing the amount of time to the next schedule.
|
||||
func (t *CronTaskRunner) fetcher(ctx context.Context, req *monitorTask) time.Duration {
|
||||
slog.Info("fetching latest video for channel", slog.String("channel", req.Subscription.URL))
|
||||
|
||||
nextSchedule := time.Until(req.Schedule.Next(time.Now()))
|
||||
|
||||
cmd := exec.CommandContext(
|
||||
ctx,
|
||||
config.Instance().DownloaderPath,
|
||||
"-I1",
|
||||
"--flat-playlist",
|
||||
"--print", "webpage_url",
|
||||
req.Subscription.URL,
|
||||
)
|
||||
|
||||
stdout, err := cmd.Output()
|
||||
if err != nil {
|
||||
t.errors <- err
|
||||
return time.Duration(0)
|
||||
}
|
||||
|
||||
latestVideoURL := string(bytes.Trim(stdout, "\n"))
|
||||
|
||||
// if the download exists there's not point in sending it into the message queue.
|
||||
exists, err := archive.DownloadExists(ctx, latestVideoURL)
|
||||
if exists && err == nil {
|
||||
return nextSchedule
|
||||
}
|
||||
|
||||
p := &internal.Process{
|
||||
Url: latestVideoURL,
|
||||
Params: append(
|
||||
argsSplitterRe.FindAllString(req.Subscription.Params, 1),
|
||||
[]string{
|
||||
"--break-on-existing",
|
||||
"--download-archive",
|
||||
filepath.Join(config.Instance().Dir(), "archive.txt"),
|
||||
}...),
|
||||
AutoRemove: true,
|
||||
}
|
||||
|
||||
t.db.Set(p) // give it an id
|
||||
t.mq.Publish(p) // send it to the message queue waiting to be processed
|
||||
|
||||
slog.Info(
|
||||
"cron task runner next schedule",
|
||||
slog.String("url", req.Subscription.URL),
|
||||
slog.Any("duration", nextSchedule),
|
||||
)
|
||||
|
||||
return nextSchedule
|
||||
}
|
||||
|
||||
// Recoverer is part of the TaskRunner interface.
//
// Not implemented yet: calling it is a programmer error and panics.
func (t *CronTaskRunner) Recoverer() {
	panic("unimplemented")
}
|
||||
|
||||
Reference in New Issue
Block a user