Compare commits

...

37 Commits

Author SHA1 Message Date
991bea1a7b refactoring: config struct & pipelines 2025-09-04 15:33:07 +02:00
5dbe6d886f Merge remote-tracking branch 'origin/master' into feat-pipelines 2025-09-02 10:18:31 +02:00
Marco Piovanello
8c06485880 fixed authentication middleware 2025-09-01 18:31:01 +02:00
658d43f9ea migrated to boltdb from sqlite + session files 2025-08-31 20:58:54 +02:00
Marco Piovanello
ccb6bbe3e6 fixed auth middleware 2025-08-31 13:16:36 +02:00
4c35b0b41f refactoring-1
introduced pipelines and abstracted download process.go in Downloader interface
2025-08-30 10:18:41 +02:00
9ca7bb9377 updated twitch dialog component labels 2025-08-28 20:30:36 +02:00
bce696fc67 fixed version string 2025-08-28 14:42:18 +02:00
22caf8899b added twitch frontend components 2025-08-28 14:40:04 +02:00
2a11f64935 default value in twitch config 2025-08-27 10:10:54 +02:00
Marco Piovanello
f4a0f688af Feat twitch livestreams (#334)
* backend code

* fixed twitch authentication
2025-08-25 12:54:16 +02:00
Marco Piovanello
14a03d6a77 Prevent RCEs with crafted inputs 2025-07-23 10:21:34 +02:00
Marco Piovanello
8a73079fad Update Dockerfile 2025-04-13 20:13:59 +02:00
f578f44cfd refactor: prevent multiple slashes 2025-03-30 10:29:13 +02:00
cbe16c5c6c refactoring: readded abort controller to httpClient.ts 2025-03-30 10:21:19 +02:00
3cebaf7f61 refactor: extra slashes prevention 2025-03-30 10:17:30 +02:00
Marco Piovanello
2d2cb1dc3a Update README.md 2025-03-30 09:54:27 +02:00
Marco Piovanello
43bcc40907 293 tiny gui improvement (#296)
* clicking on the speed dial will open download dialog

* refactor: prevent multiple slashes
2025-03-29 21:27:28 +01:00
Marco Piovanello
2af27e51be Chore dockerfile refactor (#287)
* removed yt-dlp alpine package

* use python3-alpine base image
2025-03-22 16:17:25 +01:00
Marco Piovanello
8c18242aaf removed yt-dlp alpine package (#286) 2025-03-22 15:27:48 +01:00
Marco Piovanello
66bebb2529 Update README.md 2025-03-17 11:23:29 +01:00
Marco Piovanello
e223e030ac restrict user with a whitelist (#282) 2025-03-17 11:13:20 +01:00
e4362468f7 fixed livestreams not being monitored 2025-03-15 11:08:08 +01:00
6880f60d14 Code refactoring, added clear button 2025-03-13 11:22:17 +01:00
Marco Piovanello
5d4aa7e2a3 Update README.md 2025-03-09 17:07:56 +01:00
Piotr Hajdas
2845196bc7 Add one-click deploy options for AWS, DigitalOcean, and Render in README (#268) 2025-02-20 09:47:11 +01:00
LelieL91
983915f8aa Fixed static file location (#263)
* Update EN, IT langs

Fixed EN lang mistype error
Added missing IT keys + added more translations

* Fixed files location

- livestreams.dat now uses same location as session.data (if specified on config.yml)
- .db.lock now uses same location as database file (if specified on config.yml)

* Update migrate.go

revert edit

---------

Co-authored-by: Marco Piovanello <35533749+marcopiovanello@users.noreply.github.com>
2025-02-07 22:00:11 +01:00
ce2fb13ef2 code refactoring 2025-02-07 10:13:35 +01:00
99069fe5f7 fixed proxy subdir malformed string 2025-02-07 09:45:26 +01:00
761f26b387 subscriptions: prevent downloading already existing file 2025-02-07 09:37:47 +01:00
eec72bb6e2 handle cancellation of scheduled cron jobs 2025-02-06 19:28:03 +01:00
ceb92d066c code refactoring 2025-02-06 19:27:38 +01:00
Marco Piovanello
cf74948840 initial support for playlist modifiers (#262)
supported modifiers are --playlist-start, --playlist-end, --playlist-reverse, --max-downloads
2025-02-06 11:30:28 +01:00
LelieL91
1c62084c7b Update EN, IT langs (#261)
Fixed EN lang mistype error
Added missing IT keys + added more translations

Co-authored-by: Marco Piovanello <35533749+marcopiovanello@users.noreply.github.com>
2025-02-06 09:30:21 +01:00
3c21253562 added latest keys to each language file (not translated) 2025-02-05 11:08:15 +01:00
b243c1c958 hotfix for #259 2025-02-05 10:49:15 +01:00
Marco Piovanello
7be5bc7b1f Update README.md 2025-02-05 09:16:05 +01:00
98 changed files with 3310 additions and 1547 deletions

View File

@@ -0,0 +1 @@
docker run -d -p 3033:3033 -v /downloads:/downloads marcobaobao/yt-dlp-webui

View File

@@ -3,6 +3,7 @@
result/
result
dist
.pnpm-store/
.pnpm-debug.log
node_modules
.env
@@ -20,9 +21,11 @@ cookies.txt
__debug*
ui/
.idea
.idea/
frontend/.pnp.cjs
frontend/.pnp.loader.mjs
frontend/.yarn/install-state.gz
.db.lock
livestreams.dat
.git
.vite/deps
archive.txt

.gitignore
View File

@@ -29,3 +29,4 @@ frontend/.yarn/install-state.gz
livestreams.dat
.vite/deps
archive.txt
twitch-monitor.dat

View File

@@ -24,11 +24,12 @@ COPY --from=ui /usr/src/yt-dlp-webui/frontend /usr/src/yt-dlp-webui/frontend
RUN CGO_ENABLED=0 GOOS=linux go build -o yt-dlp-webui
# -----------------------------------------------------------------------------
# dependencies ----------------------------------------------------------------
FROM alpine:edge
# Runtime ---------------------------------------------------------------------
FROM python:3.13.2-alpine3.21
RUN apk update && \
apk add ffmpeg yt-dlp ca-certificates curl wget psmisc
apk add ffmpeg ca-certificates curl wget gnutls --no-cache && \
pip install "yt-dlp[default,curl-cffi,mutagen,pycryptodomex,phantomjs,secretstorage]"
VOLUME /downloads /config
@@ -39,4 +40,4 @@ COPY --from=build /usr/src/yt-dlp-webui/yt-dlp-webui /app
ENV JWT_SECRET=secret
EXPOSE 3033
ENTRYPOINT [ "./yt-dlp-webui" , "--out", "/downloads", "--conf", "/config/config.yml", "--db", "/config/local.db" ]
ENTRYPOINT [ "./yt-dlp-webui" , "--out", "/downloads", "--conf", "/config/config.yml", "--db", "/config/local.db" ]

View File

@@ -25,6 +25,11 @@ docker pull ghcr.io/marcopiovanello/yt-dlp-web-ui:latest
*Keeps the project alive!* 😃
## Community stuff
Feel free to join :)
[Discord](https://discord.gg/GZAX5FfGzE)
## Some screeshots
![image](https://github.com/user-attachments/assets/fc43a3fb-ecf9-449d-b5cb-5d5635020c00)
![image](https://github.com/user-attachments/assets/3210f6ac-0dd8-403c-b839-3c24ff7d7d00)
@@ -110,6 +115,16 @@ services:
restart: unless-stopped
```
### ⚡ One-Click Deploy
| Cloud Provider | Deploy Button |
|----------------|---------------|
| AWS | <a href="https://deploystack.io/deploy/marcopiovanello-yt-dlp-web-ui?provider=aws&language=cfn"><img src="https://raw.githubusercontent.com/deploystackio/deploy-templates/refs/heads/main/.assets/img/aws.svg" height="38"></a> |
| DigitalOcean | <a href="https://deploystack.io/deploy/marcopiovanello-yt-dlp-web-ui?provider=do&language=dop"><img src="https://raw.githubusercontent.com/deploystackio/deploy-templates/refs/heads/main/.assets/img/do.svg" height="38"></a> |
| Render | <a href="https://deploystack.io/deploy/marcopiovanello-yt-dlp-web-ui?provider=rnd&language=rnd"><img src="https://raw.githubusercontent.com/deploystackio/deploy-templates/refs/heads/main/.assets/img/rnd.svg" height="38"></a> |
<sub>Generated by <a href="https://deploystack.io/c/marcopiovanello-yt-dlp-web-ui" target="_blank">DeployStack.io</a></sub>
## [Prebuilt binaries](https://github.com/marcopiovanello/yt-dlp-web-ui/releases) installation
```sh

View File

@@ -1,6 +1,6 @@
{
"name": "yt-dlp-webui",
"version": "3.2.5",
"version": "3.2.6",
"description": "Frontend compontent of yt-dlp-webui",
"scripts": {
"dev": "vite --host 0.0.0.0",
@@ -18,11 +18,11 @@
"@mui/icons-material": "^6.2.0",
"@mui/material": "^6.2.0",
"fp-ts": "^2.16.5",
"jotai": "^2.10.3",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-router-dom": "^6.23.1",
"react-virtuoso": "^4.7.11",
"jotai": "^2.10.3",
"rxjs": "^7.8.1"
},
"devDependencies": {

View File

@@ -28,6 +28,7 @@ import Footer from './components/Footer'
import Logout from './components/Logout'
import SocketSubscriber from './components/SocketSubscriber'
import ThemeToggler from './components/ThemeToggler'
import TwitchIcon from './components/TwitchIcon'
import { useI18n } from './hooks/useI18n'
import Toaster from './providers/ToasterProvider'
import { getAccentValue } from './utils'
@@ -154,6 +155,19 @@ export default function Layout() {
<ListItemText primary={i18n.t('subscriptionsButtonLabel')} />
</ListItemButton>
</Link>
<Link to={'/twitch'} style={
{
textDecoration: 'none',
color: mode === 'dark' ? '#ffffff' : '#000000DE'
}
}>
<ListItemButton>
<ListItemIcon>
<TwitchIcon />
</ListItemIcon>
<ListItemText primary={"Twitch"} />
</ListItemButton>
</Link>
<Link to={'/monitor'} style={
{
textDecoration: 'none',

View File

@@ -69,3 +69,12 @@ keys:
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -71,3 +71,12 @@ keys:
noFilesFound: 'Keine Dateien gefunden'
tableView: 'Tabellenansicht'
deleteSelected: 'Ausgewählte löschen'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -27,11 +27,11 @@ keys:
customPath: Custom path
customArgs: Enable custom yt-dlp args (great power = great responsibilities)
customArgsInput: Custom yt-dlp arguments
rpcConnErr: Error while conencting to RPC server
rpcConnErr: Error while connecting to RPC server
splashText: No active downloads
archiveTitle: Archive
clipboardAction: Copied URL to clipboard
playlistCheckbox: Download playlist (it will take time, after submitting you may close this window)
playlistCheckbox: Download playlist
restartAppMessage: Needs a page reload to take effect
servedFromReverseProxyCheckbox: Is behind a reverse proxy
urlBase: URL base, for reverse proxy support (subdir), defaults to empty
@@ -79,4 +79,8 @@ keys:
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription
newSubscriptionButton: New subscription
clearCompletedButton: 'Clear completed'
twitchIntegrationInfo: |
To enable monitoring Twitch streams follow this wiki page.
https://github.com/marcopiovanello/yt-dlp-web-ui/wiki/Twitch-integration

View File

@@ -68,4 +68,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -72,4 +72,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -70,4 +70,13 @@ keys:
deleteCookies: Sütik törlése
noFilesFound: 'Nem található fájlok'
tableView: 'Táblázatos Nézet'
deleteSelected: 'Kiválasztottak törlése'
deleteSelected: 'Kiválasztottak törlése'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -1,8 +1,9 @@
keys:
urlInput: URL Video (uno per linea)
statusTitle: Stato
startButton: Inizia
statusReady: Pronto
selectFormatButton: Seziona formato
startButton: Inizia
abortAllButton: Termina tutto
updateBinButton: Aggiorna yt-dlp
darkThemeButton: Tema scuro
@@ -22,51 +23,60 @@ keys:
pathOverrideOption: Abilita sovrascrittura percorso di output
filenameOverrideOption: Abilita sovrascrittura del nome del file di output
autoFileExtensionOption: Aggiungi estensione automaticamente
customFilename: Custom filename (leave blank to use default)
customPath: Custom path
customArgs: Enable custom yt-dlp args (great power = great responsabilities)
customArgsInput: Custom yt-dlp arguments
rpcConnErr: Error nella connessione al server RPC
customFilename: Nome file personalizzato (lascia vuoto per utilizzare quello predefinito)
customPath: Percorso personalizzato
customArgs: Abilita argomenti yt-dlp personalizzati (grande potere = grandi responsabilità)
customArgsInput: Argomenti yt-dlp personalizzati
rpcConnErr: Errore nella connessione al server RPC
splashText: Nessun download attivo
archiveTitle: Archivio
clipboardAction: URL copiato negli appunti
playlistCheckbox: Download playlist (richiederà tempo, puoi chiudere la finestra dopo l'inoltro)
restartAppMessage: La finestra deve essere ricaricata perché abbia effetto
servedFromReverseProxyCheckbox: Is behind a reverse proxy
restartAppMessage: La finestra deve essere ricaricata affinché abbia effetto
servedFromReverseProxyCheckbox: È dietro un reverse proxy
urlBase: base URL, per supporto a reverse proxy (subdir), default vuoto
newDownloadButton: Nuovo download
homeButtonLabel: Home
archiveButtonLabel: Archive
settingsButtonLabel: Settings
rpcAuthenticationLabel: RPC authentication
themeTogglerLabel: Theme toggler
loadingLabel: Loading...
archiveButtonLabel: Archivio
settingsButtonLabel: Impostazioni
rpcAuthenticationLabel: Autenticazione RPC
themeTogglerLabel: Selettore Tema
loadingLabel: Caricamento...
appTitle: Titolo applicazione
savedTemplates: Template salvati
templatesEditor: Editor template
templatesEditorNameLabel: Nome template
templatesEditorContentLabel: Contentunto template
savedTemplates: Modelli salvati
templatesEditor: Editor modelli
templatesEditorNameLabel: Nome modello
templatesEditorContentLabel: Contenuto del modello
logsTitle: 'Logs'
awaitingLogs: 'Awaiting logs...'
bulkDownload: 'Download files in a zip archive'
templatesReloadInfo: To register a new template it might need a page reload.
livestreamURLInput: Livestream URL
livestreamStatusWaiting: Waiting/Wait start
livestreamStatusDownloading: Downloading
livestreamStatusCompleted: Completed
livestreamStatusErrored: Errored
livestreamStatusUnknown: Unknown
livestreamNoMonitoring: No livestreams monitored
livestreamDownloadInfo: |
This will monitor yet to start livestream. Each process will be executed with --wait-for-video 10.
If an already started livestream is provided it will be still downloaded but its progress will not be tracked.
Once started the livestream will be migrated to the downloads page.
livestreamExperimentalWarning: This feature is still experimental. Something might break!
accentSelect: 'Accent'
urlBase: base URL, per supporto a reverse proxy (subdir), default vuoto
bulkDownload: 'Scaricare i file in un archivio zip'
rpcPollingTimeTitle: Intervallo di polling RPC
rpcPollingTimeDescription: Un intervallo più corto implica un maggior utilizzo di CPU (lato client e server)
generalDownloadSettings: 'General Download Settings'
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
templatesReloadInfo: Per registrare un nuovo modello potrebbe essere necessario ricaricare la pagina.
livestreamURLInput: Livestream URL
livestreamStatusWaiting: Attesa inizio
livestreamStatusDownloading: Downloading
livestreamStatusCompleted: Completato
livestreamStatusErrored: Errore
livestreamStatusUnknown: Sconosciuto
livestreamNoMonitoring: Nessun livestream monitorato
livestreamDownloadInfo: |
Questo monitorerà il livestream ancora da avviare. Ogni processo verrà eseguito con --wait-for-video 10.
Se viene fornito un livestream già avviato, questo verrà comunque scaricato, ma il suo progresso non verrà monitorato.
Una volta avviato, il livestream verrà migrato nella pagina dei download.
livestreamExperimentalWarning: Questa funzione è ancora sperimentale. Qualcosa potrebbe rompersi!
accentSelect: 'Accent'
generalDownloadSettings: 'Impostazioni generali di download'
deleteCookies: Elimina Cookies
noFilesFound: 'Nessun file trovato'
tableView: 'Vista Tabella'
deleteSelected: 'Elimina selezionati'
subscriptionsButtonLabel: 'Abbonamenti'
subscriptionsEmptyLabel: 'Nessuna iscrizione'
subscriptionsURLInput: 'URL Canale'
subscriptionsInfo: |
Iscrive a un canale definito. Verrà scaricato solo l'ultimo video.
Il lavoro di monitoraggio sarà programmato/attivato da un'espressione cron definita (se lasciata vuota, l'impostazione predefinita è ogni 5 minuti).
cronExpressionLabel: 'Espressione Cron'
editButtonLabel: 'Modifica'
newSubscriptionButton: Nuova iscrizione

View File

@@ -69,4 +69,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -68,4 +68,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -68,4 +68,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -70,4 +70,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -68,4 +68,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -70,4 +70,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -68,4 +68,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -70,4 +70,13 @@ keys:
deleteCookies: Delete Cookies
noFilesFound: 'No Files Found'
tableView: 'Table View'
deleteSelected: 'Delete selected'
deleteSelected: 'Delete selected'
subscriptionsButtonLabel: 'Subscriptions'
subscriptionsEmptyLabel: 'No subscriptions'
subscriptionsURLInput: 'Channel URL'
subscriptionsInfo: |
Subscribes to a defined channel. Only the last video will be downloaded.
The monitor job will be scheduled/triggered by a defined cron expression (defaults to every 5 minutes if left blank).
cronExpressionLabel: 'Cron expression'
editButtonLabel: 'Edit'
newSubscriptionButton: New subscription

View File

@@ -121,11 +121,18 @@ export const appTitleState = atomWithStorage(
export const serverAddressAndPortState = atom((get) => {
if (get(servedFromReverseProxySubDirState)) {
return `${get(serverAddressState)}/${get(servedFromReverseProxySubDirState)}/`
.replaceAll('"', '') // XXX: atomWithStorage uses JSON.stringify to serialize
.replaceAll('//', '/') // which puts extra double quotes.
}
if (get(servedFromReverseProxyState)) {
return `${get(serverAddressState)}`
.replaceAll('"', '')
}
return `${get(serverAddressState)}:${get(serverPortState)}`
const sap = `${get(serverAddressState)}:${get(serverPortState)}`
.replaceAll('"', '')
return sap.endsWith('/') ? sap.slice(0, -1) : sap
})
export const serverURL = atom((get) =>
@@ -134,15 +141,17 @@ export const serverURL = atom((get) =>
export const rpcWebSocketEndpoint = atom((get) => {
const proto = window.location.protocol === 'https:' ? 'wss:' : 'ws:'
return `${proto}//${get(serverAddressAndPortState)}/rpc/ws`
}
)
const sap = get(serverAddressAndPortState)
return `${proto}//${sap.endsWith('/') ? sap.slice(0, -1) : sap}/rpc/ws`
})
export const rpcHTTPEndpoint = atom((get) => {
const proto = window.location.protocol
return `${proto}//${get(serverAddressAndPortState)}/rpc/http`
}
)
const sap = get(serverAddressAndPortState)
return `${proto}//${sap.endsWith('/') ? sap.slice(0, -1) : sap}/rpc/http`
})
export const serverSideCookiesState = atom<Promise<string>>(async (get) => await pipe(
ffetch<Readonly<{ cookies: string }>>(`${get(serverURL)}/api/v1/cookies`),
@@ -180,5 +189,4 @@ export const settingsState = atom<SettingsState>((get) => ({
listView: get(listViewState),
servedFromReverseProxy: get(servedFromReverseProxyState),
appTitle: get(appTitleState)
})
)
}))

View File

@@ -110,7 +110,7 @@ const DownloadDialog: FC<Props> = ({ open, onClose, onDownloadStart }) => {
if (pickedAudioFormat !== '') codes.push(pickedAudioFormat)
if (pickedBestFormat !== '') codes.push(pickedBestFormat)
const downloadTemplate = `${customArgsState} ${cookies}`
const downloadTemplate = `${customArgs} ${cookies}`
.replace(/ +/g, ' ')
.trim()

View File

@@ -1,5 +1,6 @@
import AddCircleIcon from '@mui/icons-material/AddCircle'
import BuildCircleIcon from '@mui/icons-material/BuildCircle'
import ClearAllIcon from '@mui/icons-material/ClearAll'
import DeleteForeverIcon from '@mui/icons-material/DeleteForever'
import FolderZipIcon from '@mui/icons-material/FolderZip'
import FormatListBulleted from '@mui/icons-material/FormatListBulleted'
@@ -42,6 +43,11 @@ const HomeSpeedDial: React.FC<Props> = ({ onDownloadOpen, onEditorOpen }) => {
tooltipTitle={i18n.t('bulkDownload')}
onClick={() => window.open(`${serverAddr}/archive/bulk?token=${localStorage.getItem('token')}`)}
/>
<SpeedDialAction
icon={<ClearAllIcon />}
tooltipTitle={i18n.t('clearCompletedButton')}
onClick={() => client.clearCompleted()}
/>
<SpeedDialAction
icon={<DeleteForeverIcon />}
tooltipTitle={i18n.t('abortAllButton')}

View File

@@ -0,0 +1,22 @@
import { useAtomValue } from 'jotai'
import { settingsState } from '../atoms/settings'
const TwitchIcon: React.FC = () => {
const { theme } = useAtomValue(settingsState)
return (
<svg
role="img"
viewBox="0 0 24 24"
width={24}
height={24}
xmlns="http://www.w3.org/2000/svg"
style={{ fill: theme === 'dark' ? '#fff' : '#757575' }}
>
<title>Twitch</title>
<path d="M11.571 4.714h1.715v5.143H11.57zm4.715 0H18v5.143h-1.714zM6 0L1.714 4.286v15.428h5.143V24l4.286-4.286h3.428L22.286 12V0zm14.571 11.143l-3.428 3.428h-3.429l-3 3v-3H6.857V1.714h13.714Z" />
</svg>
)
}
export default TwitchIcon

View File

@@ -0,0 +1,140 @@
import CloseIcon from '@mui/icons-material/Close'
import {
Alert,
AppBar,
Box,
Button,
Container,
Dialog,
Grid,
IconButton,
Paper,
Slide,
TextField,
Toolbar,
Typography
} from '@mui/material'
import { TransitionProps } from '@mui/material/transitions'
import { matchW } from 'fp-ts/lib/Either'
import { pipe } from 'fp-ts/lib/function'
import { useAtomValue } from 'jotai'
import { forwardRef, startTransition, useState } from 'react'
import { serverURL } from '../../atoms/settings'
import { useToast } from '../../hooks/toast'
import { useI18n } from '../../hooks/useI18n'
import { ffetch } from '../../lib/httpClient'
type Props = {
open: boolean
onClose: () => void
}
const Transition = forwardRef(function Transition(
props: TransitionProps & {
children: React.ReactElement
},
ref: React.Ref<unknown>,
) {
return <Slide direction="up" ref={ref} {...props} />
})
const TwitchDialog: React.FC<Props> = ({ open, onClose }) => {
const [channelURL, setChannelURL] = useState('')
const { i18n } = useI18n()
const { pushMessage } = useToast()
const baseURL = useAtomValue(serverURL)
const submit = async (channelURL: string) => {
const task = ffetch<void>(`${baseURL}/twitch/user`, {
method: 'POST',
body: JSON.stringify({
user: channelURL.split('/').at(-1)
})
})
const either = await task()
pipe(
either,
matchW(
(l) => pushMessage(l, 'error'),
(_) => onClose()
)
)
}
return (
<Dialog
fullScreen
open={open}
onClose={onClose}
TransitionComponent={Transition}
>
<AppBar sx={{ position: 'relative' }}>
<Toolbar>
<IconButton
edge="start"
color="inherit"
onClick={onClose}
aria-label="close"
>
<CloseIcon />
</IconButton>
<Typography sx={{ ml: 2, flex: 1 }} variant="h6" component="div">
{i18n.t('subscriptionsButtonLabel')}
</Typography>
</Toolbar>
</AppBar>
<Box sx={{
backgroundColor: (theme) => theme.palette.background.default,
minHeight: (theme) => `calc(99vh - ${theme.mixins.toolbar.minHeight}px)`
}}>
<Container sx={{ my: 4 }}>
<Grid container spacing={2}>
<Grid item xs={12}>
<Paper
elevation={4}
sx={{
p: 2,
display: 'flex',
flexDirection: 'column',
}}
>
<Grid container gap={1.5}>
<Grid item xs={12}>
<Alert severity="info">
{i18n.t('twitchIntegrationInfo')}
</Alert>
</Grid>
<Grid item xs={12} mt={1}>
<TextField
multiline
fullWidth
label={i18n.t('subscriptionsURLInput')}
variant="outlined"
placeholder="https://www.twitch.tv/a_twitch_user_that_exists"
onChange={(e) => setChannelURL(e.target.value)}
/>
</Grid>
<Grid item xs={12}>
<Button
sx={{ mt: 2 }}
variant="contained"
disabled={channelURL === ''}
onClick={() => startTransition(() => submit(channelURL))}
>
{i18n.t('startButton')}
</Button>
</Grid>
</Grid>
</Paper>
</Grid>
</Grid>
</Container>
</Box>
</Dialog>
)
}
export default TwitchDialog

View File

@@ -1,6 +1,9 @@
import { tryCatch } from 'fp-ts/TaskEither'
import * as J from 'fp-ts/Json'
import * as E from 'fp-ts/Either'
import { pipe } from 'fp-ts/lib/function'
async function fetcher<T>(url: string, opt?: RequestInit): Promise<T> {
async function fetcher(url: string, opt?: RequestInit, controller?: AbortController): Promise<string> {
const jwt = localStorage.getItem('token')
if (opt && !opt.headers) {
@@ -14,17 +17,27 @@ async function fetcher<T>(url: string, opt?: RequestInit): Promise<T> {
headers: {
...opt?.headers,
'X-Authentication': jwt ?? ''
}
},
signal: controller?.signal
})
if (!res.ok) {
throw await res.text()
}
return res.json() as T
return res.text()
}
export const ffetch = <T>(url: string, opt?: RequestInit) => tryCatch(
() => fetcher<T>(url, opt),
export const ffetch = <T>(url: string, opt?: RequestInit, controller?: AbortController) => tryCatch(
async () => pipe(
await fetcher(url, opt, controller),
J.parse,
E.match(
(l) => l as T,
(r) => r as T
)
),
(e) => `error while fetching: ${e}`
)

View File

@@ -200,4 +200,11 @@ export class RPCClient {
params: []
})
}
public clearCompleted() {
return this.sendHTTP({
method: 'Service.ClearCompleted',
params: []
})
}
}

View File

@@ -6,6 +6,7 @@ import Terminal from './views/Terminal'
const Home = lazy(() => import('./views/Home'))
const Login = lazy(() => import('./views/Login'))
const Twitch = lazy(() => import('./views/Twitch'))
const Archive = lazy(() => import('./views/Archive'))
const Settings = lazy(() => import('./views/Settings'))
const LiveStream = lazy(() => import('./views/Livestream'))
@@ -111,6 +112,14 @@ export const router = createHashRouter([
</Suspense >
)
},
{
path: '/twitch',
element: (
<Suspense fallback={<CircularProgress />}>
<Twitch />
</Suspense >
)
},
]
},
])

View File

@@ -13,6 +13,7 @@ export type RPCMethods =
| "Service.ProgressLivestream"
| "Service.KillLivestream"
| "Service.KillAllLivestream"
| "Service.ClearCompleted"
export type RPCRequest = {
method: RPCMethods

View File

@@ -0,0 +1,77 @@
import {
Chip,
Container,
Paper
} from '@mui/material'
import { matchW } from 'fp-ts/lib/Either'
import { pipe } from 'fp-ts/lib/function'
import { useAtomValue } from 'jotai'
import { useState, useTransition } from 'react'
import { serverURL } from '../atoms/settings'
import LoadingBackdrop from '../components/LoadingBackdrop'
import NoSubscriptions from '../components/subscriptions/NoSubscriptions'
import SubscriptionsSpeedDial from '../components/subscriptions/SubscriptionsSpeedDial'
import TwitchDialog from '../components/twitch/TwitchDialog'
import { useToast } from '../hooks/toast'
import useFetch from '../hooks/useFetch'
import { ffetch } from '../lib/httpClient'
const TwitchView: React.FC = () => {
const { pushMessage } = useToast()
const baseURL = useAtomValue(serverURL)
const [openDialog, setOpenDialog] = useState(false)
const { data: users, fetcher: refetch } = useFetch<Array<string>>('/twitch/users')
const [isPending, startTransition] = useTransition()
const deleteUser = async (user: string) => {
const task = ffetch<void>(`${baseURL}/twitch/user/${user}`, {
method: 'DELETE',
})
const either = await task()
pipe(
either,
matchW(
(l) => pushMessage(l, 'error'),
() => refetch()
)
)
}
return (
<>
<LoadingBackdrop isLoading={!users || isPending} />
<SubscriptionsSpeedDial onOpen={() => setOpenDialog(s => !s)} />
<TwitchDialog open={openDialog} onClose={() => {
setOpenDialog(s => !s)
refetch()
}} />
{
!users || users.length === 0 ?
<NoSubscriptions /> :
<Container maxWidth="xl" sx={{ mt: 4, mb: 8 }}>
<Paper sx={{
p: 2.5,
minHeight: '80vh',
}}>
{users.map(user => (
<Chip
label={user}
onDelete={() => startTransition(async () => await deleteUser(user))}
/>
))}
</Paper>
</Container>
}
</>
)
}
export default TwitchView

go.mod
View File

@@ -1,32 +1,35 @@
module github.com/marcopiovanello/yt-dlp-web-ui/v3
go 1.23
go 1.24
require (
github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef
github.com/coreos/go-oidc/v3 v3.12.0
github.com/go-chi/chi/v5 v5.2.0
github.com/go-chi/cors v1.2.1
github.com/golang-jwt/jwt/v5 v5.2.1
github.com/coreos/go-oidc/v3 v3.15.0
github.com/go-chi/chi/v5 v5.2.3
github.com/go-chi/cors v1.2.2
github.com/golang-jwt/jwt/v5 v5.3.0
github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.3
github.com/robfig/cron/v3 v3.0.0
golang.org/x/oauth2 v0.25.0
golang.org/x/sync v0.10.0
golang.org/x/sys v0.29.0
gopkg.in/yaml.v3 v3.0.1
modernc.org/sqlite v1.34.5
github.com/robfig/cron/v3 v3.0.1
github.com/spf13/viper v1.20.1
go.etcd.io/bbolt v1.4.3
golang.org/x/crypto v0.41.0
golang.org/x/oauth2 v0.30.0
golang.org/x/sys v0.35.0
)
require (
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-jose/go-jose/v4 v4.0.4 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c // indirect
modernc.org/libc v1.61.11 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.8.2 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.2 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/text v0.28.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum
View File

@@ -1,79 +1,76 @@
github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef h1:2JGTg6JapxP9/R33ZaagQtAM4EkkSYnIAlOG5EI8gkM=
github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef/go.mod h1:JS7hed4L1fj0hXcyEejnW57/7LCetXggd+vwrRnYeII=
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg=
github.com/coreos/go-oidc/v3 v3.15.0/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0=
github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4=
github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-chi/cors v1.2.2 h1:Jmey33TE+b+rB7fT8MUy1u0I4L+NARQlK6LhzKPSyQE=
github.com/go-chi/cors v1.2.2/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI=
github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=
github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
modernc.org/cc/v4 v4.24.4 h1:TFkx1s6dCkQpd6dKurBNmpo+G8Zl4Sq/ztJ+2+DEsh0=
modernc.org/cc/v4 v4.24.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.23.15 h1:wFDan71KnYqeHz4eF63vmGE6Q6Pc0PUGDpP0PRMYjDc=
modernc.org/ccgo/v4 v4.23.15/go.mod h1:nJX30dks/IWuBOnVa7VRii9Me4/9TZ1SC9GNtmARTy0=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.6.2 h1:YBXi5Kqp6aCK3fIxwKQ3/fErvawVKwjOLItxj1brGds=
modernc.org/gc/v2 v2.6.2/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/libc v1.61.11 h1:6sZG8uB6EMMG7iTLPTndi8jyTdgAQNIeLGjCFICACZw=
modernc.org/libc v1.61.11/go.mod h1:HHX+srFdn839oaJRd0W8hBM3eg+mieyZCAjWwB08/nM=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.8.2 h1:cL9L4bcoAObu4NkxOlKWBWtNHIsnnACGF/TbqQ6sbcI=
modernc.org/memory v1.8.2/go.mod h1:ZbjSvMO5NQ1A2i3bWeDiVMxIorXwdClKE/0SZ+BMotU=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=

main.go
View File

@@ -1,117 +1,102 @@
package main
import (
"context"
"embed"
"flag"
"io/fs"
"log"
"log/slog"
"os"
"os/signal"
"runtime"
"syscall"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/cli"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/openid"
"github.com/spf13/viper"
)
var (
host string
port int
queueSize int
configFile string
downloadPath string
downloaderPath string
sessionFilePath string
localDatabasePath string
frontendPath string
//go:embed frontend/dist/index.html
//go:embed frontend/dist/assets/*
var frontend embed.FS
requireAuth bool
username string
password string
userFromEnv = os.Getenv("USERNAME")
passFromEnv = os.Getenv("PASSWORD")
logFile string
enableFileLogging bool
//go:embed frontend/dist/index.html
//go:embed frontend/dist/assets/*
frontend embed.FS
//go:embed openapi/*
swagger embed.FS
)
func init() {
flag.StringVar(&host, "host", "0.0.0.0", "Host where server will listen at")
flag.IntVar(&port, "port", 3033, "Port where server will listen at")
flag.IntVar(&queueSize, "qs", 2, "Queue size (concurrent downloads)")
flag.StringVar(&configFile, "conf", "./config.yml", "Config file path")
flag.StringVar(&downloadPath, "out", ".", "Where files will be saved")
flag.StringVar(&downloaderPath, "driver", "yt-dlp", "yt-dlp executable path")
flag.StringVar(&sessionFilePath, "session", ".", "session file path")
flag.StringVar(&localDatabasePath, "db", "local.db", "local database path")
flag.StringVar(&frontendPath, "web", "", "frontend web resources path")
flag.BoolVar(&enableFileLogging, "fl", false, "enable outputting logs to a file")
flag.StringVar(&logFile, "lf", "yt-dlp-webui.log", "set log file location")
flag.BoolVar(&requireAuth, "auth", false, "Enable RPC authentication")
flag.StringVar(&username, "user", userFromEnv, "Username required for auth")
flag.StringVar(&password, "pass", passFromEnv, "Password required for auth")
flag.Parse()
}
//go:embed openapi/*
var swagger embed.FS
func main() {
frontend, err := fs.Sub(frontend, "frontend/dist")
if err != nil {
log.Fatalln(err)
// Parse optional config path from flag
var configFile string
flag.StringVar(&configFile, "conf", "./config.yml", "Config file path")
flag.Parse()
v := viper.New()
v.SetConfigFile(configFile)
v.SetConfigType("yaml")
// Defaults
v.SetDefault("server.host", "0.0.0.0")
v.SetDefault("server.port", 3033)
v.SetDefault("server.queue_size", 2)
v.SetDefault("paths.download_path", ".")
v.SetDefault("paths.downloader_path", "yt-dlp")
v.SetDefault("paths.local_database_path", ".")
v.SetDefault("logging.log_path", "yt-dlp-webui.log")
v.SetDefault("logging.enable_file_logging", false)
v.SetDefault("authentication.require_auth", false)
// Env binding
v.SetEnvPrefix("APP")
v.AutomaticEnv()
// Load YAML file if exists
if err := v.ReadInConfig(); err != nil {
slog.Debug("using defaults")
}
if frontendPath != "" {
frontend = os.DirFS(frontendPath)
cfg := config.Instance()
if err := v.Unmarshal(&cfg); err != nil {
slog.Error("failed to load config", "error", err)
}
c := config.Instance()
{
// init the config struct with the values from flags
// TODO: find an alternative way to populate the config struct from flags or config file
c.Host = host
c.Port = port
c.QueueSize = queueSize
c.DownloadPath = downloadPath
c.DownloaderPath = downloaderPath
c.SessionFilePath = sessionFilePath
c.LocalDatabasePath = localDatabasePath
c.LogPath = logFile
c.EnableFileLogging = enableFileLogging
c.RequireAuth = requireAuth
c.Username = username
c.Password = password
if cfg.Server.QueueSize <= 0 || runtime.NumCPU() <= 2 {
cfg.Server.QueueSize = 2
}
// limit concurrent downloads for systems with 2 or less logical cores
if runtime.NumCPU() <= 2 {
c.QueueSize = 1
}
// if config file is found it will be merged with the current config struct
if err := c.LoadFile(configFile); err != nil {
log.Println(cli.BgRed, "config", cli.Reset, err)
// 6. Frontend FS
var appFS fs.FS
if fp := v.GetString("frontend_path"); fp != "" {
appFS = os.DirFS(fp)
} else {
sub, err := fs.Sub(frontend, "frontend/dist")
if err != nil {
slog.Error("failed to load embedded frontend", "error", err)
os.Exit(1)
}
appFS = sub
}
// Configure OpenID if needed
openid.Configure()
server.RunBlocking(&server.RunConfig{
App: frontend,
// Graceful shutdown
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
slog.Info("starting server",
"host", cfg.Server.Host,
"port", cfg.Server.Port,
"queue_size", cfg.Server.QueueSize,
)
if err := server.Run(ctx, &server.RunConfig{
App: appFS,
Swagger: swagger,
})
}); err != nil {
slog.Error("server stopped with error", "error", err)
os.Exit(1)
}
slog.Info("server exited cleanly")
}

View File

@@ -146,10 +146,10 @@ func (h *Handler) GetCursor() http.HandlerFunc {
// ApplyRouter implements domain.RestHandler.
func (h *Handler) ApplyRouter() func(chi.Router) {
return func(r chi.Router) {
if config.Instance().RequireAuth {
if config.Instance().Authentication.RequireAuth {
r.Use(middlewares.Authenticated)
}
if config.Instance().UseOpenId {
if config.Instance().OpenId.UseOpenId {
r.Use(openid.Middleware)
}

server/archive/utils.go
View File

@@ -0,0 +1,58 @@
package archive
import (
"bufio"
"bytes"
"context"
"os"
"os/exec"
"path/filepath"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
)
// Performs a search on the archive.txt file and determines whether a download
// has already been done.
func DownloadExists(ctx context.Context, url string) (bool, error) {
cmd := exec.CommandContext(
ctx,
config.Instance().Paths.DownloaderPath,
"--print",
"%(extractor)s %(id)s",
url,
)
stdout, err := cmd.Output()
if err != nil {
return false, err
}
extractorAndURL := bytes.Trim(stdout, "\n")
fd, err := os.Open(filepath.Join(config.Instance().Dir(), "archive.txt"))
if err != nil {
return false, err
}
defer fd.Close()
scanner := bufio.NewScanner(fd)
// search linearly for lower memory usage...
// alternatively, a pre-sorted version of the archive.txt file with hashed values can be loaded in memory
// and a binary search performed on it.
for scanner.Scan() {
if bytes.Equal(scanner.Bytes(), extractorAndURL) {
return true, nil
}
}
// data, err := io.ReadAll(fd)
// if err != nil {
// return false, err
// }
// slices.BinarySearchFunc(data, extractorAndURL, func(a []byte, b []byte) int {
// return hash(a).Compare(hash(b))
// })
return false, nil
}
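For context, the `--print "%(extractor)s %(id)s"` value compared against each line follows yt-dlp's download-archive convention of one `<extractor> <id>` pair per line, so `archive.txt` is expected to contain entries roughly like the following (illustrative IDs):

```
youtube dQw4w9WgXcQ
youtube 9bZkp7q19f0
```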

View File

@@ -5,15 +5,12 @@ import (
"database/sql"
"log/slog"
evbus "github.com/asaskevich/EventBus"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/archive"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
)
const QueueName = "process:archive"
var (
eventBus = evbus.New()
ch = make(chan *Message, 1)
archiveService archive.Service
)
@@ -25,18 +22,20 @@ func Register(db *sql.DB) {
}
func init() {
eventBus.Subscribe(QueueName, func(m *Message) {
slog.Info(
"archiving completed download",
slog.String("title", m.Title),
slog.String("source", m.Source),
)
archiveService.Archive(context.Background(), m)
})
go func() {
for m := range ch {
slog.Info(
"archiving completed download",
slog.String("title", m.Title),
slog.String("source", m.Source),
)
archiveService.Archive(context.Background(), m)
}
}()
}
func Publish(m *Message) {
if config.Instance().AutoArchive {
eventBus.Publish(QueueName, m)
ch <- m
}
}

server/common/types.go
View File

@@ -0,0 +1,18 @@
package common
import "time"
// Used to deser the yt-dlp -J output
type DownloadMetadata struct {
URL string `json:"url"`
Title string `json:"title"`
Thumbnail string `json:"thumbnail"`
Resolution string `json:"resolution"`
Size int32 `json:"filesize_approx"`
VCodec string `json:"vcodec"`
ACodec string `json:"acodec"`
Extension string `json:"ext"`
OriginalURL string `json:"original_url"`
FileName string `json:"filename"`
CreatedAt time.Time `json:"created_at"`
}

View File

@@ -1,35 +1,64 @@
package config
import (
"os"
"path/filepath"
"sync"
"gopkg.in/yaml.v3"
"time"
)
type Config struct {
LogPath string `yaml:"log_path"`
EnableFileLogging bool `yaml:"enable_file_logging"`
BaseURL string `yaml:"base_url"`
Host string `yaml:"host"`
Port int `yaml:"port"`
DownloadPath string `yaml:"downloadPath"`
DownloaderPath string `yaml:"downloaderPath"`
RequireAuth bool `yaml:"require_auth"`
Username string `yaml:"username"`
Password string `yaml:"password"`
QueueSize int `yaml:"queue_size"`
LocalDatabasePath string `yaml:"local_database_path"`
SessionFilePath string `yaml:"session_file_path"`
path string // private
UseOpenId bool `yaml:"use_openid"`
OpenIdProviderURL string `yaml:"openid_provider_url"`
OpenIdClientId string `yaml:"openid_client_id"`
OpenIdClientSecret string `yaml:"openid_client_secret"`
OpenIdRedirectURL string `yaml:"openid_redirect_url"`
FrontendPath string `yaml:"frontend_path"`
AutoArchive bool `yaml:"auto_archive"`
Server ServerConfig `yaml:"server"`
Logging LoggingConfig `yaml:"logging"`
Paths PathsConfig `yaml:"paths"`
Authentication AuthConfig `yaml:"authentication"`
OpenId OpenIdConfig `yaml:"openid"`
Frontend FrontendConfig `yaml:"frontend"`
AutoArchive bool `yaml:"auto_archive"`
Twitch TwitchConfig `yaml:"twitch"`
path string
}
type ServerConfig struct {
BaseURL string `yaml:"base_url"`
Host string `yaml:"host"`
Port int `yaml:"port"`
QueueSize int `yaml:"queue_size"`
}
type LoggingConfig struct {
LogPath string `yaml:"log_path"`
EnableFileLogging bool `yaml:"enable_file_logging"`
}
type PathsConfig struct {
DownloadPath string `yaml:"download_path"`
DownloaderPath string `yaml:"downloader_path"`
LocalDatabasePath string `yaml:"local_database_path"`
}
type AuthConfig struct {
RequireAuth bool `yaml:"require_auth"`
Username string `yaml:"username"`
PasswordHash string `yaml:"password"`
}
type OpenIdConfig struct {
UseOpenId bool `yaml:"use_openid"`
ProviderURL string `yaml:"openid_provider_url"`
ClientId string `yaml:"openid_client_id"`
ClientSecret string `yaml:"openid_client_secret"`
RedirectURL string `yaml:"openid_redirect_url"`
EmailWhitelist []string `yaml:"openid_email_whitelist"`
}
type FrontendConfig struct {
FrontendPath string `yaml:"frontend_path"`
}
type TwitchConfig struct {
ClientId string `yaml:"client_id"`
ClientSecret string `yaml:"client_secret"`
CheckInterval time.Duration `yaml:"check_interval"`
}
var (
@@ -41,27 +70,12 @@ func Instance() *Config {
if instance == nil {
instanceOnce.Do(func() {
instance = &Config{}
instance.Twitch.CheckInterval = time.Minute * 5
})
}
return instance
}
// Initialises the Config struct given its config file
func (c *Config) LoadFile(filename string) error {
fd, err := os.Open(filename)
if err != nil {
return err
}
c.path = filename
if err := yaml.NewDecoder(fd).Decode(c); err != nil {
return err
}
return nil
}
// Path of the directory containing the config file
func (c *Config) Dir() string { return filepath.Dir(c.path) }

View File

@@ -19,6 +19,7 @@ import (
"github.com/go-chi/chi/v5"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
)
/*
@@ -88,7 +89,7 @@ type ListRequest struct {
}
func ListDownloaded(w http.ResponseWriter, r *http.Request) {
root := config.Instance().DownloadPath
root := config.Instance().Paths.DownloadPath
req := new(ListRequest)
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -156,7 +157,7 @@ func SendFile(w http.ResponseWriter, r *http.Request) {
filename := string(decoded)
root := config.Instance().DownloadPath
root := config.Instance().Paths.DownloadPath
if strings.Contains(filepath.Dir(filepath.Clean(filename)), filepath.Clean(root)) {
http.ServeFile(w, r, filename)
@@ -188,7 +189,7 @@ func DownloadFile(w http.ResponseWriter, r *http.Request) {
filename := string(decoded)
root := config.Instance().DownloadPath
root := config.Instance().Paths.DownloadPath
if strings.Contains(filepath.Dir(filepath.Clean(filename)), filepath.Clean(root)) {
w.Header().Add("Content-Disposition", "inline; filename=\""+filepath.Base(filename)+"\"")
@@ -207,9 +208,9 @@ func DownloadFile(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusUnauthorized)
}
func BulkDownload(mdb *internal.MemoryDB) http.HandlerFunc {
func BulkDownload(mdb *kv.Store) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ps := slices.DeleteFunc(*mdb.All(), func(e internal.ProcessResponse) bool {
ps := slices.DeleteFunc(*mdb.All(), func(e internal.ProcessSnapshot) bool {
return e.Progress.Status != internal.StatusCompleted
})

View File

@@ -10,7 +10,7 @@ import (
)
func ParseURL(url string) (*Metadata, error) {
cmd := exec.Command(config.Instance().DownloaderPath, url, "-J")
cmd := exec.Command(config.Instance().Paths.DownloaderPath, url, "-J")
stdout, err := cmd.Output()
if err != nil {

View File

@@ -1,34 +0,0 @@
package internal
import (
"container/heap"
)
type LoadBalancer struct {
pool Pool
done chan *Worker
}
func (b *LoadBalancer) Balance(work chan Process) {
for {
select {
case req := <-work:
b.dispatch(req)
case w := <-b.done:
b.completed(w)
}
}
}
func (b *LoadBalancer) dispatch(req Process) {
w := heap.Pop(&b.pool).(*Worker)
w.requests <- req
w.pending++
heap.Push(&b.pool, w)
}
func (b *LoadBalancer) completed(w *Worker) {
w.pending--
heap.Remove(&b.pool, w.index)
heap.Push(&b.pool, w)
}

View File

@@ -1,6 +1,8 @@
package internal
import "time"
import (
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
)
// Used to unmarshall yt-dlp progress
type ProgressTemplate struct {
@@ -29,35 +31,21 @@ type DownloadProgress struct {
ETA float64 `json:"eta"`
}
// Used to deser the yt-dlp -J output
type DownloadInfo struct {
URL string `json:"url"`
Title string `json:"title"`
Thumbnail string `json:"thumbnail"`
Resolution string `json:"resolution"`
Size int32 `json:"filesize_approx"`
VCodec string `json:"vcodec"`
ACodec string `json:"acodec"`
Extension string `json:"ext"`
OriginalURL string `json:"original_url"`
FileName string `json:"filename"`
CreatedAt time.Time `json:"created_at"`
}
// struct representing the response sent to the client
// as JSON-RPC result field
type ProcessResponse struct {
Id string `json:"id"`
Progress DownloadProgress `json:"progress"`
Info DownloadInfo `json:"info"`
Output DownloadOutput `json:"output"`
Params []string `json:"params"`
type ProcessSnapshot struct {
Id string `json:"id"`
Progress DownloadProgress `json:"progress"`
Info common.DownloadMetadata `json:"info"`
Output DownloadOutput `json:"output"`
Params []string `json:"params"`
DownloaderName string `json:"downloader_name"`
}
// struct representing the current status of the memoryDB
// used for serialization/persistence reasons
type Session struct {
Processes []ProcessResponse `json:"processes"`
Snapshots []ProcessSnapshot `json:"processes"`
}
// struct representing the intent to stop a specific process
@@ -85,3 +73,11 @@ type CustomTemplate struct {
Name string `json:"name"`
Content string `json:"content"`
}
const (
StatusPending = iota
StatusDownloading
StatusCompleted
StatusErrored
StatusLiveStream
)

View File

@@ -0,0 +1,42 @@
package downloaders
import (
"log/slog"
"sync"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
)
type DownloaderBase struct {
Id string
URL string
Metadata common.DownloadMetadata
Pending bool
Completed bool
mutex sync.Mutex
}
func (d *DownloaderBase) FetchMetadata(fetcher func(url string) (*common.DownloadMetadata, error)) {
d.mutex.Lock()
defer d.mutex.Unlock()
meta, err := fetcher(d.URL)
if err != nil {
slog.Error("failed to retrieve metadata", slog.Any("err", err))
return
}
d.Metadata = *meta
}
func (d *DownloaderBase) SetPending(p bool) {
d.mutex.Lock()
defer d.mutex.Unlock()
d.Pending = p
}
func (d *DownloaderBase) Complete() {
d.mutex.Lock()
defer d.mutex.Unlock()
d.Completed = true
}

View File

@@ -0,0 +1,26 @@
package downloaders
import (
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
)
type Downloader interface {
Start() error
Stop() error
Status() *internal.ProcessSnapshot
SetOutput(output internal.DownloadOutput)
SetProgress(progress internal.DownloadProgress)
SetMetadata(fetcher func(url string) (*common.DownloadMetadata, error))
SetPending(p bool)
IsCompleted() bool
UpdateSavedFilePath(path string)
RestoreFromSnapshot(*internal.ProcessSnapshot) error
GetId() string
GetUrl() string
}

View File

@@ -0,0 +1,211 @@
package downloaders
import (
"context"
"errors"
"fmt"
"log/slog"
"os"
"os/exec"
"slices"
"strings"
"syscall"
"github.com/google/uuid"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
)
const downloadTemplate = `download:
{
"eta":%(progress.eta)s,
"percentage":"%(progress._percent_str)s",
"speed":%(progress.speed)s
}`
// filename not returning the correct extension after postprocess
const postprocessTemplate = `postprocess:
{
"filepath":"%(info.filepath)s"
}
`
type GenericDownloader struct {
Params []string
AutoRemove bool
progress internal.DownloadProgress
output internal.DownloadOutput
proc *os.Process
logConsumer LogConsumer
// embedded
DownloaderBase
}
func NewGenericDownload(url string, params []string) Downloader {
g := &GenericDownloader{
logConsumer: NewJSONLogConsumer(),
}
// set fields on the embedded DownloaderBase
g.Id = uuid.NewString()
g.URL = url
return g
}
func (g *GenericDownloader) Start() error {
g.SetPending(true)
g.Params = argsSanitizer(g.Params)
out := internal.DownloadOutput{
Path: config.Instance().Paths.DownloadPath,
Filename: "%(title)s.%(ext)s",
}
if g.output.Path != "" {
out.Path = g.output.Path
}
if g.output.Filename != "" {
out.Filename = g.output.Filename
}
buildFilename(&g.output)
templateReplacer := strings.NewReplacer("\n", "", "\t", "", " ", "")
baseParams := []string{
strings.Split(g.URL, "?list")[0], //no playlist
"--newline",
"--no-colors",
"--no-playlist",
"--progress-template",
templateReplacer.Replace(downloadTemplate),
"--progress-template",
templateReplacer.Replace(postprocessTemplate),
"--no-exec",
}
// unless the user manually overrode the output path, append -o with the configured defaults
if !(slices.Contains(g.Params, "-P") || slices.Contains(g.Params, "--paths")) {
g.Params = append(g.Params, "-o")
g.Params = append(g.Params, fmt.Sprintf("%s/%s", out.Path, out.Filename))
}
params := append(baseParams, g.Params...)
slog.Info("requesting download", slog.String("url", g.URL), slog.Any("params", params))
cmd := exec.Command(config.Instance().Paths.DownloaderPath, params...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
stdout, err := cmd.StdoutPipe()
if err != nil {
slog.Error("failed to get a stdout pipe", slog.Any("err", err))
panic(err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
slog.Error("failed to get a stderr pipe", slog.Any("err", err))
panic(err)
}
if err := cmd.Start(); err != nil {
slog.Error("failed to start yt-dlp process", slog.Any("err", err))
panic(err)
}
g.proc = cmd.Process
ctx, cancel := context.WithCancel(context.Background())
defer func() {
stdout.Close()
g.Complete()
cancel()
}()
logs := make(chan []byte)
go produceLogs(stdout, logs)
go consumeLogs(ctx, logs, g.logConsumer, g)
go printYtDlpErrors(stderr, g.Id, g.URL)
g.SetPending(false)
return cmd.Wait()
}
func (g *GenericDownloader) Stop() error {
defer func() {
g.progress.Status = internal.StatusCompleted
g.Complete()
}()
// yt-dlp spawns multiple child processes and the parent process
// has been started with Setpgid = true. To properly kill
// all subprocesses, a SIGTERM needs to be sent to the
// process group.
if g.proc == nil {
return errors.New("*os.Process not set")
}
pgid, err := syscall.Getpgid(g.proc.Pid)
if err != nil {
return err
}
if err := syscall.Kill(-pgid, syscall.SIGTERM); err != nil {
return err
}
return nil
}
func (g *GenericDownloader) Status() *internal.ProcessSnapshot {
return &internal.ProcessSnapshot{
Id: g.Id,
Info: g.Metadata,
Progress: g.progress,
Output: g.output,
Params: g.Params,
DownloaderName: "generic",
}
}
func (g *GenericDownloader) UpdateSavedFilePath(p string) { g.output.SavedFilePath = p }
func (g *GenericDownloader) SetOutput(o internal.DownloadOutput) { g.output = o }
func (g *GenericDownloader) SetProgress(p internal.DownloadProgress) { g.progress = p }
func (g *GenericDownloader) SetMetadata(fetcher func(url string) (*common.DownloadMetadata, error)) {
g.FetchMetadata(fetcher)
}
func (g *GenericDownloader) SetPending(p bool) {
g.Pending = p
}
func (g *GenericDownloader) GetId() string { return g.Id }
func (g *GenericDownloader) GetUrl() string { return g.URL }
func (g *GenericDownloader) RestoreFromSnapshot(snap *internal.ProcessSnapshot) error {
if snap == nil {
return errors.New("cannot restore nil snapshot")
}
s := *snap
g.Id = s.Id
g.URL = s.Info.URL
g.Metadata = s.Info
g.progress = s.Progress
g.output = s.Output
g.Params = s.Params
return nil
}
func (g *GenericDownloader) IsCompleted() bool { return g.Completed }

View File

@@ -0,0 +1,205 @@
package downloaders
import (
"context"
"errors"
"fmt"
"io"
"log/slog"
"os"
"os/exec"
"path/filepath"
"slices"
"syscall"
"time"
"github.com/google/uuid"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipes"
)
type LiveStreamDownloader struct {
progress internal.DownloadProgress
proc *os.Process
logConsumer LogConsumer
pipes []pipes.Pipe
// embedded
DownloaderBase
}
func NewLiveStreamDownloader(url string, pipes []pipes.Pipe) Downloader {
l := &LiveStreamDownloader{
logConsumer: NewFFMpegLogConsumer(),
pipes: pipes,
}
// set fields on the embedded DownloaderBase
l.Id = uuid.NewString()
l.URL = url
return l
}
func (l *LiveStreamDownloader) Start() error {
l.SetPending(true)
baseParams := []string{
l.URL,
"--newline",
"--no-colors",
"--no-playlist",
"--no-exec",
}
params := append(baseParams, "-o", "-")
cmd := exec.Command(config.Instance().Paths.DownloaderPath, params...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
// stdout = media stream
media, err := cmd.StdoutPipe()
if err != nil {
slog.Error("failed to get media stdout", slog.Any("err", err))
panic(err)
}
// stderr = log/progress
stderr, err := cmd.StderrPipe()
if err != nil {
slog.Error("failed to get stderr pipe", slog.Any("err", err))
panic(err)
}
if err := cmd.Start(); err != nil {
slog.Error("failed to start yt-dlp process", slog.Any("err", err))
panic(err)
}
l.proc = cmd.Process
ctx, cancel := context.WithCancel(context.Background())
defer func() {
l.Complete()
cancel()
}()
// --- build the pipeline ---
reader := io.Reader(media)
for _, pipe := range l.pipes {
nr, err := pipe.Connect(reader)
if err != nil {
slog.Error("pipe failed", slog.String("pipe", pipe.Name()), slog.Any("err", err))
return err
}
reader = nr
}
// --- fallback: if no FileWriter is present, write to a file ---
if !l.hasFileWriter() {
go func() {
defaultPath := filepath.Join(
config.Instance().Paths.DownloadPath,
fmt.Sprintf("%s (live) %s.mp4", l.Id, time.Now().Format(time.ANSIC)),
)
f, err := os.Create(defaultPath)
if err != nil {
slog.Error("failed to create fallback file", slog.Any("err", err))
return
}
defer f.Close()
_, err = io.Copy(f, reader)
if err != nil {
slog.Error("copy error", slog.Any("err", err))
}
slog.Info("download saved", slog.String("path", defaultPath))
}()
}
// --- logs consumer ---
logs := make(chan []byte)
go produceLogs(stderr, logs)
go consumeLogs(ctx, logs, l.logConsumer, l)
l.progress.Status = internal.StatusLiveStream
return cmd.Wait()
}
func (l *LiveStreamDownloader) Stop() error {
defer func() {
l.progress.Status = internal.StatusCompleted
l.Complete()
}()
// yt-dlp spawns multiple child processes and the parent process
// has been started with Setpgid = true. To properly kill
// all subprocesses, a SIGTERM needs to be sent to the
// process group.
if l.proc == nil {
return errors.New("*os.Process not set")
}
pgid, err := syscall.Getpgid(l.proc.Pid)
if err != nil {
return err
}
if err := syscall.Kill(-pgid, syscall.SIGTERM); err != nil {
return err
}
return nil
}
func (l *LiveStreamDownloader) Status() *internal.ProcessSnapshot {
return &internal.ProcessSnapshot{
Id: l.Id,
Info: l.Metadata,
Progress: l.progress,
DownloaderName: "livestream",
}
}
func (l *LiveStreamDownloader) UpdateSavedFilePath(p string) {}
func (l *LiveStreamDownloader) SetOutput(o internal.DownloadOutput) {}
func (l *LiveStreamDownloader) SetProgress(p internal.DownloadProgress) { l.progress = p }
func (l *LiveStreamDownloader) SetMetadata(fetcher func(url string) (*common.DownloadMetadata, error)) {
l.FetchMetadata(fetcher)
}
func (l *LiveStreamDownloader) SetPending(p bool) {
l.Pending = p
}
func (l *LiveStreamDownloader) GetId() string { return l.Id }
func (l *LiveStreamDownloader) GetUrl() string { return l.URL }
func (l *LiveStreamDownloader) RestoreFromSnapshot(snap *internal.ProcessSnapshot) error {
if snap == nil {
return errors.New("cannot restore nil snapshot")
}
s := *snap
l.Id = s.Id
l.URL = s.Info.URL
l.Metadata = s.Info
l.progress = s.Progress
return nil
}
func (l *LiveStreamDownloader) IsCompleted() bool { return l.Completed }
func (l *LiveStreamDownloader) hasFileWriter() bool {
return slices.ContainsFunc(l.pipes, func(p pipes.Pipe) bool {
return p.Name() == "file-writer"
})
}
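For illustration, recording a livestream straight to disk could be wired with a single FileWriter pipe from the pipes package shown later in this diff (a sketch; the URL and path are illustrative, and it assumes the config has already been loaded so Paths.DownloaderPath points at yt-dlp):

package main

import (
	"log/slog"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipes"
)

func main() {
	// A final FileWriter drains the stream into the file itself,
	// so hasFileWriter() is true and the fallback writer is skipped.
	d := downloaders.NewLiveStreamDownloader(
		"https://www.twitch.tv/somechannel",
		[]pipes.Pipe{
			&pipes.FileWriter{Path: "/downloads/somechannel.mp4", IsFinal: true},
		},
	)

	if err := d.Start(); err != nil { // blocks until the stream ends
		slog.Error("livestream failed", slog.Any("err", err))
	}
}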

View File

@@ -0,0 +1,68 @@
package downloaders
import (
"encoding/json"
"log/slog"
"strings"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
)
type LogConsumer interface {
GetName() string
ParseLogEntry(entry []byte, downloader Downloader)
}
type JSONLogConsumer struct{}
func NewJSONLogConsumer() LogConsumer {
return &JSONLogConsumer{}
}
func (j *JSONLogConsumer) GetName() string { return "json-log-consumer" }
func (j *JSONLogConsumer) ParseLogEntry(entry []byte, d Downloader) {
var progress internal.ProgressTemplate
var postprocess internal.PostprocessTemplate
if err := json.Unmarshal(entry, &progress); err == nil {
d.SetProgress(internal.DownloadProgress{
Status: internal.StatusDownloading,
Percentage: progress.Percentage,
Speed: progress.Speed,
ETA: progress.Eta,
})
slog.Info("progress",
slog.String("id", j.GetShortId(d.GetId())),
slog.String("url", d.GetUrl()),
slog.String("percentage", progress.Percentage),
)
}
if err := json.Unmarshal(entry, &postprocess); err == nil {
d.UpdateSavedFilePath(postprocess.FilePath)
}
}
func (j *JSONLogConsumer) GetShortId(id string) string {
return strings.Split(id, "-")[0]
}
// TODO: split into different files
type FFMpegLogConsumer struct{}
func NewFFMpegLogConsumer() LogConsumer {
return &FFMpegLogConsumer{}
}
func (f *FFMpegLogConsumer) GetName() string { return "ffmpeg-log-consumer" }
func (f *FFMpegLogConsumer) ParseLogEntry(entry []byte, d Downloader) {
slog.Info("ffmpeg output",
slog.String("id", d.GetId()),
slog.String("url", d.GetUrl()),
slog.String("output", string(entry)),
)
}

View File

@@ -0,0 +1 @@
package downloaders

View File

@@ -0,0 +1,76 @@
package downloaders
import (
"bufio"
"context"
"io"
"log/slog"
"regexp"
"slices"
"strings"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
)
func argsSanitizer(params []string) []string {
params = slices.DeleteFunc(params, func(e string) bool {
match, _ := regexp.MatchString(`(\$\{)|(\&\&)`, e)
return match
})
params = slices.DeleteFunc(params, func(e string) bool {
return e == ""
})
return params
}
func buildFilename(o *internal.DownloadOutput) {
if o.Filename != "" && strings.Contains(o.Filename, ".%(ext)s") {
o.Filename += ".%(ext)s"
}
o.Filename = strings.Replace(
o.Filename,
".%(ext)s.%(ext)s",
".%(ext)s",
1,
)
}
func produceLogs(r io.Reader, logs chan<- []byte) {
go func() {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
logs <- scanner.Bytes()
}
}()
}
func consumeLogs(ctx context.Context, logs <-chan []byte, c LogConsumer, d Downloader) {
for {
select {
case <-ctx.Done():
slog.Info("detaching logs",
slog.String("url", d.GetUrl()),
slog.String("id", c.GetName()),
)
return
case entry := <-logs:
c.ParseLogEntry(entry, d)
}
}
}
func printYtDlpErrors(stdout io.Reader, shortId, url string) {
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
slog.Error("yt-dlp process error",
slog.String("id", shortId),
slog.String("url", url),
slog.String("err", scanner.Text()),
)
}
}

173
server/internal/kv/store.go Normal file
View File

@@ -0,0 +1,173 @@
package kv
import (
"encoding/json"
"errors"
"log/slog"
"runtime"
"sync"
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
bolt "go.etcd.io/bbolt"
)
var (
bucket = []byte("downloads")
memDbEvents = make(chan downloaders.Downloader, runtime.NumCPU())
)
// In-Memory Thread-Safe Key-Value Storage with optional persistence
type Store struct {
db *bolt.DB
table map[string]downloaders.Downloader
mu sync.RWMutex
}
func NewStore(db *bolt.DB, snapshotInterval time.Duration) (*Store, error) {
// init bucket
err := db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucket)
return err
})
if err != nil {
return nil, err
}
s := &Store{
db: db,
table: make(map[string]downloaders.Downloader),
}
go func() {
ticker := time.NewTicker(snapshotInterval)
for range ticker.C {
s.Snapshot()
}
}()
return s, err
}
// Get a process pointer given its id
func (m *Store) Get(id string) (downloaders.Downloader, error) {
m.mu.RLock()
defer m.mu.RUnlock()
entry, ok := m.table[id]
if !ok {
return nil, errors.New("no process found for the given key")
}
return entry, nil
}
// Store a pointer of a process and return its id
func (m *Store) Set(d downloaders.Downloader) string {
m.mu.Lock()
m.table[d.GetId()] = d
m.mu.Unlock()
return d.GetId()
}
// Removes a process, given its id
func (m *Store) Delete(id string) {
m.mu.Lock()
delete(m.table, id)
m.mu.Unlock()
}
func (m *Store) Keys() *[]string {
var running []string
m.mu.RLock()
defer m.mu.RUnlock()
for id := range m.table {
running = append(running, id)
}
return &running
}
// Returns a slice of snapshots of all currently stored processes
func (m *Store) All() *[]internal.ProcessSnapshot {
running := []internal.ProcessSnapshot{}
m.mu.RLock()
for _, v := range m.table {
running = append(running, *(v.Status()))
}
m.mu.RUnlock()
return &running
}
// Restore a persisted state
func (m *Store) Restore(mq *queue.MessageQueue) {
m.mu.Lock()
defer m.mu.Unlock()
var snapshot []internal.ProcessSnapshot
m.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.ForEach(func(k, v []byte) error {
var snap internal.ProcessSnapshot
if err := json.Unmarshal(v, &snap); err != nil {
return err
}
snapshot = append(snapshot, snap)
return nil
})
})
for _, snap := range snapshot {
var restored downloaders.Downloader
if snap.DownloaderName == "generic" {
d := downloaders.NewGenericDownload("", []string{})
err := d.RestoreFromSnapshot(&snap)
if err != nil {
continue
}
restored = d
m.table[snap.Id] = restored
if !restored.(*downloaders.GenericDownloader).DownloaderBase.Completed {
mq.Publish(restored)
}
}
}
}
func (m *Store) EventListener() {
for p := range memDbEvents {
if p.Status().DownloaderName == "livestream" {
slog.Info("compacting Store", slog.String("id", p.GetId()))
m.Delete(p.GetId())
}
}
}
func (m *Store) Snapshot() error {
slog.Debug("snapshotting downloads state")
running := m.All()
return m.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
for _, v := range *running {
data, err := json.Marshal(v)
if err != nil {
return err
}
if err := b.Put([]byte(v.Id), data); err != nil {
return err
}
}
return nil
})
}
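Bringing the pieces together, startup code would open the bbolt database, build the store with a snapshot interval, and register downloaders under their own ids (a sketch; the path and interval are illustrative):

package main

import (
	"fmt"
	"time"

	bolt "go.etcd.io/bbolt"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
)

func main() {
	db, err := bolt.Open("/data/local.db", 0o600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Snapshot the in-memory table into the "downloads" bucket every 30s.
	store, err := kv.NewStore(db, 30*time.Second)
	if err != nil {
		panic(err)
	}

	d := downloaders.NewGenericDownload("https://example.com/watch?v=abc", nil)
	id := store.Set(d) // keyed by the downloader's own uuid

	if again, err := store.Get(id); err == nil {
		fmt.Println(again.GetUrl())
	}
}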

View File

@@ -0,0 +1,9 @@
package kv
import "github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
// struct representing the current state of the in-memory store,
// used for serialization/persistence purposes
type Session struct {
Processes []internal.ProcessSnapshot `json:"processes"`
}

View File

@@ -11,7 +11,10 @@ import (
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipes"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
)
const (
@@ -32,11 +35,11 @@ type LiveStream struct {
waitTime time.Duration
liveDate time.Time
mq *internal.MessageQueue
db *internal.MemoryDB
mq *queue.MessageQueue
store *kv.Store
}
func New(url string, done chan *LiveStream, mq *internal.MessageQueue, db *internal.MemoryDB) *LiveStream {
func New(url string, done chan *LiveStream, mq *queue.MessageQueue, store *kv.Store) *LiveStream {
return &LiveStream{
url: url,
done: done,
@@ -44,20 +47,20 @@ func New(url string, done chan *LiveStream, mq *internal.MessageQueue, db *inter
waitTime: time.Second * 0,
waitTimeChan: make(chan time.Duration),
mq: mq,
db: db,
store: store,
}
}
// Start the livestream monitoring process; on completion it signals on the done channel
func (l *LiveStream) Start() error {
cmd := exec.Command(
config.Instance().DownloaderPath,
config.Instance().Paths.DownloaderPath,
l.url,
"--wait-for-video", "30", // wait for the stream to be live and recheck every 10 secs
"--no-colors", // no ansi color fuzz
"--simulate",
"--newline",
"--paths", config.Instance().DownloadPath,
"--paths", config.Instance().Paths.DownloadPath,
)
stdout, err := cmd.StdoutPipe()
@@ -87,13 +90,12 @@ func (l *LiveStream) Start() error {
l.done <- l
// Send the started livestream to the message queue! :D
p := &internal.Process{
Url: l.url,
Livestream: true,
Params: []string{"--downloader", "ffmpeg", "--no-part"},
}
l.db.Set(p)
l.mq.Publish(p)
//TODO: add pipes
d := downloaders.NewLiveStreamDownloader(l.url, []pipes.Pipe{})
l.store.Set(d)
l.mq.Publish(d)
return nil
}
@@ -141,26 +143,13 @@ func (l *LiveStream) monitorStartTime(r io.Reader) {
}
}
const TRIES = 5
/*
if it's waiting a livestream the 5th line will indicate the time to live
it's a dumb and not robust method.
scanner.Scan()
example:
[youtube] Extracting URL: https://www.youtube.com/watch?v=IQVbGfVVjgY
[youtube] IQVbGfVVjgY: Downloading webpage
[youtube] IQVbGfVVjgY: Downloading ios player API JSON
[youtube] IQVbGfVVjgY: Downloading web creator player API JSON
WARNING: [youtube] This live event will begin in 27 minutes. <- STDERR, ignore
[wait] Waiting for 00:27:15 - Press Ctrl+C to try now <- 5th line
*/
for range TRIES {
for !strings.Contains(scanner.Text(), "Waiting for") {
scanner.Scan()
if strings.Contains(scanner.Text(), "Waiting for") {
waitTimeScanner()
}
}
waitTimeScanner()
}
func (l *LiveStream) WaitTime() <-chan time.Duration {

View File

@@ -5,19 +5,22 @@ import (
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
)
func setupTest() {
config.Instance().DownloaderPath = "yt-dlp"
config.Instance().Paths.DownloaderPath = "build/yt-dlp"
}
const URL = "https://www.youtube.com/watch?v=pwoAyLGOysU"
func TestLivestream(t *testing.T) {
setupTest()
done := make(chan *LiveStream)
ls := New("https://www.youtube.com/watch?v=LSm1daKezcE", done, &internal.MessageQueue{}, &internal.MemoryDB{})
ls := New(URL, done, &queue.MessageQueue{}, &kv.Store{})
go ls.Start()
time.AfterFunc(time.Second*20, func() {

View File

@@ -1,27 +1,31 @@
package livestream
import (
"encoding/gob"
"log/slog"
"maps"
"os"
"path/filepath"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
bolt "go.etcd.io/bbolt"
)
var bucket = []byte("livestreams")
type Monitor struct {
db *internal.MemoryDB // where the just started livestream will be published
mq *internal.MessageQueue // where the just started livestream will be published
db *bolt.DB
store *kv.Store // where the just started livestream will be published
mq *queue.MessageQueue // where the just started livestream will be published
streams map[string]*LiveStream // keeps track of the livestreams
done chan *LiveStream // to signal individual processes' completion
}
func NewMonitor(mq *internal.MessageQueue, db *internal.MemoryDB) *Monitor {
func NewMonitor(mq *queue.MessageQueue, store *kv.Store, db *bolt.DB) *Monitor {
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucket)
return err
})
return &Monitor{
mq: mq,
db: db,
store: store,
streams: make(map[string]*LiveStream),
done: make(chan *LiveStream),
}
@@ -31,14 +35,24 @@ func NewMonitor(mq *internal.MessageQueue, db *internal.MemoryDB) *Monitor {
func (m *Monitor) Schedule() {
for l := range m.done {
delete(m.streams, l.url)
m.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.Delete([]byte(l.url))
})
}
}
func (m *Monitor) Add(url string) {
ls := New(url, m.done, m.mq, m.db)
ls := New(url, m.done, m.mq, m.store)
go ls.Start()
m.streams[url] = ls
m.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.Put([]byte(url), []byte{})
})
}
func (m *Monitor) Remove(url string) error {
@@ -58,11 +72,6 @@ func (m *Monitor) Status() LiveStreamStatus {
status := make(LiveStreamStatus)
for k, v := range m.streams {
// wt, ok := <-v.WaitTime()
// if !ok {
// continue
// }
status[k] = Status{
Status: v.status,
WaitTime: v.waitTime,
@@ -73,46 +82,13 @@ func (m *Monitor) Status() LiveStreamStatus {
return status
}
// Persist the monitor current state to a file.
// The file is located in the configured config directory
func (m *Monitor) Persist() error {
fd, err := os.Create(filepath.Join(config.Instance().Dir(), "livestreams.dat"))
if err != nil {
return err
}
defer fd.Close()
slog.Debug("persisting livestream monitor state")
var toPersist []string
for url := range maps.Keys(m.streams) {
toPersist = append(toPersist, url)
}
return gob.NewEncoder(fd).Encode(toPersist)
}
// Restore a saved state and resume the monitored livestreams
func (m *Monitor) Restore() error {
fd, err := os.Open(filepath.Join(config.Instance().Dir(), "livestreams.dat"))
if err != nil {
return err
}
defer fd.Close()
var toRestore []string
if err := gob.NewDecoder(fd).Decode(&toRestore); err != nil {
return err
}
for _, url := range toRestore {
m.Add(url)
}
slog.Debug("restored livestream monitor state")
return nil
return m.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.ForEach(func(k, v []byte) error {
m.Add(string(k))
return nil
})
})
}

View File

@@ -1,158 +0,0 @@
package internal
import (
"encoding/gob"
"errors"
"log/slog"
"os"
"path/filepath"
"sync"
"github.com/google/uuid"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
)
var memDbEvents = make(chan *Process)
// In-Memory Thread-Safe Key-Value Storage with optional persistence
type MemoryDB struct {
table map[string]*Process
mu sync.RWMutex
}
func NewMemoryDB() *MemoryDB {
return &MemoryDB{
table: make(map[string]*Process),
}
}
// Get a process pointer given its id
func (m *MemoryDB) Get(id string) (*Process, error) {
m.mu.RLock()
defer m.mu.RUnlock()
entry, ok := m.table[id]
if !ok {
return nil, errors.New("no process found for the given key")
}
return entry, nil
}
// Store a pointer of a process and return its id
func (m *MemoryDB) Set(process *Process) string {
id := uuid.NewString()
m.mu.Lock()
process.Id = id
m.table[id] = process
m.mu.Unlock()
return id
}
// Removes a process progress, given the process id
func (m *MemoryDB) Delete(id string) {
m.mu.Lock()
delete(m.table, id)
m.mu.Unlock()
}
func (m *MemoryDB) Keys() *[]string {
var running []string
m.mu.RLock()
defer m.mu.RUnlock()
for id := range m.table {
running = append(running, id)
}
return &running
}
// Returns a slice of all currently stored processes progess
func (m *MemoryDB) All() *[]ProcessResponse {
running := []ProcessResponse{}
m.mu.RLock()
for k, v := range m.table {
running = append(running, ProcessResponse{
Id: k,
Info: v.Info,
Progress: v.Progress,
Output: v.Output,
Params: v.Params,
})
}
m.mu.RUnlock()
return &running
}
// Persist the database in a single file named "session.dat"
func (m *MemoryDB) Persist() error {
running := m.All()
sf := filepath.Join(config.Instance().SessionFilePath, "session.dat")
fd, err := os.Create(sf)
if err != nil {
return errors.Join(errors.New("failed to persist session"), err)
}
m.mu.RLock()
defer m.mu.RUnlock()
session := Session{Processes: *running}
if err := gob.NewEncoder(fd).Encode(session); err != nil {
return errors.Join(errors.New("failed to persist session"), err)
}
return nil
}
// Restore a persisted state
func (m *MemoryDB) Restore(mq *MessageQueue) {
sf := filepath.Join(config.Instance().SessionFilePath, "session.dat")
fd, err := os.Open(sf)
if err != nil {
return
}
var session Session
if err := gob.NewDecoder(fd).Decode(&session); err != nil {
return
}
m.mu.Lock()
defer m.mu.Unlock()
for _, proc := range session.Processes {
restored := &Process{
Id: proc.Id,
Url: proc.Info.URL,
Info: proc.Info,
Progress: proc.Progress,
Output: proc.Output,
Params: proc.Params,
}
m.table[proc.Id] = restored
if restored.Progress.Status != StatusCompleted {
mq.Publish(restored)
}
}
}
func (m *MemoryDB) EventListener() {
for p := range memDbEvents {
if p.AutoRemove {
slog.Info("compacting MemoryDB", slog.String("id", p.Id))
m.Delete(p.Id)
}
}
}

View File

@@ -1,112 +0,0 @@
package internal
import (
"context"
"errors"
"log/slog"
evbus "github.com/asaskevich/EventBus"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"golang.org/x/sync/semaphore"
)
const queueName = "process:pending"
type MessageQueue struct {
concurrency int
eventBus evbus.Bus
}
// Creates a new message queue.
// By default it will be created with a size equal to the number of logical
// CPU cores -1.
// The queue size can be set via the qs flag.
func NewMessageQueue() (*MessageQueue, error) {
qs := config.Instance().QueueSize
if qs <= 0 {
return nil, errors.New("invalid queue size")
}
return &MessageQueue{
concurrency: qs,
eventBus: evbus.New(),
}, nil
}
// Publish a message to the queue and set the task to a pending state.
func (m *MessageQueue) Publish(p *Process) {
// needs to have an id set before
p.SetPending()
m.eventBus.Publish(queueName, p)
}
func (m *MessageQueue) SetupConsumers() {
go m.downloadConsumer()
go m.metadataSubscriber()
}
// Setup the consumer listener which subscribes to the changes to the producer
// channel and triggers the "download" action.
func (m *MessageQueue) downloadConsumer() {
sem := semaphore.NewWeighted(int64(m.concurrency))
m.eventBus.SubscribeAsync(queueName, func(p *Process) {
sem.Acquire(context.Background(), 1)
defer sem.Release(1)
slog.Info("received process from event bus",
slog.String("bus", queueName),
slog.String("consumer", "downloadConsumer"),
slog.String("id", p.getShortId()),
)
if p.Progress.Status != StatusCompleted {
slog.Info("started process",
slog.String("bus", queueName),
slog.String("id", p.getShortId()),
)
if p.Livestream {
// livestreams have higher priority and they ignore the semaphore
go p.Start()
} else {
p.Start()
}
}
}, false)
}
// Setup the metadata consumer listener which subscribes to the changes to the
// producer channel and adds metadata to each download.
func (m *MessageQueue) metadataSubscriber() {
// How many concurrent metadata fetcher jobs are spawned
// Since there's ongoing downloads, 1 job at time seems a good compromise
sem := semaphore.NewWeighted(1)
m.eventBus.SubscribeAsync(queueName, func(p *Process) {
sem.Acquire(context.Background(), 1)
defer sem.Release(1)
slog.Info("received process from event bus",
slog.String("bus", queueName),
slog.String("consumer", "metadataConsumer"),
slog.String("id", p.getShortId()),
)
if p.Progress.Status == StatusCompleted {
slog.Warn("proccess has an illegal state",
slog.String("id", p.getShortId()),
slog.Int("status", p.Progress.Status),
)
return
}
if err := p.SetMetadata(); err != nil {
slog.Error("failed to retrieve metadata",
slog.String("id", p.getShortId()),
slog.String("err", err.Error()),
)
}
}, false)
}

View File

@@ -0,0 +1,57 @@
package metadata
import (
"bytes"
"encoding/json"
"errors"
"io"
"log/slog"
"os/exec"
"syscall"
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
)
func DefaultFetcher(url string) (*common.DownloadMetadata, error) {
cmd := exec.Command(config.Instance().Paths.DownloaderPath, url, "-J")
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
meta := common.DownloadMetadata{
URL: url,
CreatedAt: time.Now(),
}
if err := cmd.Start(); err != nil {
return nil, err
}
var bufferedStderr bytes.Buffer
go func() {
io.Copy(&bufferedStderr, stderr)
}()
slog.Info("retrieving metadata", slog.String("url", url))
if err := json.NewDecoder(stdout).Decode(&meta); err != nil {
return nil, err
}
if err := cmd.Wait(); err != nil {
return nil, errors.New(bufferedStderr.String())
}
return &meta, nil
}

View File

@@ -0,0 +1,92 @@
package pipeline
import (
"encoding/json"
"net/http"
"github.com/go-chi/chi/v5"
bolt "go.etcd.io/bbolt"
)
type handler struct {
store *Store
}
func NewRestHandler(db *bolt.DB) *handler {
store, _ := NewStore(db)
return &handler{
store: store,
}
}
func (h *handler) GetPipeline(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id := chi.URLParam(r, "id")
p, err := h.store.Get(id)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := json.NewEncoder(w).Encode(p); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (h *handler) GetAllPipelines(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
p, err := h.store.List()
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := json.NewEncoder(w).Encode(p); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (h *handler) SavePipeline(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer r.Body.Close()
var req Pipeline
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
id, err := h.store.Save(req)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := json.NewEncoder(w).Encode(id); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (h *handler) DeletePipeline(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
id := chi.URLParam(r, "id")
err := h.store.Delete(id)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := json.NewEncoder(w).Encode("ok"); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
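The handler methods are ordinary http.HandlerFuncs, so mounting them on a chi router is straightforward (a sketch; the mount point, database path, and the pipeline import path are assumptions, only the {id} URL parameter is required by GetPipeline and DeletePipeline):

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi/v5"
	bolt "go.etcd.io/bbolt"

	// import path assumed; adjust to wherever the pipeline package actually lives
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipeline"
)

func main() {
	db, err := bolt.Open("/data/local.db", 0o600, nil)
	if err != nil {
		log.Fatal(err)
	}

	h := pipeline.NewRestHandler(db)

	r := chi.NewRouter()
	r.Route("/api/v1/pipelines", func(r chi.Router) {
		r.Get("/", h.GetAllPipelines)
		r.Get("/{id}", h.GetPipeline) // the handler reads chi.URLParam(r, "id")
		r.Post("/", h.SavePipeline)
		r.Delete("/{id}", h.DeletePipeline)
	})

	log.Fatal(http.ListenAndServe(":3033", r))
}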

View File

@@ -0,0 +1,103 @@
package pipeline
import (
"encoding/json"
"fmt"
"github.com/google/uuid"
bolt "go.etcd.io/bbolt"
)
var bucket = []byte("pipelines")
type Step struct {
Type string `json:"type"` // e.g. "transcoder", "filewriter"
FFmpegArgs []string `json:"ffmpeg_args,omitempty"` // args passed to ffmpeg
Path string `json:"path,omitempty"` // filewriter only
Extension string `json:"extension,omitempty"` // filewriter only
}
type Pipeline struct {
ID string `json:"id"`
Name string `json:"name"`
Steps []Step `json:"steps"`
}
type Store struct {
db *bolt.DB
}
func NewStore(db *bolt.DB) (*Store, error) {
// init bucket
err := db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucket)
return err
})
if err != nil {
return nil, err
}
return &Store{db: db}, nil
}
func (s *Store) Save(p Pipeline) (string, error) {
if p.ID == "" {
p.ID = uuid.NewString()
}
data, err := json.Marshal(p)
if err != nil {
return "", err
}
return p.ID, s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.Put([]byte(p.ID), data)
})
}
func (s *Store) Get(id string) (*Pipeline, error) {
var p Pipeline
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
v := b.Get([]byte(id))
if v == nil {
return fmt.Errorf("pipeline %s not found", id)
}
return json.Unmarshal(v, &p)
})
if err != nil {
return nil, err
}
return &p, nil
}
func (s *Store) List() ([]Pipeline, error) {
var result []Pipeline
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.ForEach(func(k, v []byte) error {
var p Pipeline
if err := json.Unmarshal(v, &p); err != nil {
return err
}
result = append(result, p)
return nil
})
})
if err != nil {
return nil, err
}
return result, nil
}
func (s *Store) Delete(id string) error {
return s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.Delete([]byte(id))
})
}
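As a concrete example, persisting a pipeline that transcodes to VP9 and then writes the result to disk (a sketch; the Type strings follow the Step comments above, while the ffmpeg arguments, paths, and the pipeline import path are assumptions):

package main

import (
	"fmt"

	bolt "go.etcd.io/bbolt"

	// import path assumed; adjust to wherever the pipeline package actually lives
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipeline"
)

func main() {
	db, _ := bolt.Open("/data/local.db", 0o600, nil)

	store, err := pipeline.NewStore(db)
	if err != nil {
		panic(err)
	}

	p := pipeline.Pipeline{
		Name: "vp9-archive",
		Steps: []pipeline.Step{
			{Type: "transcoder", FFmpegArgs: []string{"-c:v", "libvpx-vp9", "-b:v", "2M"}},
			{Type: "filewriter", Path: "/downloads", Extension: "webm"},
		},
	}

	id, err := store.Save(p) // an ID is generated when the field is empty
	if err != nil {
		panic(err)
	}

	saved, _ := store.Get(id)
	fmt.Println(saved.Name, len(saved.Steps)) // vp9-archive 2
}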

View File

@@ -0,0 +1,45 @@
package pipes
import (
"io"
"log/slog"
"os"
)
type FileWriter struct {
Path string
IsFinal bool
}
func (f *FileWriter) Name() string { return "file-writer" }
func (f *FileWriter) Connect(r io.Reader) (io.Reader, error) {
file, err := os.Create(f.Path)
if err != nil {
return nil, err
}
if f.IsFinal {
go func() {
defer file.Close()
if _, err := io.Copy(file, r); err != nil {
slog.Error("FileWriter (final) error", slog.Any("err", err))
}
}()
return r, nil
}
pr, pw := io.Pipe()
go func() {
defer file.Close()
defer pw.Close()
writer := io.MultiWriter(file, pw)
if _, err := io.Copy(writer, r); err != nil {
slog.Error("FileWriter (pipeline) error", slog.Any("err", err))
}
}()
return pr, nil
}

View File

@@ -0,0 +1,66 @@
package pipes
import (
"bufio"
"errors"
"io"
"log/slog"
"os/exec"
"strings"
)
type Transcoder struct {
Args []string
}
func (t *Transcoder) Name() string { return "ffmpeg-transcoder" }
func (t *Transcoder) Connect(r io.Reader) (io.Reader, error) {
cmd := exec.Command("ffmpeg",
append([]string{"-i", "pipe:0"}, append(t.Args, "-f", "webm", "pipe:1")...)...,
)
stdin, err := cmd.StdinPipe()
if err != nil {
return nil, err
}
stdout, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
stderr, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
go func() {
reader := bufio.NewReader(stderr)
var line string
for {
part, err := reader.ReadString('\r')
line += part
if err != nil {
break
}
line = strings.TrimRight(line, "\r\n")
slog.Info("ffmpeg transcoder", slog.String("log", line))
line = ""
}
}()
go func() {
defer stdin.Close()
_, err := io.Copy(stdin, r)
if err != nil && !errors.Is(err, io.EOF) {
slog.Error("transcoder stdin error", slog.Any("err", err))
}
}()
if err := cmd.Start(); err != nil {
return nil, err
}
return stdout, nil
}

View File

@@ -0,0 +1,8 @@
package pipes
import "io"
type Pipe interface {
Name() string
Connect(r io.Reader) (io.Reader, error)
}
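Pipes compose by threading an io.Reader through successive Connect calls, exactly as LiveStreamDownloader does above. A standalone sketch of the same chaining (the source data and output path are illustrative; the Transcoder stage assumes an ffmpeg binary on the PATH):

package main

import (
	"io"
	"log"
	"strings"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipes"
)

func main() {
	// Stand-in for the media stream coming from yt-dlp's stdout.
	var reader io.Reader = strings.NewReader("raw media bytes")

	stages := []pipes.Pipe{
		&pipes.Transcoder{Args: []string{"-c:v", "libvpx-vp9"}},        // re-encode the stream
		&pipes.FileWriter{Path: "/downloads/out.webm", IsFinal: false}, // tee it to disk, pass it on
	}

	for _, p := range stages {
		next, err := p.Connect(reader)
		if err != nil {
			log.Fatalf("pipe %s failed: %v", p.Name(), err)
		}
		reader = next
	}

	// Draining the final reader drives the whole chain; the FileWriter
	// tees everything it sees to /downloads/out.webm along the way.
	if _, err := io.Copy(io.Discard, reader); err != nil {
		log.Fatal(err)
	}
}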

View File

@@ -1,16 +0,0 @@
package internal
type Pool []*Worker
func (h Pool) Len() int { return len(h) }
func (h Pool) Less(i, j int) bool { return h[i].index < h[j].index }
func (h Pool) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *Pool) Push(x any) { *h = append(*h, x.(*Worker)) }
func (h *Pool) Pop() any {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}

View File

@@ -1,384 +0,0 @@
package internal
import (
"bufio"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"regexp"
"slices"
"syscall"
"os"
"os/exec"
"strings"
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/archiver"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
)
const downloadTemplate = `download:
{
"eta":%(progress.eta)s,
"percentage":"%(progress._percent_str)s",
"speed":%(progress.speed)s
}`
// filename not returning the correct extension after postprocess
const postprocessTemplate = `postprocess:
{
"filepath":"%(info.filepath)s"
}
`
const (
StatusPending = iota
StatusDownloading
StatusCompleted
StatusErrored
)
// Process descriptor
type Process struct {
Id string
Url string
Livestream bool
AutoRemove bool
Params []string
Info DownloadInfo
Progress DownloadProgress
Output DownloadOutput
proc *os.Process
}
// Starts spawns/forks a new yt-dlp process and parse its stdout.
// The process is spawned to outputting a custom progress text that
// Resembles a JSON Object in order to Unmarshal it later.
// This approach is anyhow not perfect: quotes are not escaped properly.
// Each process is not identified by its PID but by a UUIDv4
func (p *Process) Start() {
// escape bash variable escaping and command piping, you'll never know
// what they might come with...
p.Params = slices.DeleteFunc(p.Params, func(e string) bool {
match, _ := regexp.MatchString(`(\$\{)|(\&\&)`, e)
return match
})
p.Params = slices.DeleteFunc(p.Params, func(e string) bool {
return e == ""
})
out := DownloadOutput{
Path: config.Instance().DownloadPath,
Filename: "%(title)s.%(ext)s",
}
if p.Output.Path != "" {
out.Path = p.Output.Path
}
if p.Output.Filename != "" {
out.Filename = p.Output.Filename
}
buildFilename(&p.Output)
templateReplacer := strings.NewReplacer("\n", "", "\t", "", " ", "")
baseParams := []string{
strings.Split(p.Url, "?list")[0], //no playlist
"--newline",
"--no-colors",
"--no-playlist",
"--progress-template",
templateReplacer.Replace(downloadTemplate),
"--progress-template",
templateReplacer.Replace(postprocessTemplate),
}
// if user asked to manually override the output path...
if !(slices.Contains(p.Params, "-P") || slices.Contains(p.Params, "--paths")) {
p.Params = append(p.Params, "-o")
p.Params = append(p.Params, fmt.Sprintf("%s/%s", out.Path, out.Filename))
}
params := append(baseParams, p.Params...)
slog.Info("requesting download", slog.String("url", p.Url), slog.Any("params", params))
cmd := exec.Command(config.Instance().DownloaderPath, params...)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
stdout, err := cmd.StdoutPipe()
if err != nil {
slog.Error("failed to get a stdout pipe", slog.Any("err", err))
panic(err)
}
stderr, err := cmd.StderrPipe()
if err != nil {
slog.Error("failed to get a stderr pipe", slog.Any("err", err))
panic(err)
}
if err := cmd.Start(); err != nil {
slog.Error("failed to start yt-dlp process", slog.Any("err", err))
panic(err)
}
p.proc = cmd.Process
ctx, cancel := context.WithCancel(context.Background())
defer func() {
stdout.Close()
p.Complete()
cancel()
}()
logs := make(chan []byte)
go produceLogs(stdout, logs)
go p.consumeLogs(ctx, logs)
go p.detectYtDlpErrors(stderr)
cmd.Wait()
}
func produceLogs(r io.Reader, logs chan<- []byte) {
go func() {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
logs <- scanner.Bytes()
}
}()
}
func (p *Process) consumeLogs(ctx context.Context, logs <-chan []byte) {
for {
select {
case <-ctx.Done():
slog.Info("detaching from yt-dlp stdout",
slog.String("id", p.getShortId()),
slog.String("url", p.Url),
)
return
case entry := <-logs:
p.parseLogEntry(entry)
}
}
}
func (p *Process) parseLogEntry(entry []byte) {
var progress ProgressTemplate
var postprocess PostprocessTemplate
if err := json.Unmarshal(entry, &progress); err == nil {
p.Progress = DownloadProgress{
Status: StatusDownloading,
Percentage: progress.Percentage,
Speed: progress.Speed,
ETA: progress.Eta,
}
slog.Info("progress",
slog.String("id", p.getShortId()),
slog.String("url", p.Url),
slog.String("percentage", progress.Percentage),
)
}
if err := json.Unmarshal(entry, &postprocess); err == nil {
p.Output.SavedFilePath = postprocess.FilePath
// slog.Info("postprocess",
// slog.String("id", p.getShortId()),
// slog.String("url", p.Url),
// slog.String("filepath", postprocess.FilePath),
// )
}
}
func (p *Process) detectYtDlpErrors(r io.Reader) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
slog.Error("yt-dlp process error",
slog.String("id", p.getShortId()),
slog.String("url", p.Url),
slog.String("err", scanner.Text()),
)
}
}
// Keep process in the memoryDB but marks it as complete
// Convention: All completed processes has progress -1
// and speed 0 bps.
func (p *Process) Complete() {
// auto archive
// TODO: it's not that deterministic :/
if p.Progress.Percentage == "" && p.Progress.Speed == 0 {
var serializedMetadata bytes.Buffer
json.NewEncoder(&serializedMetadata).Encode(p.Info)
archiver.Publish(&archiver.Message{
Id: p.Id,
Path: p.Output.SavedFilePath,
Title: p.Info.Title,
Thumbnail: p.Info.Thumbnail,
Source: p.Url,
Metadata: serializedMetadata.String(),
CreatedAt: p.Info.CreatedAt,
})
}
p.Progress = DownloadProgress{
Status: StatusCompleted,
Percentage: "-1",
Speed: 0,
ETA: 0,
}
// for safety, if the filename is not set, set it with original function
if p.Output.SavedFilePath == "" {
p.GetFileName(&p.Output)
}
slog.Info("finished",
slog.String("id", p.getShortId()),
slog.String("url", p.Url),
)
memDbEvents <- p
}
// Kill a process and remove it from the memory
func (p *Process) Kill() error {
defer func() {
p.Progress.Status = StatusCompleted
}()
// yt-dlp uses multiple child process the parent process
// has been spawned with setPgid = true. To properly kill
// all subprocesses a SIGTERM need to be sent to the correct
// process group
if p.proc == nil {
return errors.New("*os.Process not set")
}
pgid, err := syscall.Getpgid(p.proc.Pid)
if err != nil {
return err
}
if err := syscall.Kill(-pgid, syscall.SIGTERM); err != nil {
return err
}
return nil
}
func (p *Process) GetFileName(o *DownloadOutput) error {
cmd := exec.Command(
config.Instance().DownloaderPath,
"--print", "filename",
"-o", fmt.Sprintf("%s/%s", o.Path, o.Filename),
p.Url,
)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
out, err := cmd.Output()
if err != nil {
return err
}
p.Output.SavedFilePath = strings.Trim(string(out), "\n")
return nil
}
func (p *Process) SetPending() {
// Since video's title isn't available yet, fill in with the URL.
p.Info = DownloadInfo{
URL: p.Url,
Title: p.Url,
CreatedAt: time.Now(),
}
p.Progress.Status = StatusPending
}
func (p *Process) SetMetadata() error {
cmd := exec.Command(config.Instance().DownloaderPath, p.Url, "-J")
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
stdout, err := cmd.StdoutPipe()
if err != nil {
slog.Error("failed to connect to stdout",
slog.String("id", p.getShortId()),
slog.String("url", p.Url),
slog.String("err", err.Error()),
)
return err
}
stderr, err := cmd.StderrPipe()
if err != nil {
slog.Error("failed to connect to stderr",
slog.String("id", p.getShortId()),
slog.String("url", p.Url),
slog.String("err", err.Error()),
)
return err
}
info := DownloadInfo{
URL: p.Url,
CreatedAt: time.Now(),
}
if err := cmd.Start(); err != nil {
return err
}
var bufferedStderr bytes.Buffer
go func() {
io.Copy(&bufferedStderr, stderr)
}()
slog.Info("retrieving metadata",
slog.String("id", p.getShortId()),
slog.String("url", p.Url),
)
if err := json.NewDecoder(stdout).Decode(&info); err != nil {
return err
}
p.Info = info
p.Progress.Status = StatusPending
if err := cmd.Wait(); err != nil {
return errors.New(bufferedStderr.String())
}
return nil
}
func (p *Process) getShortId() string { return strings.Split(p.Id, "-")[0] }
func buildFilename(o *DownloadOutput) {
if o.Filename != "" && strings.Contains(o.Filename, ".%(ext)s") {
o.Filename += ".%(ext)s"
}
o.Filename = strings.Replace(
o.Filename,
".%(ext)s.%(ext)s",
".%(ext)s",
1,
)
}

View File

@@ -0,0 +1,123 @@
package queue
import (
"context"
"errors"
"log/slog"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/metadata"
)
type MessageQueue struct {
concurrency int
downloadQueue chan downloaders.Downloader
metadataQueue chan downloaders.Downloader
ctx context.Context
cancel context.CancelFunc
}
func NewMessageQueue() (*MessageQueue, error) {
qs := config.Instance().Server.QueueSize
if qs <= 0 {
return nil, errors.New("invalid queue size")
}
ctx, cancel := context.WithCancel(context.Background())
return &MessageQueue{
concurrency: qs,
downloadQueue: make(chan downloaders.Downloader, qs*2),
metadataQueue: make(chan downloaders.Downloader, qs*4),
ctx: ctx,
cancel: cancel,
}, nil
}
// Publish download job
func (m *MessageQueue) Publish(d downloaders.Downloader) {
d.SetPending(true)
select {
case m.downloadQueue <- d:
slog.Info("published download", slog.String("id", d.GetId()))
case <-m.ctx.Done():
slog.Warn("queue stopped, dropping download", slog.String("id", d.GetId()))
}
}
// Workers: download + metadata
func (m *MessageQueue) SetupConsumers() {
// N parallel workers for downloadQueue
for i := 0; i < m.concurrency; i++ {
go m.downloadWorker(i)
}
// 1 serial worker for metadata
go m.metadataWorker()
}
// Download worker
func (m *MessageQueue) downloadWorker(workerId int) {
for {
select {
case <-m.ctx.Done():
return
case p := <-m.downloadQueue:
if p == nil {
continue
}
if p.IsCompleted() {
continue
}
slog.Info("download worker started",
slog.Int("worker", workerId),
slog.String("id", p.GetId()),
)
p.Start()
// after the download starts successfully, pass it to the metadata queue
select {
case m.metadataQueue <- p:
slog.Info("queued for metadata", slog.String("id", p.GetId()))
case <-m.ctx.Done():
return
}
}
}
}
func (m *MessageQueue) metadataWorker() {
for {
select {
case <-m.ctx.Done():
return
case p := <-m.metadataQueue:
if p == nil {
continue
}
slog.Info("metadata worker started",
slog.String("id", p.GetId()),
)
if p.IsCompleted() {
slog.Warn("metadata skipped, illegal state",
slog.String("id", p.GetId()),
)
continue
}
p.SetMetadata(metadata.DefaultFetcher)
}
}
}
func (m *MessageQueue) Stop() {
m.cancel()
close(m.downloadQueue)
close(m.metadataQueue)
}
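End to end, the channel-based queue replaces the EventBus-driven one; a typical startup sequence might look like this (a sketch; it assumes the config file has already been loaded so Server.QueueSize and Paths.DownloaderPath are set):

package main

import (
	"log"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
)

func main() {
	mq, err := queue.NewMessageQueue()
	if err != nil {
		log.Fatal(err) // e.g. queue_size <= 0 in the loaded config
	}
	mq.SetupConsumers() // spawns QueueSize download workers and one metadata worker

	d := downloaders.NewGenericDownload("https://example.com/watch?v=abc", nil)
	mq.Publish(d) // marks the download pending and hands it to a worker

	// keep the process alive; mq.Stop() cancels the workers on shutdown
	select {}
}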

View File

@@ -1,15 +0,0 @@
package internal
type Worker struct {
requests chan Process // downloads to do
pending int // downloads pending
index int // index in the heap
}
func (w *Worker) Work(done chan *Worker) {
for {
req := <-w.requests
req.Start()
done <- w
}
}

View File

@@ -91,10 +91,10 @@ func sse(logger *ObservableLogger) http.HandlerFunc {
func ApplyRouter(logger *ObservableLogger) func(chi.Router) {
return func(r chi.Router) {
if config.Instance().RequireAuth {
if config.Instance().Authentication.RequireAuth {
r.Use(middlewares.Authenticated)
}
if config.Instance().UseOpenId {
if config.Instance().OpenId.UseOpenId {
r.Use(openid.Middleware)
}
r.Get("/ws", webSocket(logger))

View File

@@ -0,0 +1,21 @@
package middlewares
import (
"net/http"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/openid"
)
func ApplyAuthenticationByConfig(next http.Handler) http.Handler {
handler := next
if config.Instance().Authentication.RequireAuth {
handler = Authenticated(handler)
}
if config.Instance().OpenId.UseOpenId {
handler = openid.Middleware(handler)
}
return handler
}
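Because the helper has the standard func(http.Handler) http.Handler shape, routers can apply both checks with a single Use call (a sketch; the route, handler body, and the middlewares import path are assumptions):

package main

import (
	"log"
	"net/http"

	"github.com/go-chi/chi/v5"

	// import path assumed; adjust to wherever the middlewares package actually lives
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/middlewares"
)

func main() {
	r := chi.NewRouter()
	r.Use(middlewares.ApplyAuthenticationByConfig) // session auth and/or OpenID, driven by the config

	r.Get("/api/v1/downloads", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("only reachable when the configured auth checks pass"))
	})

	log.Fatal(http.ListenAndServe(":3033", r))
}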

View File

@@ -14,24 +14,27 @@ var (
)
func Configure() {
if !config.Instance().UseOpenId {
if !config.Instance().OpenId.UseOpenId {
return
}
provider, err := oidc.NewProvider(context.Background(), config.Instance().OpenIdProviderURL)
provider, err := oidc.NewProvider(
context.Background(),
config.Instance().OpenId.ProviderURL,
)
if err != nil {
panic(err)
}
oauth2Config = oauth2.Config{
ClientID: config.Instance().OpenIdClientId,
ClientSecret: config.Instance().OpenIdClientSecret,
RedirectURL: config.Instance().OpenIdRedirectURL,
ClientID: config.Instance().OpenId.ClientId,
ClientSecret: config.Instance().OpenId.ClientSecret,
RedirectURL: config.Instance().OpenId.RedirectURL,
Endpoint: provider.Endpoint(),
Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
}
verifier = provider.Verifier(&oidc.Config{
ClientID: config.Instance().OpenIdClientId,
ClientID: config.Instance().OpenId.ClientId,
})
}

View File

@@ -6,10 +6,12 @@ import (
"encoding/json"
"errors"
"net/http"
"slices"
"time"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/google/uuid"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"golang.org/x/oauth2"
)
@@ -76,6 +78,21 @@ func doAuthentification(r *http.Request, setCookieCallback func(t *oauth2.Token)
return nil, err
}
var claims struct {
Email string `json:"email"`
Verified bool `json:"email_verified"`
}
if err := idToken.Claims(&claims); err != nil {
return nil, err
}
whitelist := config.Instance().OpenId.EmailWhitelist
if len(whitelist) > 0 && !slices.Contains(whitelist, claims.Email) {
return nil, errors.New("email address not found in ACL")
}
nonce, err := r.Cookie("nonce")
if err != nil {
return nil, err

View File

@@ -0,0 +1,86 @@
package playlist
import (
"slices"
"strconv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
)
/*
Applicable modifiers
full | short | description
---------------------------------------------------------------------------------
--playlist-start NUMBER | -I NUMBER: | discard first N entries
--playlist-end NUMBER | -I :NUMBER | discard last N entries
--playlist-reverse | -I ::-1 | self explanatory
--max-downloads NUMBER | | stops after N completed downloads
*/
func ApplyModifiers(entries *[]common.DownloadMetadata, args []string) error {
for i, modifier := range args {
switch modifier {
case "--playlist-start":
return playlistStart(i, modifier, args, entries)
case "--playlist-end":
return playlistEnd(i, modifier, args, entries)
case "--max-downloads":
return maxDownloads(i, modifier, args, entries)
case "--playlist-reverse":
slices.Reverse(*entries)
return nil
}
}
return nil
}
func playlistStart(i int, modifier string, args []string, entries *[]common.DownloadMetadata) error {
if !guard(i, len(args)) {
return nil
}
n, err := strconv.Atoi(args[i+1])
if err != nil {
return err
}
*entries = (*entries)[n:]
return nil
}
func playlistEnd(i int, modifier string, args []string, entries *[]common.DownloadMetadata) error {
if !guard(i, len(args)) {
return nil
}
n, err := strconv.Atoi(args[i+1])
if err != nil {
return err
}
*entries = (*entries)[:n]
return nil
}
func maxDownloads(i int, modifier string, args []string, entries *[]common.DownloadMetadata) error {
if !guard(i, len(args)) {
return nil
}
n, err := strconv.Atoi(args[i+1])
if err != nil {
return err
}
*entries = (*entries)[0:n]
return nil
}
func guard(i, n int) bool { return i+1 < n }
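For instance, trimming a flattened playlist down to its first three entries (a sketch; the entries are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/playlist"
)

func main() {
	entries := []common.DownloadMetadata{
		{URL: "https://example.com/watch?v=a"},
		{URL: "https://example.com/watch?v=b"},
		{URL: "https://example.com/watch?v=c"},
		{URL: "https://example.com/watch?v=d"},
	}

	// --playlist-end 3 keeps the first three entries (entries[:3]).
	if err := playlist.ApplyModifiers(&entries, []string{"--playlist-end", "3"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(entries)) // 3
}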

View File

@@ -1,4 +1,4 @@
package internal
package playlist
import (
"encoding/json"
@@ -9,20 +9,21 @@ import (
"strings"
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
)
type metadata struct {
Entries []DownloadInfo `json:"entries"`
Count int `json:"playlist_count"`
PlaylistTitle string `json:"title"`
Type string `json:"_type"`
}
func PlaylistDetect(req internal.DownloadRequest, mq *queue.MessageQueue, db *kv.Store) error {
params := append(req.Params, "--flat-playlist", "-J")
urlWithParams := append([]string{req.URL}, params...)
func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
var (
downloader = config.Instance().DownloaderPath
cmd = exec.Command(downloader, req.URL, "--flat-playlist", "-J")
downloader = config.Instance().Paths.DownloaderPath
cmd = exec.Command(downloader, urlWithParams...)
)
stdout, err := cmd.StdoutPipe()
@@ -30,7 +31,7 @@ func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
return err
}
var m metadata
var m Metadata
if err := cmd.Start(); err != nil {
return err
@@ -52,13 +53,21 @@ func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
return errors.New("probably not a valid URL")
}
if m.Type == "playlist" {
entries := slices.CompactFunc(slices.Compact(m.Entries), func(a DownloadInfo, b DownloadInfo) bool {
if m.IsPlaylist() {
entries := slices.CompactFunc(slices.Compact(m.Entries), func(a common.DownloadMetadata, b common.DownloadMetadata) bool {
return a.URL == b.URL
})
entries = slices.DeleteFunc(entries, func(e common.DownloadMetadata) bool {
return strings.Contains(e.URL, "list=")
})
slog.Info("playlist detected", slog.String("url", req.URL), slog.Int("count", len(entries)))
if err := ApplyModifiers(&entries, req.Params); err != nil {
return err
}
for i, meta := range entries {
// detect playlist title from metadata since each playlist entry will be
// treated as an individual download
@@ -72,31 +81,22 @@ func PlaylistDetect(req DownloadRequest, mq *MessageQueue, db *MemoryDB) error {
//XXX: it's idiotic but it works: virtually delay the creation time
meta.CreatedAt = time.Now().Add(time.Millisecond * time.Duration(i*10))
proc := &Process{
Url: meta.URL,
Progress: DownloadProgress{},
Output: DownloadOutput{Filename: req.Rename},
Info: meta,
Params: req.Params,
}
downloader := downloaders.NewGenericDownload(meta.URL, req.Params)
downloader.SetOutput(internal.DownloadOutput{Filename: req.Rename})
// downloader.SetMetadata(meta)
proc.Info.URL = meta.URL
time.Sleep(time.Millisecond)
db.Set(proc)
mq.Publish(proc)
db.Set(downloader)
mq.Publish(downloader)
}
return nil
}
proc := &Process{
Url: req.URL,
Params: req.Params,
}
d := downloaders.NewGenericDownload(req.URL, req.Params)
db.Set(proc)
mq.Publish(proc)
slog.Info("sending new process to message queue", slog.String("url", proc.Url))
db.Set(d)
mq.Publish(d)
slog.Info("sending new process to message queue", slog.String("url", d.GetUrl()))
return cmd.Wait()
}

server/playlist/types.go (new file, 12 lines)
View File

@@ -0,0 +1,12 @@
package playlist
import "github.com/marcopiovanello/yt-dlp-web-ui/v3/server/common"
type Metadata struct {
Entries []common.DownloadMetadata `json:"entries"`
Count int `json:"playlist_count"`
PlaylistTitle string `json:"title"`
Type string `json:"_type"`
}
func (m *Metadata) IsPlaylist() bool { return m.Type == "playlist" }
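
For context, a sketch of the flat-playlist JSON the new Metadata type is meant to decode (the values are made up; the per-entry shape belongs to common.DownloadMetadata and is not shown in this diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/playlist"
)

func main() {
	// trimmed stand-in for `yt-dlp --flat-playlist -J <url>` output
	raw := []byte(`{"_type":"playlist","title":"Example playlist","playlist_count":2,"entries":[]}`)

	var m playlist.Metadata
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	fmt.Println(m.IsPlaylist(), m.PlaylistTitle, m.Count) // true Example playlist 2
}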

View File

@@ -1,13 +1,16 @@
package rest
import (
"database/sql"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/livestream"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
bolt "go.etcd.io/bbolt"
)
type ContainerArgs struct {
DB *sql.DB
MDB *internal.MemoryDB
MQ *internal.MessageQueue
DB *bolt.DB
MDB *kv.Store
MQ *queue.MessageQueue
LM *livestream.Monitor
}

View File

@@ -19,10 +19,10 @@ func ApplyRouter(args *ContainerArgs) func(chi.Router) {
h := Container(args)
return func(r chi.Router) {
if config.Instance().RequireAuth {
if config.Instance().Authentication.RequireAuth {
r.Use(middlewares.Authenticated)
}
if config.Instance().UseOpenId {
if config.Instance().OpenId.UseOpenId {
r.Use(openid.Middleware)
}
r.Post("/exec", h.Exec())

View File

@@ -14,11 +14,7 @@ var (
func ProvideService(args *ContainerArgs) *Service {
serviceOnce.Do(func() {
service = &Service{
mdb: args.MDB,
db: args.DB,
mq: args.MQ,
}
service = NewService(args.MDB, args.DB, args.MQ, args.LM)
})
return service
}

View File

@@ -2,8 +2,9 @@ package rest
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
@@ -12,41 +13,62 @@ import (
"github.com/google/uuid"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/livestream"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/playlist"
bolt "go.etcd.io/bbolt"
)
type Service struct {
mdb *internal.MemoryDB
db *sql.DB
mq *internal.MessageQueue
mdb *kv.Store
db *bolt.DB
mq *queue.MessageQueue
lm *livestream.Monitor
}
func (s *Service) Exec(req internal.DownloadRequest) (string, error) {
p := &internal.Process{
Url: req.URL,
Params: req.Params,
Output: internal.DownloadOutput{
Path: req.Path,
Filename: req.Rename,
},
func NewService(
mdb *kv.Store,
db *bolt.DB,
mq *queue.MessageQueue,
lm *livestream.Monitor,
) *Service {
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists([]byte("templates"))
return err
})
return &Service{
mdb: mdb,
db: db,
mq: mq,
lm: lm,
}
}
id := s.mdb.Set(p)
s.mq.Publish(p)
func (s *Service) Exec(req internal.DownloadRequest) (string, error) {
d := downloaders.NewGenericDownload(req.URL, req.Params)
d.SetOutput(internal.DownloadOutput{
Path: req.Path,
Filename: req.Rename,
})
id := s.mdb.Set(d)
s.mq.Publish(d)
return id, nil
}
func (s *Service) ExecPlaylist(req internal.DownloadRequest) error {
return internal.PlaylistDetect(req, s.mq, s.mdb)
return playlist.PlaylistDetect(req, s.mq, s.mdb)
}
func (s *Service) ExecLivestream(req internal.DownloadRequest) {
s.lm.Add(req.URL)
}
func (s *Service) Running(ctx context.Context) (*[]internal.ProcessResponse, error) {
func (s *Service) Running(ctx context.Context) (*[]internal.ProcessSnapshot, error) {
select {
case <-ctx.Done():
return nil, context.Canceled
@@ -84,64 +106,56 @@ func (s *Service) SetCookies(ctx context.Context, cookies string) error {
}
func (s *Service) SaveTemplate(ctx context.Context, template *internal.CustomTemplate) error {
conn, err := s.db.Conn(ctx)
if err != nil {
return err
}
defer conn.Close()
_, err = conn.ExecContext(
ctx,
"INSERT INTO templates (id, name, content) VALUES (?, ?, ?)",
uuid.NewString(),
template.Name,
template.Content,
)
return err
return s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("templates"))
v, err := json.Marshal(template)
if err != nil {
return err
}
return b.Put([]byte(uuid.NewString()), v)
})
}
func (s *Service) GetTemplates(ctx context.Context) (*[]internal.CustomTemplate, error) {
conn, err := s.db.Conn(ctx)
if err != nil {
return nil, err
}
defer conn.Close()
rows, err := conn.QueryContext(ctx, "SELECT * FROM templates")
if err != nil {
return nil, err
}
defer rows.Close()
templates := make([]internal.CustomTemplate, 0)
for rows.Next() {
t := internal.CustomTemplate{}
err := rows.Scan(&t.Id, &t.Name, &t.Content)
if err != nil {
return nil, err
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("templates"))
if b == nil {
return nil // empty bucket, return an empty list
}
templates = append(templates, t)
return b.ForEach(func(k, v []byte) error {
var t internal.CustomTemplate
if err := json.Unmarshal(v, &t); err != nil {
return err
}
templates = append(templates, t)
return nil
})
})
if err != nil {
return nil, err
}
return &templates, nil
}
func (s *Service) UpdateTemplate(ctx context.Context, t *internal.CustomTemplate) (*internal.CustomTemplate, error) {
conn, err := s.db.Conn(ctx)
data, err := json.Marshal(t)
if err != nil {
return nil, err
}
defer conn.Close()
err = s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("templates"))
if b == nil {
return fmt.Errorf("bucket templates not found")
}
return b.Put([]byte(t.Id), data)
})
_, err = conn.ExecContext(ctx, "UPDATE templates SET name = ?, content = ? WHERE id = ?", t.Name, t.Content, t.Id)
if err != nil {
return nil, err
}
@@ -150,28 +164,22 @@ func (s *Service) UpdateTemplate(ctx context.Context, t *internal.CustomTemplate
}
func (s *Service) DeleteTemplate(ctx context.Context, id string) error {
conn, err := s.db.Conn(ctx)
if err != nil {
return err
}
defer conn.Close()
_, err = conn.ExecContext(ctx, "DELETE FROM templates WHERE id = ?", id)
return err
return s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("templates"))
return b.Delete([]byte(id))
})
}
func (s *Service) GetVersion(ctx context.Context) (string, string, error) {
//TODO: load from release properties file, or anything else outside code
const CURRENT_RPC_VERSION = "3.2.5"
const CURRENT_RPC_VERSION = "3.2.6"
result := make(chan string, 1)
ctx, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
cmd := exec.CommandContext(ctx, config.Instance().DownloaderPath, "--version")
cmd := exec.CommandContext(ctx, config.Instance().Paths.DownloaderPath, "--version")
go func() {
stdout, _ := cmd.Output()
result <- string(stdout)
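
Not shown in the diff, but useful when reading it: the rewritten template methods store UUID-string keys with JSON-encoded internal.CustomTemplate values in the "templates" bucket. A sketch of a single-template lookup that would slot into the same file and reuse the imports added above (hypothetical helper, not an existing method):

func (s *Service) getTemplate(id string) (*internal.CustomTemplate, error) {
	var t internal.CustomTemplate

	err := s.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("templates"))
		if b == nil {
			return fmt.Errorf("bucket templates not found")
		}
		v := b.Get([]byte(id))
		if v == nil {
			return fmt.Errorf("template %s not found", id)
		}
		return json.Unmarshal(v, &t)
	})
	if err != nil {
		return nil, err
	}

	return &t, nil
}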

View File

@@ -3,14 +3,15 @@ package rpc
import (
"github.com/go-chi/chi/v5"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/livestream"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
middlewares "github.com/marcopiovanello/yt-dlp-web-ui/v3/server/middleware"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/openid"
)
// Dependency injection container.
func Container(db *internal.MemoryDB, mq *internal.MessageQueue, lm *livestream.Monitor) *Service {
func Container(db *kv.Store, mq *queue.MessageQueue, lm *livestream.Monitor) *Service {
return &Service{
db: db,
mq: mq,
@@ -21,10 +22,10 @@ func Container(db *internal.MemoryDB, mq *internal.MessageQueue, lm *livestream.
// RPC service must be registered before applying this router!
func ApplyRouter() func(chi.Router) {
return func(r chi.Router) {
if config.Instance().RequireAuth {
if config.Instance().Authentication.RequireAuth {
r.Use(middlewares.Authenticated)
}
if config.Instance().UseOpenId {
if config.Instance().OpenId.UseOpenId {
r.Use(openid.Middleware)
}
r.Get("/ws", WebSocket)

View File

@@ -6,18 +6,22 @@ import (
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/formats"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/livestream"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/playlist"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/sys"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/updater"
)
type Service struct {
db *internal.MemoryDB
mq *internal.MessageQueue
db *kv.Store
mq *queue.MessageQueue
lm *livestream.Monitor
}
type Running []internal.ProcessResponse
type Running []internal.ProcessSnapshot
type Pending []string
type NoArgs struct{}
@@ -25,26 +29,23 @@ type NoArgs struct{}
// Exec spawns a Process.
// The result of the execution is the newly spawned process Id.
func (s *Service) Exec(args internal.DownloadRequest, result *string) error {
p := &internal.Process{
Url: args.URL,
Params: args.Params,
Output: internal.DownloadOutput{
Path: args.Path,
Filename: args.Rename,
},
}
d := downloaders.NewGenericDownload(args.URL, args.Params)
d.SetOutput(internal.DownloadOutput{
Path: args.Path,
Filename: args.Rename,
})
s.db.Set(p)
s.mq.Publish(p)
s.db.Set(d)
s.mq.Publish(d)
*result = p.Id
*result = d.GetId()
return nil
}
// ExecPlaylist detects a playlist and spawns a Process for each of its entries.
// The result of the execution is the newly spawned process Id.
func (s *Service) ExecPlaylist(args internal.DownloadRequest, result *string) error {
err := internal.PlaylistDetect(args, s.mq, s.db)
err := playlist.PlaylistDetect(args, s.mq, s.db)
if err != nil {
return err
}
@@ -87,12 +88,12 @@ func (s *Service) KillAllLivestream(args NoArgs, result *struct{}) error {
// Progess retrieves the Progress of a specific Process given its Id
func (s *Service) Progess(args internal.DownloadRequest, progress *internal.DownloadProgress) error {
proc, err := s.db.Get(args.Id)
dl, err := s.db.Get(args.Id)
if err != nil {
return err
}
*progress = proc.Progress
*progress = dl.Status().Progress
return nil
}
@@ -106,7 +107,7 @@ func (s *Service) Formats(args internal.DownloadRequest, meta *formats.Metadata)
}
if metadata.IsPlaylist() {
go internal.PlaylistDetect(args, s.mq, s.db)
go playlist.PlaylistDetect(args, s.mq, s.db)
}
*meta = *metadata
@@ -129,22 +130,22 @@ func (s *Service) Running(args NoArgs, running *Running) error {
func (s *Service) Kill(args string, killed *string) error {
slog.Info("Trying killing process with id", slog.String("id", args))
proc, err := s.db.Get(args)
download, err := s.db.Get(args)
if err != nil {
return err
}
if proc == nil {
if download == nil {
return errors.New("nil process")
}
if err := proc.Kill(); err != nil {
slog.Info("failed killing process", slog.String("id", proc.Id), slog.Any("err", err))
if err := download.Stop(); err != nil {
slog.Info("failed killing process", slog.String("id", download.GetId()), slog.Any("err", err))
return err
}
s.db.Delete(proc.Id)
slog.Info("succesfully killed process", slog.String("id", proc.Id))
s.db.Delete(download.GetId())
slog.Info("succesfully killed process", slog.String("id", download.GetId()))
return nil
}
@@ -156,33 +157,33 @@ func (s *Service) KillAll(args NoArgs, killed *string) error {
var (
keys = s.db.Keys()
removeFunc = func(p *internal.Process) error {
defer s.db.Delete(p.Id)
return p.Kill()
removeFunc = func(d downloaders.Downloader) error {
defer s.db.Delete(d.GetId())
return d.Stop()
}
)
for _, key := range *keys {
proc, err := s.db.Get(key)
dl, err := s.db.Get(key)
if err != nil {
return err
}
if proc == nil {
if dl == nil {
s.db.Delete(key)
continue
}
if err := removeFunc(proc); err != nil {
if err := removeFunc(dl); err != nil {
slog.Info(
"failed killing process",
slog.String("id", proc.Id),
slog.String("id", dl.GetId()),
slog.Any("err", err),
)
continue
}
slog.Info("succesfully killed process", slog.String("id", proc.Id))
slog.Info("succesfully killed process", slog.String("id", dl.GetId()))
}
return nil
@@ -195,6 +196,35 @@ func (s *Service) Clear(args string, killed *string) error {
return nil
}
// Removes completed processes
func (s *Service) ClearCompleted(cleared *string) error {
var (
keys = s.db.Keys()
removeFunc = func(d downloaders.Downloader) error {
if !d.IsCompleted() {
return nil
}
defer s.db.Delete(d.GetId())
return d.Stop()
}
)
for _, key := range *keys {
proc, err := s.db.Get(key)
if err != nil {
return err
}
if err := removeFunc(proc); err != nil {
return err
}
}
return nil
}
// FreeSpace gets the available space from the sys util package
func (s *Service) FreeSpace(args NoArgs, free *uint64) error {
freeSpace, err := sys.FreeSpace()

View File

@@ -3,7 +3,6 @@ package server
import (
"context"
"database/sql"
"fmt"
"io"
"io/fs"
@@ -12,20 +11,18 @@ import (
"net/http"
"net/rpc"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/cors"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/archive"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/archiver"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/dbutil"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/filebrowser"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/livestream"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipeline"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/logging"
middlewares "github.com/marcopiovanello/yt-dlp-web-ui/v3/server/middleware"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/openid"
@@ -34,9 +31,10 @@ import (
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/status"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/task"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/twitch"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/user"
_ "modernc.org/sqlite"
bolt "go.etcd.io/bbolt"
)
type RunConfig struct {
@@ -45,19 +43,31 @@ type RunConfig struct {
}
type serverConfig struct {
frontend fs.FS
swagger fs.FS
mdb *internal.MemoryDB
db *sql.DB
mq *internal.MessageQueue
lm *livestream.Monitor
frontend fs.FS
swagger fs.FS
mdb *kv.Store
db *bolt.DB
mq *queue.MessageQueue
lm *livestream.Monitor
taskRunner task.TaskRunner
twitchMonitor *twitch.Monitor
}
// TODO: change scope
var observableLogger = logging.NewObservableLogger()
func RunBlocking(rc *RunConfig) {
mdb := internal.NewMemoryDB()
func Run(ctx context.Context, rc *RunConfig) error {
dbPath := filepath.Join(config.Instance().Paths.LocalDatabasePath, "bolt.db")
boltdb, err := bolt.Open(dbPath, 0600, nil)
if err != nil {
return err
}
mdb, err := kv.NewStore(boltdb, time.Second*15)
if err != nil {
return err
}
// ---- LOGGING ---------------------------------------------------
logWriters := []io.Writer{
@@ -68,10 +78,10 @@ func RunBlocking(rc *RunConfig) {
conf := config.Instance()
// file based logging
if conf.EnableFileLogging {
logger, err := logging.NewRotableLogger(conf.LogPath)
if conf.Logging.EnableFileLogging {
logger, err := logging.NewRotableLogger(conf.Logging.LogPath)
if err != nil {
panic(err)
return err
}
defer logger.Rotate()
@@ -94,54 +104,65 @@ func RunBlocking(rc *RunConfig) {
slog.SetDefault(logger)
// ----------------------------------------------------------------
db, err := sql.Open("sqlite", conf.LocalDatabasePath)
mq, err := queue.NewMessageQueue()
if err != nil {
slog.Error("failed to open database", slog.String("err", err.Error()))
}
if err := dbutil.Migrate(context.Background(), db); err != nil {
slog.Error("failed to init database", slog.String("err", err.Error()))
}
mq, err := internal.NewMessageQueue()
if err != nil {
panic(err)
return err
}
mq.SetupConsumers()
go mdb.Restore(mq)
go mdb.EventListener()
lm := livestream.NewMonitor(mq, mdb)
lm := livestream.NewMonitor(mq, mdb, boltdb)
go lm.Schedule()
go lm.Restore()
srv := newServer(serverConfig{
frontend: rc.App,
swagger: rc.Swagger,
mdb: mdb,
mq: mq,
db: db,
lm: lm,
})
tm := twitch.NewMonitor(
twitch.NewAuthenticationManager(
config.Instance().Twitch.ClientId,
config.Instance().Twitch.ClientSecret,
),
boltdb,
)
go tm.Monitor(
ctx,
config.Instance().Twitch.CheckInterval,
twitch.DEFAULT_DOWNLOAD_HANDLER(mdb, mq),
)
go tm.Restore()
go gracefulShutdown(srv, mdb)
go autoPersist(time.Minute*5, mdb, lm)
cronTaskRunner := task.NewCronTaskRunner(mq, mdb)
go cronTaskRunner.Spawner(ctx)
scfg := serverConfig{
frontend: rc.App,
swagger: rc.Swagger,
mdb: mdb,
db: boltdb,
mq: mq,
lm: lm,
twitchMonitor: tm,
taskRunner: cronTaskRunner,
}
srv := newServer(scfg)
go gracefulShutdown(ctx, srv, &scfg)
var (
network = "tcp"
address = fmt.Sprintf("%s:%d", conf.Host, conf.Port)
address = fmt.Sprintf("%s:%d", conf.Server.Host, conf.Server.Port)
)
// support unix sockets
if strings.HasPrefix(conf.Host, "/") {
if strings.HasPrefix(conf.Server.Host, "/") {
network = "unix"
address = conf.Host
address = conf.Server.Host
}
listener, err := net.Listen(network, address)
if err != nil {
slog.Error("failed to listen", slog.String("err", err.Error()))
return
return err
}
slog.Info("yt-dlp-webui started", slog.String("address", address))
@@ -149,14 +170,12 @@ func RunBlocking(rc *RunConfig) {
if err := srv.Serve(listener); err != nil {
slog.Warn("http server stopped", slog.String("err", err.Error()))
}
return nil
}
func newServer(c serverConfig) *http.Server {
archiver.Register(c.db)
cronTaskRunner := task.NewCronTaskRunner(c.mq, c.mdb)
go cronTaskRunner.Spawner(context.TODO())
// archiver.Register(c.db)
service := ytdlpRPC.Container(c.mdb, c.mq, c.lm)
rpc.Register(service)
@@ -180,7 +199,7 @@ func newServer(c serverConfig) *http.Server {
// use in dev
// r.Use(middleware.Logger)
baseUrl := config.Instance().BaseURL
baseUrl := config.Instance().Server.BaseURL
r.Mount(baseUrl+"/", http.StripPrefix(baseUrl, http.FileServerFS(c.frontend)))
// swagger
@@ -188,12 +207,7 @@ func newServer(c serverConfig) *http.Server {
// Filebrowser routes
r.Route("/filebrowser", func(r chi.Router) {
if config.Instance().RequireAuth {
r.Use(middlewares.Authenticated)
}
if config.Instance().UseOpenId {
r.Use(openid.Middleware)
}
r.Use(middlewares.ApplyAuthenticationByConfig)
r.Post("/downloaded", filebrowser.ListDownloaded)
r.Post("/delete", filebrowser.DeleteFile)
r.Get("/d/{id}", filebrowser.DownloadFile)
@@ -202,7 +216,7 @@ func newServer(c serverConfig) *http.Server {
})
// Archive routes
r.Route("/archive", archive.ApplyRouter(c.db))
// r.Route("/archive", archive.ApplyRouter(c.db))
// Authentication routes
r.Route("/auth", func(r chi.Router) {
@@ -224,6 +238,7 @@ func newServer(c serverConfig) *http.Server {
DB: c.db,
MDB: c.mdb,
MQ: c.mq,
LM: c.lm,
}))
// Logging
@@ -233,41 +248,35 @@ func newServer(c serverConfig) *http.Server {
r.Route("/status", status.ApplyRouter(c.mdb))
// Subscriptions
r.Route("/subscriptions", subscription.Container(c.db, cronTaskRunner).ApplyRouter())
r.Route("/subscriptions", subscription.Container(c.db, c.taskRunner).ApplyRouter())
// Twitch
r.Route("/twitch", func(r chi.Router) {
r.Use(middlewares.ApplyAuthenticationByConfig)
r.Get("/users", twitch.GetMonitoredUsers(c.twitchMonitor))
r.Post("/user", twitch.MonitorUserHandler(c.twitchMonitor))
r.Delete("/user/{user}", twitch.DeleteUser(c.twitchMonitor))
})
// Pipelines
r.Route("/pipelines", func(r chi.Router) {
h := pipeline.NewRestHandler(c.db)
r.Use(middlewares.ApplyAuthenticationByConfig)
r.Get("/id/{id}", h.GetPipeline)
r.Get("/all", h.GetAllPipelines)
r.Post("/", h.SavePipeline)
r.Delete("/id/{id}", h.DeletePipeline)
})
return &http.Server{Handler: r}
}
func gracefulShutdown(srv *http.Server, db *internal.MemoryDB) {
ctx, stop := signal.NotifyContext(context.Background(),
os.Interrupt,
syscall.SIGTERM,
syscall.SIGQUIT,
)
func gracefulShutdown(ctx context.Context, srv *http.Server, cfg *serverConfig) {
<-ctx.Done()
slog.Info("shutdown signal received")
go func() {
<-ctx.Done()
slog.Info("shutdown signal received")
defer func() {
db.Persist()
stop()
srv.Shutdown(context.Background())
}()
defer func() {
cfg.db.Close()
srv.Shutdown(context.Background())
}()
}
func autoPersist(d time.Duration, db *internal.MemoryDB, lm *livestream.Monitor) {
for {
if err := db.Persist(); err != nil {
slog.Warn("failed to persisted session", slog.Any("err", err))
}
if err := lm.Persist(); err != nil {
slog.Warn(
"failed to persisted livestreams monitor session", slog.Any("err", err.Error()))
}
slog.Debug("sucessfully persisted session")
time.Sleep(d)
}
}
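
Run now takes the caller's context instead of installing its own signal handler, so the shutdown wiring moves to main. A hypothetical entrypoint sketch (the import path and the embedded filesystems are assumptions; RunConfig's App and Swagger fields are the ones referenced above):

package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server"
)

func main() {
	// cancellation flows in from the caller; gracefulShutdown waits on ctx.Done()
	ctx, stop := signal.NotifyContext(context.Background(),
		os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
	defer stop()

	// placeholder filesystems: the real build embeds the frontend and swagger assets
	rc := &server.RunConfig{
		App:     os.DirFS("./frontend/dist"),
		Swagger: os.DirFS("./swagger"),
	}

	if err := server.Run(ctx, rc); err != nil {
		log.Fatal(err)
	}
}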

View File

@@ -5,11 +5,12 @@ import (
"slices"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/status/domain"
)
type Repository struct {
mdb *internal.MemoryDB
mdb *kv.Store
}
// DownloadSpeed implements domain.Repository.
@@ -29,7 +30,7 @@ func (r *Repository) DownloadSpeed(ctx context.Context) int64 {
func (r *Repository) Completed(ctx context.Context) int {
processes := r.mdb.All()
completed := slices.DeleteFunc(*processes, func(p internal.ProcessResponse) bool {
completed := slices.DeleteFunc(*processes, func(p internal.ProcessSnapshot) bool {
return p.Progress.Status != internal.StatusCompleted
})
@@ -40,7 +41,7 @@ func (r *Repository) Completed(ctx context.Context) int {
func (r *Repository) Downloading(ctx context.Context) int {
processes := r.mdb.All()
downloading := slices.DeleteFunc(*processes, func(p internal.ProcessResponse) bool {
downloading := slices.DeleteFunc(*processes, func(p internal.ProcessSnapshot) bool {
return p.Progress.Status != internal.StatusDownloading
})
@@ -51,14 +52,14 @@ func (r *Repository) Downloading(ctx context.Context) int {
func (r *Repository) Pending(ctx context.Context) int {
processes := r.mdb.All()
pending := slices.DeleteFunc(*processes, func(p internal.ProcessResponse) bool {
pending := slices.DeleteFunc(*processes, func(p internal.ProcessSnapshot) bool {
return p.Progress.Status != internal.StatusPending
})
return len(pending)
}
func New(mdb *internal.MemoryDB) domain.Repository {
func New(mdb *kv.Store) domain.Repository {
return &Repository{
mdb: mdb,
}

View File

@@ -2,16 +2,16 @@ package status
import (
"github.com/go-chi/chi/v5"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/status/repository"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/status/rest"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/status/service"
)
func ApplyRouter(mdb *internal.MemoryDB) func(chi.Router) {
func ApplyRouter(mdb *kv.Store) func(chi.Router) {
var (
r = repository.New(mdb)
s = service.New(r, nil) //TODO: nil, wtf?
s = service.New(r, nil)
h = rest.New(s)
)

View File

@@ -1,13 +1,13 @@
package subscription
import (
"database/sql"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/domain"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/task"
bolt "go.etcd.io/bbolt"
)
func Container(db *sql.DB, runner task.TaskRunner) domain.RestHandler {
func Container(db *bolt.DB, runner task.TaskRunner) domain.RestHandler {
var (
r = provideRepository(db)
s = provideService(r, runner)

View File

@@ -1,7 +1,6 @@
package subscription
import (
"database/sql"
"sync"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/domain"
@@ -9,6 +8,8 @@ import (
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/rest"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/service"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/task"
bolt "go.etcd.io/bbolt"
)
var (
@@ -21,7 +22,7 @@ var (
handOnce sync.Once
)
func provideRepository(db *sql.DB) domain.Repository {
func provideRepository(db *bolt.DB) domain.Repository {
repoOnce.Do(func() {
repo = repository.New(db)
})

View File

@@ -2,131 +2,142 @@ package repository
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"github.com/google/uuid"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/data"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/domain"
bolt "go.etcd.io/bbolt"
)
var bucketName = []byte("subscriptions")
type Repository struct {
db *sql.DB
db *bolt.DB
}
// Delete implements domain.Repository.
func (r *Repository) Delete(ctx context.Context, id string) error {
conn, err := r.db.Conn(ctx)
if err != nil {
return err
}
defer conn.Close()
_, err = conn.ExecContext(ctx, "DELETE FROM subscriptions WHERE id = ?", id)
return err
return r.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketName)
return b.Delete([]byte(id))
})
}
// GetCursor implements domain.Repository.
func (r *Repository) GetCursor(ctx context.Context, id string) (int64, error) {
conn, err := r.db.Conn(ctx)
func (s *Repository) GetCursor(ctx context.Context, id string) (int64, error) {
var cursor int64
err := s.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("subscriptions"))
v := b.Get([]byte(id))
if v == nil {
return fmt.Errorf("subscription %s not found", id)
}
var data struct {
Cursor int64 `json:"cursor"`
}
if err := json.Unmarshal(v, &data); err != nil {
return err
}
cursor = data.Cursor
return nil
})
if err != nil {
return -1, err
}
defer conn.Close()
row := conn.QueryRowContext(ctx, "SELECT rowid FROM subscriptions WHERE id = ?", id)
var rowId int64
if err := row.Scan(&rowId); err != nil {
return -1, err
}
return rowId, nil
return cursor, nil
}
// List implements domain.Repository.
func (r *Repository) List(ctx context.Context, start int64, limit int) (*[]data.Subscription, error) {
conn, err := r.db.Conn(ctx)
var subs []data.Subscription
err := r.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketName)
return b.ForEach(func(k, v []byte) error {
var sub data.Subscription
if err := json.Unmarshal(v, &sub); err != nil {
return err
}
subs = append(subs, sub)
return nil
})
})
if err != nil {
return nil, err
}
defer conn.Close()
var elements []data.Subscription
rows, err := conn.QueryContext(ctx, "SELECT rowid, * FROM subscriptions WHERE rowid > ? LIMIT ?", start, limit)
if err != nil {
return nil, err
}
for rows.Next() {
var rowId int64
var element data.Subscription
if err := rows.Scan(
&rowId,
&element.Id,
&element.URL,
&element.Params,
&element.CronExpr,
); err != nil {
return &elements, err
}
elements = append(elements, element)
}
return &elements, nil
return &subs, nil
}
// Submit implements domain.Repository.
func (r *Repository) Submit(ctx context.Context, sub *data.Subscription) (*data.Subscription, error) {
conn, err := r.db.Conn(ctx)
func (s *Repository) Submit(ctx context.Context, sub *data.Subscription) (*data.Subscription, error) {
if sub.Id == "" {
sub.Id = uuid.NewString()
}
data, err := json.Marshal(sub)
if err != nil {
return nil, err
}
defer conn.Close()
err = s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("subscriptions"))
return b.Put([]byte(sub.Id), data)
})
_, err = conn.ExecContext(
ctx,
"INSERT INTO subscriptions (id, url, params, cron) VALUES (?, ?, ?, ?)",
uuid.NewString(),
sub.URL,
sub.Params,
sub.CronExpr,
)
if err != nil {
return nil, err
}
return sub, err
return sub, nil
}
// UpdateByExample implements domain.Repository.
func (r *Repository) UpdateByExample(ctx context.Context, example *data.Subscription) error {
conn, err := r.db.Conn(ctx)
if err != nil {
return err
}
func (s *Repository) UpdateByExample(ctx context.Context, example *data.Subscription) error {
return s.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("subscriptions"))
defer conn.Close()
return b.ForEach(func(k, v []byte) error {
var sub data.Subscription
if err := json.Unmarshal(v, &sub); err != nil {
return err
}
_, err = conn.ExecContext(
ctx,
"UPDATE subscriptions SET url = ?, params = ?, cron = ? WHERE id = ? OR url = ?",
example.URL,
example.Params,
example.CronExpr,
example.Id,
example.URL,
)
if sub.Id == example.Id || sub.URL == example.URL {
// update the fields
sub.URL = example.URL
sub.Params = example.Params
sub.CronExpr = example.CronExpr
return err
data, err := json.Marshal(sub)
if err != nil {
return err
}
if err := b.Put(k, data); err != nil {
return err
}
}
return nil
})
})
}
func New(db *sql.DB) domain.Repository {
func New(db *bolt.DB) domain.Repository {
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucketName)
return err
})
return &Repository{
db: db,
}

View File

@@ -19,10 +19,10 @@ type RestHandler struct {
// ApplyRouter implements domain.RestHandler.
func (h *RestHandler) ApplyRouter() func(chi.Router) {
return func(r chi.Router) {
if config.Instance().RequireAuth {
if config.Instance().Authentication.RequireAuth {
r.Use(middlewares.Authenticated)
}
if config.Instance().UseOpenId {
if config.Instance().OpenId.UseOpenId {
r.Use(openid.Middleware)
}

View File

@@ -53,6 +53,7 @@ func toDB(dto *domain.Subscription) data.Subscription {
// Delete implements domain.Service.
func (s *Service) Delete(ctx context.Context, id string) error {
s.runner.StopTask(id)
return s.r.Delete(ctx, id)
}

View File

@@ -7,11 +7,13 @@ import (
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/archive"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/subscription/domain"
"github.com/robfig/cron/v3"
)
@@ -19,33 +21,36 @@ import (
type TaskRunner interface {
Submit(subcription *domain.Subscription) error
Spawner(ctx context.Context)
StopTask(id string) error
Recoverer()
}
type taskPair struct {
type monitorTask struct {
Done chan struct{}
Schedule cron.Schedule
Subscription *domain.Subscription
}
type CronTaskRunner struct {
mq *internal.MessageQueue
db *internal.MemoryDB
mq *queue.MessageQueue
db *kv.Store
tasks chan taskPair
tasks chan monitorTask
errors chan error
running map[string]*monitorTask
}
func NewCronTaskRunner(mq *internal.MessageQueue, db *internal.MemoryDB) TaskRunner {
func NewCronTaskRunner(mq *queue.MessageQueue, db *kv.Store) TaskRunner {
return &CronTaskRunner{
mq: mq,
db: db,
tasks: make(chan taskPair),
errors: make(chan error),
mq: mq,
db: db,
tasks: make(chan monitorTask),
errors: make(chan error),
running: make(map[string]*monitorTask),
}
}
const commandTemplate = "-I1 --flat-playlist --print webpage_url $1"
var argsSplitterRe = regexp.MustCompile(`(?mi)[^\s"']+|"([^"]*)"|'([^']*)'`)
func (t *CronTaskRunner) Submit(subcription *domain.Subscription) error {
@@ -54,7 +59,8 @@ func (t *CronTaskRunner) Submit(subcription *domain.Subscription) error {
return err
}
job := taskPair{
job := monitorTask{
Done: make(chan struct{}),
Schedule: schedule,
Subscription: subcription,
}
@@ -64,54 +70,110 @@ func (t *CronTaskRunner) Submit(subcription *domain.Subscription) error {
return nil
}
// Handles the entire lifecycle of a monitor job.
func (t *CronTaskRunner) Spawner(ctx context.Context) {
for task := range t.tasks {
for req := range t.tasks {
t.running[req.Subscription.Id] = &req // keep track of the current job
go func() {
ctx, cancel := context.WithCancel(ctx) // inject a cancellation signal into the job's context
fetcherEvents := t.doFetch(ctx, &req) // retrieve the channel of events of the job
for {
slog.Info("fetching latest video for channel", slog.String("channel", task.Subscription.URL))
fetcherParams := strings.Split(strings.Replace(commandTemplate, "$1", task.Subscription.URL, 1), " ")
cmd := exec.CommandContext(
ctx,
config.Instance().DownloaderPath,
fetcherParams...,
)
stdout, err := cmd.Output()
if err != nil {
t.errors <- err
select {
case <-req.Done:
slog.Info("stopping cron job and removing schedule", slog.String("url", req.Subscription.URL))
cancel()
return
case <-fetcherEvents:
slog.Info("finished monitoring channel", slog.String("url", req.Subscription.URL))
}
latestChannelURL := string(bytes.Trim(stdout, "\n"))
p := &internal.Process{
Url: latestChannelURL,
Params: append(argsSplitterRe.FindAllString(task.Subscription.Params, 1), []string{
"--download-archive",
filepath.Join(config.Instance().Dir(), "archive.txt"),
}...),
AutoRemove: true,
}
t.db.Set(p)
t.mq.Publish(p)
nextSchedule := time.Until(task.Schedule.Next(time.Now()))
slog.Info(
"cron task runner next schedule",
slog.String("url", task.Subscription.URL),
slog.Any("duration", nextSchedule),
)
time.Sleep(nextSchedule)
}
}()
}
}
func (t *CronTaskRunner) Recoverer() {
panic("Unimplemented")
// Stop a currently scheduled job
func (t *CronTaskRunner) StopTask(id string) error {
task := t.running[id]
if task != nil {
t.running[id].Done <- struct{}{}
delete(t.running, id)
}
return nil
}
// Start a fetcher and notify on a channel when a fetcher has completed
func (t *CronTaskRunner) doFetch(ctx context.Context, req *monitorTask) <-chan struct{} {
completed := make(chan struct{})
// generator func
go func() {
for {
sleepFor := t.fetcher(ctx, req)
completed <- struct{}{}
time.Sleep(sleepFor)
}
}()
return completed
}
// Perform the retrieval of the latest video of the channel.
// Returns a time.Duration containing the amount of time until the next schedule.
func (t *CronTaskRunner) fetcher(ctx context.Context, req *monitorTask) time.Duration {
slog.Info("fetching latest video for channel", slog.String("channel", req.Subscription.URL))
nextSchedule := time.Until(req.Schedule.Next(time.Now()))
cmd := exec.CommandContext(
ctx,
config.Instance().Paths.DownloaderPath,
"-I1",
"--flat-playlist",
"--print", "webpage_url",
req.Subscription.URL,
)
stdout, err := cmd.Output()
if err != nil {
t.errors <- err
return time.Duration(0)
}
latestVideoURL := string(bytes.Trim(stdout, "\n"))
// if the download already exists there's no point in sending it into the message queue.
exists, err := archive.DownloadExists(ctx, latestVideoURL)
if exists && err == nil {
return nextSchedule
}
// TODO: autoremove hook
d := downloaders.NewGenericDownload(
latestVideoURL,
append(
argsSplitterRe.FindAllString(req.Subscription.Params, 1),
[]string{
"--break-on-existing",
"--download-archive",
filepath.Join(config.Instance().Dir(), "archive.txt"),
}...),
)
t.db.Set(d) // give it an id
t.mq.Publish(d) // send it to the message queue waiting to be processed
slog.Info(
"cron task runner next schedule",
slog.String("url", req.Subscription.URL),
slog.Any("duration", nextSchedule),
)
return nextSchedule
}
func (t *CronTaskRunner) Recoverer() {
panic("unimplemented")
}
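
The new lifecycle boils down to a per-task Done channel tracked in the running map, plus a fetch loop that reports each completed cycle. A minimal, self-contained sketch of that cancellation pattern, independent of the project's types:

package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})   // what StopTask signals for a running id
	events := make(chan struct{}) // what doFetch emits per completed cycle

	// stand-in for the fetcher loop
	go func() {
		for {
			time.Sleep(100 * time.Millisecond)
			events <- struct{}{}
		}
	}()

	// stand-in for the per-task loop inside Spawner
	go func() {
		for {
			select {
			case <-done:
				fmt.Println("stopping cron job and removing schedule")
				return
			case <-events:
				fmt.Println("finished monitoring channel")
			}
		}
	}()

	time.Sleep(350 * time.Millisecond)
	done <- struct{}{}
	time.Sleep(50 * time.Millisecond)
}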

View File

@@ -14,7 +14,7 @@ import (
// FreeSpace gets the available bytes writable to the download directory
func FreeSpace() (uint64, error) {
var stat unix.Statfs_t
unix.Statfs(config.Instance().DownloadPath, &stat)
unix.Statfs(config.Instance().Paths.DownloadPath, &stat)
return (stat.Bavail * uint64(stat.Bsize)), nil
}
@@ -27,7 +27,7 @@ func DirectoryTree() (*[]string, error) {
}
var (
rootPath = config.Instance().DownloadPath
rootPath = config.Instance().Paths.DownloadPath
stack = internal.NewStack[Node]()
flattened = make([]string, 0)

server/twitch/auth.go (new file, 75 lines)
View File

@@ -0,0 +1,75 @@
package twitch
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
)
const authURL = "https://id.twitch.tv/oauth2/token"
type AuthResponse struct {
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
TokenType string `json:"token_type"`
}
type AccessToken struct {
Token string
Expiry time.Time
}
type AuthenticationManager struct {
clientId string
clientSecret string
accesToken *AccessToken
}
func NewAuthenticationManager(clientId, clientSecret string) *AuthenticationManager {
return &AuthenticationManager{
clientId: clientId,
clientSecret: clientSecret,
accesToken: &AccessToken{},
}
}
func (a *AuthenticationManager) GetAccessToken() (*AccessToken, error) {
if a.accesToken != nil && a.accesToken.Token != "" && a.accesToken.Expiry.After(time.Now()) {
return a.accesToken, nil
}
data := url.Values{}
data.Set("client_id", a.clientId)
data.Set("client_secret", a.clientSecret)
data.Set("grant_type", "client_credentials")
resp, err := http.PostForm(authURL, data)
if err != nil {
return nil, fmt.Errorf("errore richiesta token: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("status non OK: %s", resp.Status)
}
var auth AuthResponse
if err := json.NewDecoder(resp.Body).Decode(&auth); err != nil {
return nil, fmt.Errorf("errore decoding JSON: %w", err)
}
token := &AccessToken{
Token: auth.AccessToken,
Expiry: time.Now().Add(time.Duration(auth.ExpiresIn) * time.Second),
}
a.accesToken = token
return token, nil
}
func (a *AuthenticationManager) GetClientId() string {
return a.clientId
}
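
A sketch of the client-credentials flow above from a caller's point of view; the credentials are placeholders (the changeset reads them from config.Instance().Twitch) and the import path is assumed from the module layout:

package main

import (
	"fmt"
	"log"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/twitch"
)

func main() {
	am := twitch.NewAuthenticationManager("my-client-id", "my-client-secret")

	// the first call POSTs to id.twitch.tv; later calls reuse the cached
	// token until Expiry has passed
	token, err := am.GetAccessToken()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(token.Expiry)
}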

server/twitch/client.go (new file, 91 lines)
View File

@@ -0,0 +1,91 @@
package twitch
import (
"encoding/json"
"io"
"net/http"
"time"
)
const twitchAPIURL = "https://api.twitch.tv/helix"
type Client struct {
authenticationManager AuthenticationManager
}
func NewTwitchClient(am *AuthenticationManager) *Client {
return &Client{
authenticationManager: *am,
}
}
type streamResp struct {
Data []struct {
ID string `json:"id"`
UserName string `json:"user_name"`
Title string `json:"title"`
GameName string `json:"game_name"`
StartedAt string `json:"started_at"`
} `json:"data"`
}
func (c *Client) doRequest(endpoint string, params map[string]string) ([]byte, error) {
token, err := c.authenticationManager.GetAccessToken()
if err != nil {
return nil, err
}
reqURL := twitchAPIURL + endpoint
req, err := http.NewRequest("GET", reqURL, nil)
if err != nil {
return nil, err
}
q := req.URL.Query()
for k, v := range params {
q.Set(k, v)
}
req.URL.RawQuery = q.Encode()
req.Header.Set("Client-Id", c.authenticationManager.GetClientId())
req.Header.Set("Authorization", "Bearer "+token.Token)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return io.ReadAll(resp.Body)
}
func (c *Client) PollStream(channel string, liveChannel chan<- *StreamInfo) error {
body, err := c.doRequest("/streams", map[string]string{"user_login": channel})
if err != nil {
return err
}
var sr streamResp
if err := json.Unmarshal(body, &sr); err != nil {
return err
}
if len(sr.Data) == 0 {
liveChannel <- &StreamInfo{UserName: channel, IsLive: false}
return nil
}
s := sr.Data[0]
started, _ := time.Parse(time.RFC3339, s.StartedAt)
liveChannel <- &StreamInfo{
ID: s.ID,
UserName: s.UserName,
Title: s.Title,
GameName: s.GameName,
StartedAt: started,
IsLive: true,
}
return nil
}
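
PollStream sends exactly one StreamInfo on the channel before returning, so a single-goroutine caller needs a buffered channel (the Monitor below uses a buffer of 16). A small usage sketch with placeholder credentials and channel name:

package main

import (
	"fmt"
	"log"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/twitch"
)

func main() {
	am := twitch.NewAuthenticationManager("my-client-id", "my-client-secret")
	c := twitch.NewTwitchClient(am)

	live := make(chan *twitch.StreamInfo, 1) // buffered: PollStream sends before returning
	if err := c.PollStream("somechannel", live); err != nil {
		log.Fatal(err)
	}

	info := <-live
	fmt.Println(info.UserName, info.IsLive)
}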

server/twitch/monitor.go (new file, 169 lines)
View File

@@ -0,0 +1,169 @@
package twitch
import (
"context"
"fmt"
"iter"
"log/slog"
"maps"
"path/filepath"
"sync"
"time"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/downloaders"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/kv"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/pipes"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/internal/queue"
bolt "go.etcd.io/bbolt"
)
var bucket = []byte("twitch-monitor")
type Monitor struct {
liveChannel chan *StreamInfo
monitored map[string]*Client
lastState map[string]bool
mu sync.RWMutex
db *bolt.DB
authenticationManager *AuthenticationManager
}
func NewMonitor(authenticationManager *AuthenticationManager, db *bolt.DB) *Monitor {
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucket)
return err
})
return &Monitor{
liveChannel: make(chan *StreamInfo, 16),
monitored: make(map[string]*Client),
lastState: make(map[string]bool),
authenticationManager: authenticationManager,
db: db,
}
}
func (m *Monitor) Add(user string) {
m.mu.Lock()
m.monitored[user] = NewTwitchClient(m.authenticationManager)
m.mu.Unlock()
m.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
//TODO: the empty byte array will be replaced with configs per user
err := b.Put([]byte(user), []byte(""))
return err
})
slog.Info("added user to twitch monitor", slog.String("user", user))
}
func (m *Monitor) Monitor(ctx context.Context, interval time.Duration, handler func(user string) error) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
m.mu.RLock()
for user, client := range m.monitored {
u := user
c := client
go func() {
if err := c.PollStream(u, m.liveChannel); err != nil {
slog.Error("polling failed", slog.String("user", u), slog.Any("err", err))
}
}()
}
m.mu.RUnlock()
case stream := <-m.liveChannel:
wasLive := m.lastState[stream.UserName]
if stream.IsLive && !wasLive {
slog.Info("stream went live", slog.String("user", stream.UserName))
if err := handler(stream.UserName); err != nil {
slog.Error("handler failed", slog.String("user", stream.UserName), slog.Any("err", err))
}
}
m.lastState[stream.UserName] = stream.IsLive
case <-ctx.Done():
slog.Info("stopping twitch monitor")
return
}
}
}
func (m *Monitor) GetMonitoredUsers() iter.Seq[string] {
m.mu.RLock()
defer m.mu.RUnlock()
return maps.Keys(m.monitored)
}
func (m *Monitor) DeleteUser(user string) {
m.mu.Lock()
delete(m.monitored, user)
delete(m.lastState, user)
m.mu.Unlock()
m.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
err := b.Delete([]byte(user))
return err
})
}
func DEFAULT_DOWNLOAD_HANDLER(db *kv.Store, mq *queue.MessageQueue) func(user string) error {
return func(user string) error {
var (
url = fmt.Sprintf("https://www.twitch.tv/%s", user)
filename = filepath.Join(
config.Instance().Paths.DownloadPath,
fmt.Sprintf("%s (live) %s", user, time.Now().Format(time.ANSIC)),
)
ext = ".webm"
path = filename + ext
)
d := downloaders.NewLiveStreamDownloader(url, []pipes.Pipe{
&pipes.Transcoder{
Args: []string{
"-c:a", "libopus",
"-c:v", "libsvtav1",
"-crf", "30",
"-preset", "7",
},
},
&pipes.FileWriter{
Path: path,
IsFinal: true,
},
})
db.Set(d)
mq.Publish(d)
return nil
}
}
func (m *Monitor) Restore() error {
var users []string
m.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
return b.ForEach(func(k, v []byte) error {
users = append(users, string(k))
return nil
})
})
m.monitored = make(map[string]*Client)
for _, user := range users {
m.monitored[user] = NewTwitchClient(m.authenticationManager)
}
return nil
}
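
The monitor accepts any func(user string) error as its go-live handler; DEFAULT_DOWNLOAD_HANDLER is just the one that enqueues a livestream download. A sketch wiring it with a plain logging handler instead (credentials, channel name and paths are placeholders):

package main

import (
	"context"
	"log/slog"
	"time"

	"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/twitch"
	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("bolt.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	tm := twitch.NewMonitor(twitch.NewAuthenticationManager("id", "secret"), db)
	tm.Add("somechannel")

	// a handler only needs to react to the user going live
	go tm.Monitor(context.Background(), time.Minute, func(user string) error {
		slog.Info("stream went live", slog.String("user", user))
		return nil
	})

	time.Sleep(10 * time.Minute) // keep the sketch alive long enough to observe a few polls
}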

server/twitch/rest.go (new file, 65 lines)
View File

@@ -0,0 +1,65 @@
package twitch
import (
"encoding/json"
"net/http"
"slices"
"github.com/go-chi/chi/v5"
)
type addUserReq struct {
User string `json:"user"`
}
func MonitorUserHandler(m *Monitor) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req addUserReq
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
m.Add(req.User)
if err := json.NewEncoder(w).Encode("ok"); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func GetMonitoredUsers(m *Monitor) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
it := m.GetMonitoredUsers()
users := slices.Collect(it)
if users == nil {
users = make([]string, 0)
}
if err := json.NewEncoder(w).Encode(users); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
func DeleteUser(m *Monitor) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
user := chi.URLParam(r, "user")
if user == "" {
http.Error(w, "empty user", http.StatusBadRequest)
return
}
m.DeleteUser(user)
if err := json.NewEncoder(w).Encode("ok"); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}

server/twitch/types.go (new file, 20 lines)
View File

@@ -0,0 +1,20 @@
package twitch
import "time"
type StreamInfo struct {
ID string
UserName string
Title string
GameName string
StartedAt time.Time
IsLive bool
}
type VodInfo struct {
ID string
Title string
URL string
Duration string
CreatedAt time.Time
}

View File

@@ -8,7 +8,7 @@ import (
// Update using the builtin function of yt-dlp
func UpdateExecutable() error {
cmd := exec.Command(config.Instance().DownloaderPath, "-U")
cmd := exec.Command(config.Instance().Paths.DownloaderPath, "-U")
err := cmd.Start()
if err != nil {

View File

@@ -8,6 +8,7 @@ import (
"github.com/golang-jwt/jwt/v5"
"github.com/marcopiovanello/yt-dlp-web-ui/v3/server/config"
"golang.org/x/crypto/bcrypt"
)
const TOKEN_COOKIE_NAME = "jwt-yt-dlp-webui"
@@ -26,11 +27,17 @@ func Login(w http.ResponseWriter, r *http.Request) {
}
var (
username = config.Instance().Username
password = config.Instance().Password
username = config.Instance().Authentication.Username
passwordHash = config.Instance().Authentication.PasswordHash
)
if username != req.Username || password != req.Password {
err := bcrypt.CompareHashAndPassword([]byte(passwordHash), []byte(req.Password))
if err != nil {
http.Error(w, "invalid username or password", http.StatusBadRequest)
return
}
if username != req.Username {
http.Error(w, "invalid username or password", http.StatusBadRequest)
return
}
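
The login handler now compares a stored bcrypt hash instead of a plaintext password, so the configured value has to be a hash. A small sketch for generating one to put into the Authentication.PasswordHash setting (the password is a placeholder):

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	hash, err := bcrypt.GenerateFromPassword([]byte("my-secret-password"), bcrypt.DefaultCost)
	if err != nil {
		log.Fatal(err)
	}

	// paste the output into the configuration as the password hash
	fmt.Println(string(hash))
}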