Compare commits


5 Commits
main ... deploy

8 changed files with 403 additions and 25 deletions

Caddyfile (new file)

@@ -0,0 +1,113 @@
# VidRip Caddyfile
# This configuration serves the frontend and proxies API requests to the backend
#
# NOTE: This file will be auto-generated by start-production.sh if it doesn't exist.
# The script will prompt you for your domain name.
#
# Manual Usage:
# 1. Install Caddy: https://caddyserver.com/docs/install
# 2. Update 'your-domain.com' below with your actual domain
# 3. Run: caddy run (for testing) or caddy start (background)
# 4. Or use systemd service (see DEPLOYMENT.md)
#
# Features:
# - Automatic HTTPS with Let's Encrypt
# - Reverse proxy to backend API
# - Static file serving for frontend
# - Compression enabled
# - Security headers
# Replace with your domain or use :80 for localhost
your-domain.com {
    # Enable compression
    encode gzip zstd

    # Security headers
    header {
        # Enable HSTS (forces HTTPS)
        Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
        # Prevent clickjacking
        X-Frame-Options "SAMEORIGIN"
        # Prevent MIME type sniffing
        X-Content-Type-Options "nosniff"
        # Enable XSS protection
        X-XSS-Protection "1; mode=block"
        # Referrer policy
        Referrer-Policy "strict-origin-when-cross-origin"
        # Remove server header for security
        -Server
    }

    # API routes - proxy to backend
    handle /api/* {
        reverse_proxy localhost:3001 {
            # Health check
            health_uri /api/health
            health_interval 10s
            health_timeout 5s
            # Headers
            header_up Host {host}
            header_up X-Real-IP {remote_host}
            header_up X-Forwarded-For {remote_host}
            header_up X-Forwarded-Proto {scheme}
        }
    }

    # Serve frontend static files
    handle {
        # Root directory is the frontend build output
        root * /var/www/vidrip

        # Try files first, fall back to index.html for SPA routing
        try_files {path} /index.html

        # Serve files
        file_server

        # Cache static assets
        @static {
            path *.js *.css *.woff *.woff2 *.ttf *.eot *.ico *.png *.jpg *.jpeg *.gif *.svg *.webp
        }
        header @static {
            Cache-Control "public, max-age=31536000, immutable"
        }

        # Don't cache index.html
        @html {
            path *.html
        }
        header @html {
            Cache-Control "no-cache, no-store, must-revalidate"
        }
    }

    # Logging
    log {
        output file /var/log/caddy/vidrip-access.log {
            roll_size 100mb
            roll_keep 10
        }
        format json
    }
}

# Alternative configuration for local development/testing without a domain
# Uncomment this and comment out the domain configuration above
# :80 {
#     encode gzip zstd
#
#     handle /api/* {
#         reverse_proxy localhost:3001
#     }
#
#     handle {
#         root * /var/www/vidrip
#         try_files {path} /index.html
#         file_server
#     }
#
#     log {
#         output stdout
#     }
# }

View File

@@ -13,6 +13,9 @@ chmod +x start-production.sh stop-production.sh
# Start the service
./start-production.sh
# When prompted, enter your domain name (e.g., vidrip.example.com)
# Or press Enter to skip Caddyfile generation
# Stop the service
./stop-production.sh
```
@@ -21,8 +24,11 @@ The `start-production.sh` script will:
- Check system requirements (Node.js 18+, yt-dlp)
- Install dependencies
- Build backend and frontend
- **Ask for your domain and auto-generate a Caddyfile** (if one does not already exist)
- Deploy frontend to `/var/www/vidrip`
- Start the backend server in the background
- Create log files in `logs/` directory
- Show next steps for Caddy installation
## Production Deployment (Recommended)
@@ -106,7 +112,63 @@ sudo systemctl start vidrip
sudo systemctl status vidrip
```
### 6. Setup Nginx Reverse Proxy
### 6. Setup Web Server (Reverse Proxy)
Choose one of the following options:
#### Option A: Caddy (Recommended: easiest, with automatic HTTPS)
**Install Caddy:**
```bash
# Ubuntu/Debian
sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list
sudo apt update
sudo apt install caddy
```
**Configure Caddyfile:**
If you ran `start-production.sh` and entered your domain, a Caddyfile was auto-generated for you!
Otherwise, edit the included template:
```bash
# Edit Caddyfile
nano Caddyfile
# Replace the placeholder site address:
#   FROM: your-domain.com {
#   TO:   vidrip.example.com {   (your actual domain)
```
**Deploy Caddyfile:**
```bash
# Copy to standard location
sudo cp Caddyfile /etc/caddy/Caddyfile
# Validate configuration
sudo caddy validate --config /etc/caddy/Caddyfile
# Enable and start Caddy
sudo systemctl enable caddy
sudo systemctl start caddy
# Check status
sudo systemctl status caddy
```
Caddy will automatically obtain and renew SSL certificates from Let's Encrypt!
**View logs:**
```bash
sudo journalctl -u caddy -f
```
#### Option B: Nginx
Create `/etc/nginx/sites-available/vidrip`:
@@ -117,7 +179,7 @@ server {
# Serve frontend static files
location / {
root /var/www/vidrip/frontend/dist;
root /var/www/vidrip;
try_files $uri $uri/ /index.html;
}
@@ -134,11 +196,8 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
}
# Serve downloaded video files
location /downloads/ {
alias /var/www/vidrip/backend/downloads/;
add_header Content-Type video/mp4;
}
# Note: Video files are now served via /api/videos/:id/stream
# This handles both local and WebDAV storage automatically
# Increase upload size if needed
client_max_body_size 100M;
@@ -153,13 +212,15 @@ sudo nginx -t
sudo systemctl reload nginx
```
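The note in the nginx block above points out that video files are now served through `/api/videos/:id/stream`, which handles both local and WebDAV-backed storage. That route is not part of this diff; purely as an illustration, a handler along these lines could sit behind it, assuming an Express router and a hypothetical `videoOperations.getById` lookup that returns the stored file path:

```typescript
import path from 'path';
import { Router } from 'express';
// Hypothetical helper; the real project resolves videos through its database layer.
import { videoOperations } from '../db/database';

const router = Router();

// GET /api/videos/:id/stream - stream a downloaded video file
router.get('/:id/stream', (req, res) => {
  // Assumed shape: { filePath: string } | undefined
  const video = videoOperations.getById(req.params.id);
  if (!video || !video.filePath) {
    return res.status(404).json({ error: 'Video not found' });
  }
  // res.sendFile honors Range requests, so seeking works in the browser player.
  res.sendFile(path.resolve(video.filePath));
});

export default router;
```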
### 7. Setup SSL with Let's Encrypt (Recommended)
**Setup SSL with Let's Encrypt:**
```bash
sudo apt-get install certbot python3-certbot-nginx
sudo certbot --nginx -d your-domain.com
```
Note: If you are using Caddy, SSL is automatic, so skip this step.
## Systemd Service Management
```bash

View File

@@ -89,7 +89,8 @@ export function initDatabase() {
intervalMinutes: '180', // 180 minutes = 3 hours
varianceMinutes: '30',
maxConcurrentDownloads: '1',
enabled: 'true'
enabled: 'true',
downloadsPath: path.join(__dirname, '../../downloads')
};
const insertConfig = db.prepare(

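For reference, the seeding pattern above (defaults inserted only when missing) is what lets an upgrade add the new `downloadsPath` key without touching settings a user has already changed. A minimal sketch of that idea, assuming a better-sqlite3 style API and a simple key/value `config` table; the truncated `insertConfig` statement in the diff is the project's actual version:

```typescript
import path from 'path';
import Database from 'better-sqlite3';

// Sketch only: database file, table, and column names are assumptions.
const db = new Database('vidrip.db');
db.exec('CREATE TABLE IF NOT EXISTS config (key TEXT PRIMARY KEY, value TEXT NOT NULL)');

const defaults: Record<string, string> = {
  intervalMinutes: '180', // 180 minutes = 3 hours
  varianceMinutes: '30',
  maxConcurrentDownloads: '1',
  enabled: 'true',
  downloadsPath: path.join(__dirname, '../../downloads'),
};

// INSERT OR IGNORE keeps existing rows, so re-running this on upgrade only adds new keys.
const insertConfig = db.prepare('INSERT OR IGNORE INTO config (key, value) VALUES (?, ?)');
const seedDefaults = db.transaction((entries: [string, string][]) => {
  for (const [key, value] of entries) insertConfig.run(key, value);
});
seedDefaults(Object.entries(defaults));
```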
View File

@@ -1,7 +1,7 @@
import express from 'express';
import cors from 'cors';
import path from 'path';
import { initDatabase } from './db/database';
import { initDatabase, configOperations } from './db/database';
import { startScheduler } from './services/scheduler';
import channelsRouter from './routes/channels';
import playlistsRouter from './routes/playlists';
@@ -24,8 +24,9 @@ app.use('/api/playlists', playlistsRouter);
app.use('/api/videos', videosRouter);
app.use('/api/config', configRouter);
// Serve downloaded videos
app.use('/downloads', express.static(path.join(__dirname, '../downloads')));
// Serve downloaded videos from configured path
const downloadsPath = configOperations.get('downloadsPath') || path.join(__dirname, '../downloads');
app.use('/downloads', express.static(downloadsPath));
// Health check
app.get('/api/health', (req, res) => {

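One consequence of the change above: `downloadsPath` is read once at startup, so editing it in Settings only affects the `/downloads` mount after a backend restart. If live reloading were wanted, the mount could re-resolve the path per request; a rough sketch of that alternative, not something this diff does:

```typescript
import express from 'express';
import path from 'path';
import { configOperations } from './db/database';

const app = express();

// Re-resolve the downloads directory on every request so Settings changes
// apply without restarting the backend. Sketch only; the diff above mounts
// the directory once at startup instead.
app.use('/downloads', (req, res, next) => {
  const downloadsPath =
    configOperations.get('downloadsPath') || path.join(__dirname, '../downloads');
  express.static(downloadsPath)(req, res, next);
});
```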
View File

@@ -1,12 +1,19 @@
import { spawn } from 'child_process';
import path from 'path';
import fs from 'fs';
import { configOperations } from '../db/database';
const DOWNLOADS_DIR = path.join(__dirname, '../../downloads');
// Get downloads directory from config
function getDownloadsDir(): string {
const downloadsPath = configOperations.get('downloadsPath');
const dir = downloadsPath || path.join(__dirname, '../../downloads');
// Ensure downloads directory exists
if (!fs.existsSync(DOWNLOADS_DIR)) {
fs.mkdirSync(DOWNLOADS_DIR, { recursive: true });
// Ensure downloads directory exists
if (!fs.existsSync(dir)) {
fs.mkdirSync(dir, { recursive: true });
}
return dir;
}
// Path to cookies file (optional, for additional authentication)
@@ -296,7 +303,8 @@ export async function downloadVideo(
options: DownloadOptions = {}
): Promise<{ filePath: string; fileSize: number }> {
return new Promise((resolve, reject) => {
const outputTemplate = path.join(DOWNLOADS_DIR, `${videoId}.%(ext)s`);
const downloadsDir = getDownloadsDir();
const outputTemplate = path.join(downloadsDir, `${videoId}.%(ext)s`);
const args = [
...getCommonArgs(),
@@ -360,10 +368,10 @@ export async function downloadVideo(
// Find the downloaded file
if (!outputPath) {
const files = fs.readdirSync(DOWNLOADS_DIR);
const files = fs.readdirSync(downloadsDir);
const videoFile = files.find(f => f.startsWith(videoId) && f.endsWith('.mp4'));
if (videoFile) {
outputPath = path.join(DOWNLOADS_DIR, videoFile);
outputPath = path.join(downloadsDir, videoFile);
}
}

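A side note on the fallback lookup above: it only matches files ending in `.mp4`. If the (elided) yt-dlp arguments ever produce a different container such as `.mkv` or `.webm`, the fallback would miss the file. A variant that matches on the video ID and any common video extension could look like this (sketch only, not part of this change; the current args may already force an mp4 container):

```typescript
import fs from 'fs';
import path from 'path';

// Find a downloaded file by video ID regardless of container extension.
// Sketch only - the project currently looks for `${videoId}` plus '.mp4'.
function findDownloadedFile(downloadsDir: string, videoId: string): string | null {
  const videoExtensions = ['.mp4', '.mkv', '.webm', '.m4v'];
  const match = fs
    .readdirSync(downloadsDir)
    .find(f => f.startsWith(videoId) && videoExtensions.includes(path.extname(f).toLowerCase()));
  return match ? path.join(downloadsDir, match) : null;
}
```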
View File

@@ -224,6 +224,22 @@ function SettingsPage() {
Number of videos to download simultaneously (recommended: 1)
</p>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 dark:text-gray-300">
Downloads Directory Path
</label>
<input
type="text"
value={config.downloadsPath || ''}
onChange={(e) => handleChange('downloadsPath', e.target.value)}
className="mt-1 block w-full rounded-md border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-700 text-gray-900 dark:text-white shadow-sm focus:border-blue-500 focus:ring-blue-500"
placeholder="/var/www/vidrip/downloads"
/>
<p className="mt-1 text-sm text-gray-500 dark:text-gray-400">
Absolute path where downloaded videos will be saved. Can point to a mounted WebDAV directory.
</p>
</div>
</div>
<div className="mt-6 flex justify-end">

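The new field stores whatever the user types, while its help text asks for an absolute path. A small, hypothetical client-side check (not part of this diff; the backend remains the real authority on whether the directory is usable) could catch the most common mistake before saving:

```typescript
// Hypothetical validation helper for the Downloads Directory Path field.
function isLikelyAbsolutePath(value: string): boolean {
  // POSIX absolute path such as "/mnt/webdav/videos"; Windows drive paths are out of scope here.
  return value.startsWith('/');
}

function validateDownloadsPath(value: string): string | null {
  if (value.trim() === '') return null; // empty means "use the backend default"
  if (!isLikelyAbsolutePath(value)) {
    return 'Downloads path must be an absolute path, e.g. /var/www/vidrip/downloads';
  }
  return null; // null = no validation error
}
```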
View File

@@ -41,6 +41,7 @@ export interface Config {
varianceMinutes: string;
maxConcurrentDownloads: string;
enabled: string;
downloadsPath: string;
}
export interface SchedulerStatus {

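With `downloadsPath` added to the `Config` interface, the frontend can read it through the existing `/api/config` route registered in `index.ts`. A minimal sketch of a typed fetch, assuming the route returns the `Config` object as JSON (the exact response shape is not shown in this diff):

```typescript
// Sketch of consuming the extended Config type from /api/config.
import type { Config } from './types'; // hypothetical path to the interface above

async function fetchConfig(): Promise<Config> {
  const res = await fetch('/api/config');
  if (!res.ok) throw new Error(`Failed to load config: ${res.status}`);
  return (await res.json()) as Config;
}

// Example: show where downloads will land, falling back to the backend default.
fetchConfig().then(cfg => {
  console.log('Downloads directory:', cfg.downloadsPath || '(backend default)');
});
```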
View File

@@ -19,7 +19,11 @@ BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
# Configuration
@@ -29,6 +33,7 @@ LOG_DIR="$SCRIPT_DIR/logs"
PID_FILE="$SCRIPT_DIR/vidrip.pid"
LOG_FILE="$LOG_DIR/vidrip.log"
ERROR_LOG_FILE="$LOG_DIR/vidrip-error.log"
WEB_ROOT="/var/www/vidrip"
################################################################################
# Helper Functions
@@ -191,6 +196,143 @@ log_success "Frontend built successfully"
cd "$SCRIPT_DIR"
echo ""
################################################################################
# Generate Caddyfile (if needed)
################################################################################
if [ ! -f "$SCRIPT_DIR/Caddyfile" ]; then
log_info "Caddyfile not found. Let's create one!"
echo ""
# Ask for domain
read -p "Enter your domain name (or press Enter to skip): " DOMAIN_INPUT
if [ -n "$DOMAIN_INPUT" ]; then
log_info "Generating Caddyfile for domain: $DOMAIN_INPUT"
cat > "$SCRIPT_DIR/Caddyfile" << EOF
# VidRip Caddyfile
# Auto-generated configuration for ${DOMAIN_INPUT}
${DOMAIN_INPUT} {
# Enable compression
encode gzip zstd
# Security headers
header {
# Enable HSTS (forces HTTPS)
Strict-Transport-Security "max-age=31536000; includeSubDomains; preload"
# Prevent clickjacking
X-Frame-Options "SAMEORIGIN"
# Prevent MIME type sniffing
X-Content-Type-Options "nosniff"
# Enable XSS protection
X-XSS-Protection "1; mode=block"
# Referrer policy
Referrer-Policy "strict-origin-when-cross-origin"
# Remove server header for security
-Server
}
# API routes - proxy to backend
handle /api/* {
reverse_proxy localhost:3001 {
# Health check
health_uri /api/health
health_interval 10s
health_timeout 5s
# Headers
header_up Host {host}
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
header_up X-Forwarded-Proto {scheme}
}
}
# Serve frontend static files
handle {
# Root directory is the frontend build output
root * ${WEB_ROOT}
# Try files first, fall back to index.html for SPA routing
try_files {path} /index.html
# Serve files
file_server
# Cache static assets
@static {
path *.js *.css *.woff *.woff2 *.ttf *.eot *.ico *.png *.jpg *.jpeg *.gif *.svg *.webp
}
header @static {
Cache-Control "public, max-age=31536000, immutable"
}
# Don't cache index.html
@html {
path *.html
}
header @html {
Cache-Control "no-cache, no-store, must-revalidate"
}
}
# Logging
log {
output file /var/log/caddy/vidrip-access.log {
roll_size 100mb
roll_keep 10
}
format json
}
}
EOF
log_success "Caddyfile created successfully!"
log_info "You can edit it later at: $SCRIPT_DIR/Caddyfile"
else
log_info "Skipping Caddyfile generation"
log_info "You can create it manually later or use nginx/Apache"
fi
echo ""
else
log_info "Caddyfile already exists, skipping generation"
echo ""
fi
################################################################################
# Deploy Frontend to Web Root
################################################################################
log_info "Deploying frontend to web root..."
# Deploy if we can write to /var/www directly or elevate via sudo
if [ -w "/var/www" ] || command -v sudo >/dev/null 2>&1; then
# Create web root directory
sudo mkdir -p "$WEB_ROOT"
# Copy frontend build to web root
log_info "Copying frontend build to $WEB_ROOT..."
sudo rm -rf "$WEB_ROOT"/*
sudo cp -r "$FRONTEND_DIR/dist/"* "$WEB_ROOT/"
# Set proper permissions
sudo chown -R www-data:www-data "$WEB_ROOT" 2>/dev/null || sudo chown -R $USER:$USER "$WEB_ROOT"
sudo chmod -R 755 "$WEB_ROOT"
log_success "Frontend deployed to $WEB_ROOT"
else
log_warning "No write permission to /var/www"
log_warning "Run with sudo to deploy frontend, or manually copy:"
log_warning " sudo mkdir -p $WEB_ROOT"
log_warning " sudo cp -r $FRONTEND_DIR/dist/* $WEB_ROOT/"
log_warning " sudo chown -R www-data:www-data $WEB_ROOT"
log_warning " sudo chmod -R 755 $WEB_ROOT"
fi
echo ""
################################################################################
# Start Backend Server
################################################################################
@@ -243,7 +385,14 @@ log_info "==================================================================="
log_info "VidRip is now running in production mode"
log_info "==================================================================="
echo ""
log_info "To stop the service:"
log_info "Backend Server:"
log_info " Running on port 3001 (or PORT from .env)"
log_info " PID: $SERVER_PID"
echo ""
log_info "Frontend:"
log_info " Deployed to: $WEB_ROOT"
echo ""
log_info "To stop the backend service:"
log_info " ./stop-production.sh"
echo ""
log_info "To view logs:"
@@ -252,10 +401,38 @@ echo ""
log_info "To view errors:"
log_info " tail -f $ERROR_LOG_FILE"
echo ""
log_warning "IMPORTANT: In production, you'll need to:"
log_warning " 1. Serve the frontend build (frontend/dist) via nginx/Apache"
log_warning " 2. Setup reverse proxy from frontend to backend API"
log_warning " 3. Configure proper firewall rules"
if [ -f "$SCRIPT_DIR/Caddyfile" ]; then
log_warning "NEXT STEPS - Caddy Setup:"
log_warning " 1. Install Caddy: https://caddyserver.com/docs/install"
log_warning " Quick install (Ubuntu/Debian):"
log_warning " sudo apt install -y debian-keyring debian-archive-keyring apt-transport-https"
log_warning " curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/gpg.key' | sudo gpg --dearmor -o /usr/share/keyrings/caddy-stable-archive-keyring.gpg"
log_warning " curl -1sLf 'https://dl.cloudsmith.io/public/caddy/stable/debian.deb.txt' | sudo tee /etc/apt/sources.list.d/caddy-stable.list"
log_warning " sudo apt update && sudo apt install caddy"
echo ""
log_warning " 2. Copy Caddyfile to Caddy config directory:"
log_warning " sudo cp $SCRIPT_DIR/Caddyfile /etc/caddy/Caddyfile"
echo ""
log_warning " 3. Start Caddy:"
log_warning " sudo systemctl enable caddy"
log_warning " sudo systemctl start caddy"
echo ""
log_warning " 4. Configure firewall to allow ports 80/443:"
log_warning " sudo ufw allow 80/tcp"
log_warning " sudo ufw allow 443/tcp"
echo ""
log_success "Caddy will automatically obtain SSL certificates from Let's Encrypt!"
else
log_warning "NEXT STEPS:"
log_warning " 1. Install Caddy: https://caddyserver.com/docs/install"
log_warning " 2. Edit Caddyfile and replace 'your-domain.com' with your domain"
log_warning " 3. Start Caddy: sudo caddy start"
log_warning " (or for testing: caddy run)"
log_warning " 4. Configure firewall to allow ports 80/443"
fi
echo ""
log_info "Alternative web servers:"
log_info " - See DEPLOYMENT.md for nginx/Apache configurations"
echo ""
# Register cleanup on script exit