From 3eaa76174e507a3eab3933cd398193715ef26dfc Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Wed, 26 Nov 2025 21:11:13 -0600 Subject: [PATCH 01/70] Feature: Automated configuration backups with scheduling - Create/Download/Upload/Restore database backups (PostgreSQL and SQLite) - Configurable data directory backups (via settings.py) - Scheduled backups (daily/weekly) via Celery Beat - Retention policy (keep last N backups) - Token-based auth for async task polling - X-Accel-Redirect support for nginx file serving - Comprehensive tests --- .dockerignore | 1 + apps/api/urls.py | 1 + apps/backups/__init__.py | 0 apps/backups/api_urls.py | 18 + apps/backups/api_views.py | 364 ++++++ apps/backups/apps.py | 7 + apps/backups/migrations/__init__.py | 0 apps/backups/models.py | 0 apps/backups/scheduler.py | 144 +++ apps/backups/services.py | 347 ++++++ apps/backups/tasks.py | 106 ++ apps/backups/tests.py | 1010 +++++++++++++++++ dispatcharr/settings.py | 7 + docker/nginx.conf | 7 + docker/uwsgi.ini | 1 + frontend/src/api.js | 177 +++ .../src/components/backups/BackupManager.jsx | 496 ++++++++ frontend/src/pages/Settings.jsx | 8 + 18 files changed, 2694 insertions(+) create mode 100644 apps/backups/__init__.py create mode 100644 apps/backups/api_urls.py create mode 100644 apps/backups/api_views.py create mode 100644 apps/backups/apps.py create mode 100644 apps/backups/migrations/__init__.py create mode 100644 apps/backups/models.py create mode 100644 apps/backups/scheduler.py create mode 100644 apps/backups/services.py create mode 100644 apps/backups/tasks.py create mode 100644 apps/backups/tests.py create mode 100644 frontend/src/components/backups/BackupManager.jsx diff --git a/.dockerignore b/.dockerignore index c79ca7b4d..296537de4 100755 --- a/.dockerignore +++ b/.dockerignore @@ -31,3 +31,4 @@ LICENSE README.md data/ +docker/data/ diff --git a/apps/api/urls.py b/apps/api/urls.py index 7d9edb523..4c92c70a4 100644 --- a/apps/api/urls.py +++ b/apps/api/urls.py @@ -27,6 +27,7 @@ path('core/', include(('core.api_urls', 'core'), namespace='core')), path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')), path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')), + path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')), # path('output/', include(('apps.output.api_urls', 'output'), namespace='output')), #path('player/', include(('apps.player.api_urls', 'player'), namespace='player')), #path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')), diff --git a/apps/backups/__init__.py b/apps/backups/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/apps/backups/api_urls.py b/apps/backups/api_urls.py new file mode 100644 index 000000000..226758cc0 --- /dev/null +++ b/apps/backups/api_urls.py @@ -0,0 +1,18 @@ +from django.urls import path + +from . 
import api_views
+
+app_name = "backups"
+
+urlpatterns = [
+    path("", api_views.list_backups, name="backup-list"),
+    path("create/", api_views.create_backup, name="backup-create"),
+    path("upload/", api_views.upload_backup, name="backup-upload"),
+    path("schedule/", api_views.get_schedule, name="backup-schedule-get"),
+    path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"),
+    path("status/<str:task_id>/", api_views.backup_status, name="backup-status"),
+    path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"),
+    path("<str:filename>/download/", api_views.download_backup, name="backup-download"),
+    path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"),
+    path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"),
+]
diff --git a/apps/backups/api_views.py b/apps/backups/api_views.py
new file mode 100644
index 000000000..c6ff7d269
--- /dev/null
+++ b/apps/backups/api_views.py
@@ -0,0 +1,364 @@
+import hashlib
+import hmac
+import logging
+import os
+from pathlib import Path
+
+from celery.result import AsyncResult
+from django.conf import settings
+from django.http import HttpResponse, StreamingHttpResponse, Http404
+from rest_framework import status
+from rest_framework.decorators import api_view, permission_classes, parser_classes
+from rest_framework.permissions import IsAdminUser, AllowAny
+from rest_framework.parsers import MultiPartParser, FormParser
+from rest_framework.response import Response
+
+from . import services
+from .tasks import create_backup_task, restore_backup_task
+from .scheduler import get_schedule_settings, update_schedule_settings
+
+logger = logging.getLogger(__name__)
+
+
+def _generate_task_token(task_id: str) -> str:
+    """Generate a signed token for task status access without auth."""
+    secret = settings.SECRET_KEY.encode()
+    return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32]
+
+
+def _verify_task_token(task_id: str, token: str) -> bool:
+    """Verify a task token is valid."""
+    expected = _generate_task_token(task_id)
+    return hmac.compare_digest(expected, token)
+
+
+@api_view(["GET"])
+@permission_classes([IsAdminUser])
+def list_backups(request):
+    """List all available backup files."""
+    try:
+        backups = services.list_backups()
+        return Response(backups, status=status.HTTP_200_OK)
+    except Exception as e:
+        return Response(
+            {"detail": f"Failed to list backups: {str(e)}"},
+            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+        )
+
+
+@api_view(["POST"])
+@permission_classes([IsAdminUser])
+def create_backup(request):
+    """Create a new backup (async via Celery)."""
+    try:
+        task = create_backup_task.delay()
+        return Response(
+            {
+                "detail": "Backup started",
+                "task_id": task.id,
+                "task_token": _generate_task_token(task.id),
+            },
+            status=status.HTTP_202_ACCEPTED,
+        )
+    except Exception as e:
+        return Response(
+            {"detail": f"Failed to start backup: {str(e)}"},
+            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+        )
+
+
+@api_view(["GET"])
+@permission_classes([AllowAny])
+def backup_status(request, task_id):
+    """Check the status of a backup/restore task.
+ + Requires either: + - Valid admin authentication, OR + - Valid task_token query parameter + """ + # Check for token-based auth (for restore when session is invalidated) + token = request.query_params.get("token") + if token: + if not _verify_task_token(task_id, token): + return Response( + {"detail": "Invalid task token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + result = AsyncResult(task_id) + + if result.ready(): + task_result = result.get() + if task_result.get("status") == "completed": + return Response({ + "state": "completed", + "result": task_result, + }) + else: + return Response({ + "state": "failed", + "error": task_result.get("error", "Unknown error"), + }) + elif result.failed(): + return Response({ + "state": "failed", + "error": str(result.result), + }) + else: + return Response({ + "state": result.state.lower(), + }) + except Exception as e: + return Response( + {"detail": f"Failed to get task status: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_download_token(request, filename): + """Get a signed token for downloading a backup file.""" + try: + # Security: prevent path traversal + if ".." in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + token = _generate_task_token(filename) + return Response({"token": token}) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to generate token: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([AllowAny]) +def download_backup(request, filename): + """Download a backup file. + + Requires either: + - Valid admin authentication, OR + - Valid download_token query parameter + """ + # Check for token-based auth (avoids CORS preflight issues) + token = request.query_params.get("token") + if token: + if not _verify_task_token(filename, token): + return Response( + {"detail": "Invalid download token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + # Security: prevent path traversal by checking for suspicious characters + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = (backup_dir / filename).resolve() + + # Security: ensure the resolved path is still within backup_dir + if not str(backup_file).startswith(str(backup_dir.resolve())): + raise Http404("Invalid filename") + + if not backup_file.exists() or not backup_file.is_file(): + raise Http404("Backup file not found") + + file_size = backup_file.stat().st_size + + # Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly + # Fall back to streaming for non-nginx deployments + use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true" + logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}") + + if use_nginx_accel: + # X-Accel-Redirect: Django returns immediately, nginx serves file + logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}") + response = HttpResponse() + response["X-Accel-Redirect"] = f"/protected-backups/{filename}" + response["Content-Type"] = "application/zip" + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + else: + # Streaming fallback for non-nginx deployments + logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)") + def file_iterator(file_path, chunk_size=2 * 1024 * 1024): + with open(file_path, "rb") as f: + while chunk := f.read(chunk_size): + yield chunk + + response = StreamingHttpResponse( + file_iterator(backup_file), + content_type="application/zip", + ) + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Download failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["DELETE"]) +@permission_classes([IsAdminUser]) +def delete_backup(request, filename): + """Delete a backup file.""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + services.delete_backup(filename) + return Response( + {"detail": "Backup deleted successfully"}, + status=status.HTTP_204_NO_CONTENT, + ) + except FileNotFoundError: + raise Http404("Backup file not found") + except Exception as e: + return Response( + {"detail": f"Delete failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +@parser_classes([MultiPartParser, FormParser]) +def upload_backup(request): + """Upload a backup file for restoration.""" + uploaded = request.FILES.get("file") + if not uploaded: + return Response( + {"detail": "No file uploaded"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + try: + backup_dir = services.get_backup_dir() + filename = uploaded.name or "uploaded-backup.zip" + + # Ensure unique filename + backup_file = backup_dir / filename + counter = 1 + while backup_file.exists(): + name_parts = filename.rsplit(".", 1) + if len(name_parts) == 2: + backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}" + else: + backup_file = backup_dir / f"{filename}-{counter}" + counter += 1 + + # Save uploaded file + with backup_file.open("wb") as f: + for chunk in uploaded.chunks(): + f.write(chunk) + + return Response( + { + "detail": "Backup uploaded successfully", + "filename": backup_file.name, + }, + status=status.HTTP_201_CREATED, + ) + except Exception as e: + return Response( + {"detail": f"Upload failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +def restore_backup(request, filename): + """Restore from a backup file (async via Celery). WARNING: This will flush the database!""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + task = restore_backup_task.delay(filename) + return Response( + { + "detail": "Restore started", + "task_id": task.id, + "task_token": _generate_task_token(task.id), + }, + status=status.HTTP_202_ACCEPTED, + ) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to start restore: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_schedule(request): + """Get backup schedule settings.""" + try: + settings = get_schedule_settings() + return Response(settings) + except Exception as e: + return Response( + {"detail": f"Failed to get schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["PUT"]) +@permission_classes([IsAdminUser]) +def update_schedule(request): + """Update backup schedule settings.""" + try: + settings = update_schedule_settings(request.data) + return Response(settings) + except ValueError as e: + return Response( + {"detail": str(e)}, + status=status.HTTP_400_BAD_REQUEST, + ) + except Exception as e: + return Response( + {"detail": f"Failed to update schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) diff --git a/apps/backups/apps.py b/apps/backups/apps.py new file mode 100644 index 000000000..ee644149b --- /dev/null +++ b/apps/backups/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig + + +class BackupsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "apps.backups" + verbose_name = "Backups" diff --git a/apps/backups/migrations/__init__.py b/apps/backups/migrations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/apps/backups/models.py b/apps/backups/models.py new file mode 100644 index 000000000..e69de29bb diff --git a/apps/backups/scheduler.py b/apps/backups/scheduler.py new file mode 100644 index 000000000..52186e90d --- /dev/null +++ b/apps/backups/scheduler.py @@ -0,0 +1,144 @@ +import json +import logging + +from django_celery_beat.models import PeriodicTask, CrontabSchedule + +from core.models import CoreSettings + +logger = logging.getLogger(__name__) + +BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task" + +SETTING_KEYS = { + "enabled": "backup_schedule_enabled", + "frequency": "backup_schedule_frequency", + "time": "backup_schedule_time", + "day_of_week": "backup_schedule_day_of_week", + "retention_count": "backup_retention_count", +} + +DEFAULTS = { + "enabled": False, + "frequency": "daily", + "time": "03:00", + "day_of_week": 0, # Sunday + "retention_count": 0, +} + + +def _get_setting(key: str, default=None): + """Get a backup setting from CoreSettings.""" + try: + setting = CoreSettings.objects.get(key=SETTING_KEYS[key]) + value = setting.value + if key == "enabled": + return value.lower() == "true" + elif key in ("day_of_week", "retention_count"): + return int(value) + return value + except CoreSettings.DoesNotExist: + return default if default is not None else DEFAULTS.get(key) + + +def _set_setting(key: str, value) -> None: + """Set a backup setting in CoreSettings.""" + str_value = str(value).lower() if isinstance(value, bool) else str(value) + CoreSettings.objects.update_or_create( + key=SETTING_KEYS[key], + defaults={ + "name": f"Backup {key.replace('_', ' ').title()}", + 
"value": str_value, + }, + ) + + +def get_schedule_settings() -> dict: + """Get all backup schedule settings.""" + return { + "enabled": _get_setting("enabled"), + "frequency": _get_setting("frequency"), + "time": _get_setting("time"), + "day_of_week": _get_setting("day_of_week"), + "retention_count": _get_setting("retention_count"), + } + + +def update_schedule_settings(data: dict) -> dict: + """Update backup schedule settings and sync the PeriodicTask.""" + # Validate + if "frequency" in data and data["frequency"] not in ("daily", "weekly"): + raise ValueError("frequency must be 'daily' or 'weekly'") + + if "time" in data: + try: + hour, minute = data["time"].split(":") + int(hour) + int(minute) + except (ValueError, AttributeError): + raise ValueError("time must be in HH:MM format") + + if "day_of_week" in data: + day = int(data["day_of_week"]) + if day < 0 or day > 6: + raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)") + + if "retention_count" in data: + count = int(data["retention_count"]) + if count < 0: + raise ValueError("retention_count must be >= 0") + + # Update settings + for key in ("enabled", "frequency", "time", "day_of_week", "retention_count"): + if key in data: + _set_setting(key, data[key]) + + # Sync the periodic task + _sync_periodic_task() + + return get_schedule_settings() + + +def _sync_periodic_task() -> None: + """Create, update, or delete the scheduled backup task based on settings.""" + settings = get_schedule_settings() + + if not settings["enabled"]: + # Delete the task if it exists + PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).delete() + logger.info("Backup schedule disabled, removed periodic task") + return + + # Parse time + hour, minute = settings["time"].split(":") + + # Build crontab based on frequency + if settings["frequency"] == "daily": + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week="*", + day_of_month="*", + month_of_year="*", + ) + else: # weekly + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=str(settings["day_of_week"]), + day_of_month="*", + month_of_year="*", + ) + + # Create or update the periodic task + task, created = PeriodicTask.objects.update_or_create( + name=BACKUP_SCHEDULE_TASK_NAME, + defaults={ + "task": "apps.backups.tasks.scheduled_backup_task", + "crontab": crontab, + "enabled": True, + "kwargs": json.dumps({"retention_count": settings["retention_count"]}), + }, + ) + + action = "Created" if created else "Updated" + logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}") diff --git a/apps/backups/services.py b/apps/backups/services.py new file mode 100644 index 000000000..968384171 --- /dev/null +++ b/apps/backups/services.py @@ -0,0 +1,347 @@ +import datetime +import json +import os +import shutil +import subprocess +import tempfile +from pathlib import Path +from zipfile import ZipFile, ZIP_DEFLATED +import logging + +from django.conf import settings + +logger = logging.getLogger(__name__) + + +def get_backup_dir() -> Path: + """Get the backup directory, creating it if necessary.""" + backup_dir = Path(settings.BACKUP_ROOT) + backup_dir.mkdir(parents=True, exist_ok=True) + return backup_dir + + +def get_data_dirs() -> list[Path]: + """Get list of data directories to include in backups.""" + dirs = getattr(settings, "BACKUP_DATA_DIRS", []) + return [Path(d) for d in dirs if d and Path(d).exists()] + + +def _is_postgresql() -> bool: + """Check if we're using PostgreSQL.""" 
+ return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql" + + +def _get_pg_env() -> dict: + """Get environment variables for PostgreSQL commands.""" + db_config = settings.DATABASES["default"] + env = os.environ.copy() + env["PGPASSWORD"] = db_config.get("PASSWORD", "") + return env + + +def _get_pg_args() -> list[str]: + """Get common PostgreSQL command arguments.""" + db_config = settings.DATABASES["default"] + return [ + "-h", db_config.get("HOST", "localhost"), + "-p", str(db_config.get("PORT", 5432)), + "-U", db_config.get("USER", "postgres"), + "-d", db_config.get("NAME", "dispatcharr"), + ] + + +def _dump_postgresql(output_file: Path) -> None: + """Dump PostgreSQL database using pg_dump.""" + logger.info("Dumping PostgreSQL database with pg_dump...") + + cmd = [ + "pg_dump", + *_get_pg_args(), + "-Fc", # Custom format for pg_restore + "-v", # Verbose + "-f", str(output_file), + ] + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"pg_dump failed: {result.stderr}") + raise RuntimeError(f"pg_dump failed: {result.stderr}") + + logger.debug(f"pg_dump output: {result.stderr}") + + +def _restore_postgresql(dump_file: Path) -> None: + """Restore PostgreSQL database using pg_restore.""" + logger.info("[PG_RESTORE] Starting pg_restore...") + logger.info(f"[PG_RESTORE] Dump file: {dump_file}") + + pg_args = _get_pg_args() + logger.info(f"[PG_RESTORE] Connection args: {pg_args}") + + cmd = [ + "pg_restore", + "--clean", # Clean (drop) database objects before recreating + *pg_args, + "-v", # Verbose + str(dump_file), + ] + + logger.info(f"[PG_RESTORE] Running command: {' '.join(cmd)}") + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + logger.info(f"[PG_RESTORE] Return code: {result.returncode}") + + # pg_restore may return non-zero even on partial success + # Check for actual errors vs warnings + if result.returncode != 0: + # Some errors during restore are expected (e.g., "does not exist" when cleaning) + # Only fail on critical errors + stderr = result.stderr.lower() + if "fatal" in stderr or "could not connect" in stderr: + logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}") + raise RuntimeError(f"pg_restore failed: {result.stderr}") + else: + logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...") + + logger.info("[PG_RESTORE] Completed successfully") + + +def _dump_sqlite(output_file: Path) -> None: + """Dump SQLite database using sqlite3 .backup command.""" + logger.info("Dumping SQLite database with sqlite3 .backup...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + + if not db_path.exists(): + raise FileNotFoundError(f"SQLite database not found: {db_path}") + + # Use sqlite3 .backup command via stdin for reliable execution + result = subprocess.run( + ["sqlite3", str(db_path)], + input=f".backup '{output_file}'\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 backup failed: {result.stderr}") + raise RuntimeError(f"sqlite3 backup failed: {result.stderr}") + + # Verify the backup file was created + if not output_file.exists(): + raise RuntimeError("sqlite3 backup failed: output file not created") + + logger.info(f"sqlite3 backup completed successfully: {output_file}") + + +def _restore_sqlite(dump_file: Path) -> None: + """Restore SQLite database by replacing the database file.""" + logger.info("Restoring SQLite 
database...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + backup_current = None + + # Backup current database before overwriting + if db_path.exists(): + backup_current = db_path.with_suffix(".db.bak") + shutil.copy2(db_path, backup_current) + logger.info(f"Backed up current database to {backup_current}") + + # Ensure parent directory exists + db_path.parent.mkdir(parents=True, exist_ok=True) + + # The backup file from _dump_sqlite is a complete SQLite database file + # We can simply copy it over the existing database + shutil.copy2(dump_file, db_path) + + # Verify the restore worked by checking if sqlite3 can read it + result = subprocess.run( + ["sqlite3", str(db_path)], + input=".tables\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 verification failed: {result.stderr}") + # Try to restore from backup + if backup_current and backup_current.exists(): + shutil.copy2(backup_current, db_path) + logger.info("Restored original database from backup") + raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}") + + logger.info("sqlite3 restore completed successfully") + + +def create_backup() -> Path: + """ + Create a backup archive containing database dump and data directories. + Returns the path to the created backup file. + """ + backup_dir = get_backup_dir() + timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S") + backup_name = f"dispatcharr-backup-{timestamp}.zip" + backup_file = backup_dir / backup_name + + logger.info(f"Creating backup: {backup_name}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir: + temp_path = Path(temp_dir) + + # Determine database type and dump accordingly + if _is_postgresql(): + db_dump_file = temp_path / "database.dump" + _dump_postgresql(db_dump_file) + db_type = "postgresql" + else: + db_dump_file = temp_path / "database.sqlite3" + _dump_sqlite(db_dump_file) + db_type = "sqlite" + + # Create ZIP archive with compression and ZIP64 support for large files + with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file: + # Add database dump + zip_file.write(db_dump_file, db_dump_file.name) + + # Add metadata + metadata = { + "format": "dispatcharr-backup", + "version": 2, + "database_type": db_type, + "database_file": db_dump_file.name, + "created_at": datetime.datetime.now(datetime.UTC).isoformat(), + } + zip_file.writestr("metadata.json", json.dumps(metadata, indent=2)) + + # Add data directories + for data_dir in get_data_dirs(): + logger.debug(f"Adding directory: {data_dir}") + for file_path in data_dir.rglob("*"): + if file_path.is_file(): + arcname = f"data/{data_dir.name}/{file_path.relative_to(data_dir)}" + zip_file.write(file_path, arcname) + + logger.info(f"Backup created successfully: {backup_file}") + return backup_file + + +def restore_backup(backup_file: Path) -> None: + """ + Restore from a backup archive. + WARNING: This will overwrite the database! 
+ """ + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {backup_file}") + + logger.info(f"Restoring from backup: {backup_file}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir: + temp_path = Path(temp_dir) + + # Extract backup + logger.debug("Extracting backup archive...") + with ZipFile(backup_file, "r") as zip_file: + zip_file.extractall(temp_path) + + # Read metadata + metadata_file = temp_path / "metadata.json" + if not metadata_file.exists(): + raise ValueError("Invalid backup: missing metadata.json") + + with open(metadata_file) as f: + metadata = json.load(f) + + # Restore database + _restore_database(temp_path, metadata) + + # Restore data directories + data_root = temp_path / "data" + if data_root.exists(): + logger.info("Restoring data directories...") + for extracted_dir in data_root.iterdir(): + if not extracted_dir.is_dir(): + continue + + target_name = extracted_dir.name + data_dirs = get_data_dirs() + matching = [d for d in data_dirs if d.name == target_name] + + if not matching: + logger.warning(f"No configured directory for {target_name}, skipping") + continue + + target = matching[0] + logger.debug(f"Restoring {target_name} to {target}") + + # Create parent directory if needed + target.parent.mkdir(parents=True, exist_ok=True) + + # Remove existing and copy from backup + if target.exists(): + shutil.rmtree(target) + shutil.copytree(extracted_dir, target) + + logger.info("Restore completed successfully") + + +def _restore_database(temp_path: Path, metadata: dict) -> None: + """Restore database from backup.""" + db_type = metadata.get("database_type", "postgresql") + db_file = metadata.get("database_file", "database.dump") + dump_file = temp_path / db_file + + if not dump_file.exists(): + raise ValueError(f"Invalid backup: missing {db_file}") + + current_db_type = "postgresql" if _is_postgresql() else "sqlite" + + if db_type != current_db_type: + raise ValueError( + f"Database type mismatch: backup is {db_type}, " + f"but current database is {current_db_type}" + ) + + if db_type == "postgresql": + _restore_postgresql(dump_file) + else: + _restore_sqlite(dump_file) + + +def list_backups() -> list[dict]: + """List all available backup files with metadata.""" + backup_dir = get_backup_dir() + backups = [] + + for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True): + backups.append({ + "name": backup_file.name, + "size": backup_file.stat().st_size, + "created": datetime.datetime.fromtimestamp(backup_file.stat().st_mtime).isoformat(), + }) + + return backups + + +def delete_backup(filename: str) -> None: + """Delete a backup file.""" + backup_dir = get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {filename}") + + if not backup_file.is_file(): + raise ValueError(f"Invalid backup file: {filename}") + + backup_file.unlink() + logger.info(f"Deleted backup: {filename}") diff --git a/apps/backups/tasks.py b/apps/backups/tasks.py new file mode 100644 index 000000000..f531fef81 --- /dev/null +++ b/apps/backups/tasks.py @@ -0,0 +1,106 @@ +import logging +import traceback +from celery import shared_task + +from . import services + +logger = logging.getLogger(__name__) + + +def _cleanup_old_backups(retention_count: int) -> int: + """Delete old backups, keeping only the most recent N. 
Returns count deleted.""" + if retention_count <= 0: + return 0 + + backups = services.list_backups() + if len(backups) <= retention_count: + return 0 + + # Backups are sorted newest first, so delete from the end + to_delete = backups[retention_count:] + deleted = 0 + + for backup in to_delete: + try: + services.delete_backup(backup["name"]) + deleted += 1 + logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}") + except Exception as e: + logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}") + + return deleted + + +@shared_task(bind=True) +def create_backup_task(self): + """Celery task to create a backup asynchronously.""" + try: + logger.info(f"[BACKUP] Starting backup task {self.request.id}") + backup_file = services.create_backup() + logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}") + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + } + except Exception as e: + logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}") + logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def restore_backup_task(self, filename: str): + """Celery task to restore a backup asynchronously.""" + try: + logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}") + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + logger.info(f"[RESTORE] Backup file path: {backup_file}") + services.restore_backup(backup_file) + logger.info(f"[RESTORE] Task {self.request.id} completed successfully") + return { + "status": "completed", + "filename": filename, + } + except Exception as e: + logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}") + logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def scheduled_backup_task(self, retention_count: int = 0): + """Celery task for scheduled backups with optional retention cleanup.""" + try: + logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}") + + # Create backup + backup_file = services.create_backup() + logger.info(f"[SCHEDULED] Backup created: {backup_file.name}") + + # Cleanup old backups if retention is set + deleted = 0 + if retention_count > 0: + deleted = _cleanup_old_backups(retention_count) + logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)") + + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + "deleted_count": deleted, + } + except Exception as e: + logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}") + logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } diff --git a/apps/backups/tests.py b/apps/backups/tests.py new file mode 100644 index 000000000..a06bb7d20 --- /dev/null +++ b/apps/backups/tests.py @@ -0,0 +1,1010 @@ +import json +import tempfile +from io import BytesIO +from pathlib import Path +from zipfile import ZipFile +from unittest.mock import patch, MagicMock + +from django.test import TestCase +from django.contrib.auth import get_user_model +from rest_framework.test import APIClient +from rest_framework_simplejwt.tokens import RefreshToken + +from . 
import services + +User = get_user_model() + + +class BackupServicesTestCase(TestCase): + """Test cases for backup services""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + self.temp_data_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + if Path(self.temp_data_dir).exists(): + shutil.rmtree(self.temp_data_dir) + + @patch('apps.backups.services.settings') + def test_get_backup_dir_creates_directory(self, mock_settings): + """Test that get_backup_dir creates the directory if it doesn't exist""" + mock_settings.BACKUP_ROOT = self.temp_backup_dir + + with patch('apps.backups.services.Path') as mock_path: + mock_path_instance = MagicMock() + mock_path_instance.mkdir = MagicMock() + mock_path.return_value = mock_path_instance + + services.get_backup_dir() + mock_path_instance.mkdir.assert_called_once_with(parents=True, exist_ok=True) + + @patch('apps.backups.services.settings') + def test_get_data_dirs_with_empty_config(self, mock_settings): + """Test that get_data_dirs returns empty list when no dirs configured""" + mock_settings.BACKUP_DATA_DIRS = [] + result = services.get_data_dirs() + self.assertEqual(result, []) + + @patch('apps.backups.services.settings') + def test_get_data_dirs_filters_nonexistent(self, mock_settings): + """Test that get_data_dirs filters out non-existent directories""" + nonexistent_dir = '/tmp/does-not-exist-12345' + mock_settings.BACKUP_DATA_DIRS = [self.temp_data_dir, nonexistent_dir] + + result = services.get_data_dirs() + self.assertEqual(len(result), 1) + self.assertEqual(str(result[0]), self.temp_data_dir) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services.get_data_dirs') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_sqlite') + def test_create_backup_success_sqlite(self, mock_dump_sqlite, mock_is_pg, mock_get_data_dirs, mock_get_backup_dir): + """Test successful backup creation with SQLite""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_get_data_dirs.return_value = [] + mock_is_pg.return_value = False + + # Mock SQLite dump to create a temp file + def mock_dump(output_file): + output_file.write_text("sqlite dump") + + mock_dump_sqlite.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + self.assertTrue(result.name.startswith('dispatcharr-backup-')) + self.assertTrue(result.name.endswith('.zip')) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.sqlite3', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'sqlite') + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services.get_data_dirs') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_postgresql') + def test_create_backup_success_postgresql(self, mock_dump_pg, mock_is_pg, mock_get_data_dirs, mock_get_backup_dir): + """Test successful backup creation with PostgreSQL""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_get_data_dirs.return_value = [] + mock_is_pg.return_value = True + + # Mock PostgreSQL dump to create a temp file + def mock_dump(output_file): + output_file.write_bytes(b"pg dump 
data") + + mock_dump_pg.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.dump', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'postgresql') + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_empty(self, mock_get_backup_dir): + """Test listing backups when none exist""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + result = services.list_backups() + + self.assertEqual(result, []) + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_with_files(self, mock_get_backup_dir): + """Test listing backups with existing backup files""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-2025.01.01.12.00.00.zip" + test_backup.write_text("fake backup content") + + result = services.list_backups() + + self.assertEqual(len(result), 1) + self.assertEqual(result[0]['name'], test_backup.name) + self.assertIn('size', result[0]) + self.assertIn('created', result[0]) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_success(self, mock_get_backup_dir): + """Test successful backup deletion""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-test.zip" + test_backup.write_text("fake backup content") + + self.assertTrue(test_backup.exists()) + + services.delete_backup(test_backup.name) + + self.assertFalse(test_backup.exists()) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_not_found(self, mock_get_backup_dir): + """Test deleting a non-existent backup raises error""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + with self.assertRaises(FileNotFoundError): + services.delete_backup("nonexistent-backup.zip") + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services.get_data_dirs') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_postgresql') + def test_restore_backup_postgresql(self, mock_restore_pg, mock_is_pg, mock_get_data_dirs, mock_get_backup_dir): + """Test successful restoration of PostgreSQL backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_get_data_dirs.return_value = [] + mock_is_pg.return_value = True + + # Create PostgreSQL backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'pg dump data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + services.restore_backup(backup_file) + + mock_restore_pg.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services.get_data_dirs') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_sqlite') + def test_restore_backup_sqlite(self, mock_restore_sqlite, mock_is_pg, mock_get_data_dirs, mock_get_backup_dir): + """Test successful restoration of SQLite 
backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_get_data_dirs.return_value = [] + mock_is_pg.return_value = False + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', + 'database_file': 'database.sqlite3' + })) + + services.restore_backup(backup_file) + + mock_restore_sqlite.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services.get_data_dirs') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_database_type_mismatch(self, mock_is_pg, mock_get_data_dirs, mock_get_backup_dir): + """Test restore fails when database type doesn't match""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_get_data_dirs.return_value = [] + mock_is_pg.return_value = True # Current system is PostgreSQL + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', # Backup is SQLite + 'database_file': 'database.sqlite3' + })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('mismatch', str(context.exception).lower()) + + def test_restore_backup_not_found(self): + """Test restoring from non-existent backup file""" + fake_path = Path("/tmp/nonexistent-backup-12345.zip") + + with self.assertRaises(FileNotFoundError): + services.restore_backup(fake_path) + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_missing_metadata(self, mock_get_backup_dir): + """Test restoring from backup without metadata.json""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a backup file missing metadata.json + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'fake dump data') + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('metadata.json', str(context.exception)) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_missing_database(self, mock_is_pg, mock_get_backup_dir): + """Test restoring from backup missing database dump""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True + + # Create backup file missing database dump + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('database.dump', str(context.exception)) + + +class BackupAPITestCase(TestCase): + """Test cases for backup API endpoints""" + + def setUp(self): + self.client = APIClient() + self.user = User.objects.create_user( + username='testuser', + email='test@example.com', + password='testpass123' + ) + self.admin_user = User.objects.create_superuser( + username='admin', + email='admin@example.com', + password='adminpass123' + ) + 
self.temp_backup_dir = tempfile.mkdtemp() + + def get_auth_header(self, user): + """Helper method to get JWT auth header for a user""" + refresh = RefreshToken.for_user(user) + return f'Bearer {str(refresh.access_token)}' + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + def test_list_backups_requires_admin(self): + """Test that listing backups requires admin privileges""" + url = '/api/backups/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.list_backups') + def test_list_backups_success(self, mock_list_backups): + """Test successful backup listing""" + mock_list_backups.return_value = [ + { + 'name': 'backup-test.zip', + 'size': 1024, + 'created': '2025-01-01T12:00:00' + } + ] + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(len(data), 1) + self.assertEqual(data[0]['name'], 'backup-test.zip') + + def test_create_backup_requires_admin(self): + """Test that creating backups requires admin privileges""" + url = '/api/backups/create/' + + # Unauthenticated request + response = self.client.post(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.post(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_success(self, mock_create_task): + """Test successful backup creation via API (async task)""" + mock_task = MagicMock() + mock_task.id = 'test-task-id-123' + mock_create_task.return_value = mock_task + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-task-id-123') + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_failure(self, mock_create_task): + """Test backup creation failure handling""" + mock_create_task.side_effect = Exception("Failed to start task") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_success(self, mock_get_backup_dir): + """Test successful backup download""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + self.assertEqual(response['Content-Type'], 'application/zip') + + 
@patch('apps.backups.services.get_backup_dir') + def test_download_backup_not_found(self, mock_get_backup_dir): + """Test downloading non-existent backup""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_success(self, mock_delete_backup): + """Test successful backup deletion via API""" + mock_delete_backup.return_value = None + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 204) + mock_delete_backup.assert_called_once_with('test-backup.zip') + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_not_found(self, mock_delete_backup): + """Test deleting non-existent backup via API""" + mock_delete_backup.side_effect = FileNotFoundError("Not found") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + def test_upload_backup_requires_file(self): + """Test that upload requires a file""" + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('No file uploaded', data['detail']) + + @patch('apps.backups.services.get_backup_dir') + def test_upload_backup_success(self, mock_get_backup_dir): + """Test successful backup upload""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + # Create a fake backup file + fake_backup = BytesIO(b"fake backup content") + fake_backup.name = 'uploaded-backup.zip' + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, {'file': fake_backup}, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 201) + data = response.json() + self.assertIn('filename', data) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_success(self, mock_restore_task, mock_get_backup_dir): + """Test successful backup restoration via API (async task)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + mock_task = MagicMock() + mock_task.id = 'test-restore-task-456' + mock_restore_task.return_value = mock_task + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-restore-task-456') + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_not_found(self, mock_get_backup_dir): + """Test restoring from non-existent backup via API""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + 
auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Backup Status Endpoint Tests --- + + def test_backup_status_requires_auth_or_token(self): + """Test that backup_status requires auth or valid token""" + url = '/api/backups/status/fake-task-id/' + + # Unauthenticated request without token + response = self.client.get(url) + self.assertEqual(response.status_code, 401) + + def test_backup_status_invalid_token(self): + """Test that backup_status rejects invalid tokens""" + url = '/api/backups/status/fake-task-id/?token=invalid-token' + response = self.client.get(url) + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_with_admin_auth(self, mock_async_result): + """Test backup_status with admin authentication""" + mock_result = MagicMock() + mock_result.ready.return_value = False + mock_result.failed.return_value = False + mock_result.state = 'PENDING' + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'pending') + + @patch('apps.backups.api_views.AsyncResult') + @patch('apps.backups.api_views._verify_task_token') + def test_backup_status_with_valid_token(self, mock_verify, mock_async_result): + """Test backup_status with valid token""" + mock_verify.return_value = True + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'completed', 'filename': 'test.zip'} + mock_async_result.return_value = mock_result + + url = '/api/backups/status/test-task-id/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'completed') + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_task_failed(self, mock_async_result): + """Test backup_status when task failed""" + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'failed', 'error': 'Something went wrong'} + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'failed') + self.assertIn('Something went wrong', data['error']) + + # --- Download Token Endpoint Tests --- + + def test_get_download_token_requires_admin(self): + """Test that get_download_token requires admin privileges""" + url = '/api/backups/test.zip/download-token/' + + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_success(self, mock_get_backup_dir): + """Test successful download token generation""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir 
/ "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertIn('token', data) + self.assertEqual(len(data['token']), 32) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_not_found(self, mock_get_backup_dir): + """Test download token for non-existent file""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Download with Token Auth Tests --- + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.api_views._verify_task_token') + def test_download_backup_with_valid_token(self, mock_verify, mock_get_backup_dir): + """Test downloading backup with valid token (no auth header)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_verify.return_value = True + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + url = '/api/backups/test-backup.zip/download/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_invalid_token(self, mock_get_backup_dir): + """Test downloading backup with invalid token""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + url = '/api/backups/test-backup.zip/download/?token=invalid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_task_start_failure(self, mock_restore_task, mock_get_backup_dir): + """Test restore task start failure via API""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_restore_task.side_effect = Exception("Failed to start restore task") + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + def test_get_schedule_requires_admin(self): + """Test that getting schedule requires admin privileges""" + url = '/api/backups/schedule/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.get_schedule_settings') + def test_get_schedule_success(self, mock_get_settings): + """Test successful schedule retrieval""" + mock_get_settings.return_value = { + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + 'day_of_week': 0, + 'retention_count': 5, + } + + auth_header = self.get_auth_header(self.admin_user) + url = 
'/api/backups/schedule/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['enabled'], True) + self.assertEqual(data['frequency'], 'daily') + self.assertEqual(data['retention_count'], 5) + + def test_update_schedule_requires_admin(self): + """Test that updating schedule requires admin privileges""" + url = '/api/backups/schedule/update/' + + # Unauthenticated request + response = self.client.put(url, {}, content_type='application/json') + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.put( + url, + {}, + content_type='application/json', + HTTP_AUTHORIZATION=self.get_auth_header(self.user) + ) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_success(self, mock_update_settings): + """Test successful schedule update""" + mock_update_settings.return_value = { + 'enabled': True, + 'frequency': 'weekly', + 'time': '02:00', + 'day_of_week': 1, + 'retention_count': 10, + } + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'enabled': True, 'frequency': 'weekly', 'time': '02:00', 'day_of_week': 1, 'retention_count': 10}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['frequency'], 'weekly') + self.assertEqual(data['day_of_week'], 1) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_validation_error(self, mock_update_settings): + """Test schedule update with invalid data""" + mock_update_settings.side_effect = ValueError("frequency must be 'daily' or 'weekly'") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'frequency': 'invalid'}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('frequency', data['detail']) + + +class BackupSchedulerTestCase(TestCase): + """Test cases for backup scheduler""" + + def setUp(self): + from core.models import CoreSettings + # Clean up any existing settings + CoreSettings.objects.filter(key__startswith='backup_').delete() + + def tearDown(self): + from core.models import CoreSettings + from django_celery_beat.models import PeriodicTask + CoreSettings.objects.filter(key__startswith='backup_').delete() + PeriodicTask.objects.filter(name='backup-scheduled-task').delete() + + def test_get_schedule_settings_defaults(self): + """Test that get_schedule_settings returns defaults when no settings exist""" + from . import scheduler + + settings = scheduler.get_schedule_settings() + + self.assertEqual(settings['enabled'], False) + self.assertEqual(settings['frequency'], 'daily') + self.assertEqual(settings['time'], '03:00') + self.assertEqual(settings['day_of_week'], 0) + self.assertEqual(settings['retention_count'], 0) + + def test_update_schedule_settings_stores_values(self): + """Test that update_schedule_settings stores values correctly""" + from . 
import scheduler + + result = scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '04:30', + 'day_of_week': 3, + 'retention_count': 7, + }) + + self.assertEqual(result['enabled'], True) + self.assertEqual(result['frequency'], 'weekly') + self.assertEqual(result['time'], '04:30') + self.assertEqual(result['day_of_week'], 3) + self.assertEqual(result['retention_count'], 7) + + # Verify persistence + settings = scheduler.get_schedule_settings() + self.assertEqual(settings['enabled'], True) + self.assertEqual(settings['frequency'], 'weekly') + + def test_update_schedule_settings_invalid_frequency(self): + """Test that invalid frequency raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'frequency': 'monthly'}) + + self.assertIn('frequency', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_time(self): + """Test that invalid time raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'time': 'invalid'}) + + self.assertIn('HH:MM', str(context.exception)) + + def test_update_schedule_settings_invalid_day_of_week(self): + """Test that invalid day_of_week raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'day_of_week': 7}) + + self.assertIn('day_of_week', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_retention(self): + """Test that negative retention_count raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'retention_count': -1}) + + self.assertIn('retention_count', str(context.exception).lower()) + + def test_sync_creates_periodic_task_when_enabled(self): + """Test that enabling schedule creates a PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertTrue(task.enabled) + self.assertEqual(task.crontab.hour, '05') + self.assertEqual(task.crontab.minute, '00') + + def test_sync_deletes_periodic_task_when_disabled(self): + """Test that disabling schedule removes PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + # First enable + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + self.assertTrue(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + # Then disable + scheduler.update_schedule_settings({'enabled': False}) + + self.assertFalse(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + def test_weekly_schedule_sets_day_of_week(self): + """Test that weekly schedule sets correct day_of_week in crontab""" + from . 
import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '06:00', + 'day_of_week': 3, # Wednesday + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.day_of_week, '3') + + +class BackupTasksTestCase(TestCase): + """Test cases for backup Celery tasks""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_keeps_recent(self, mock_delete, mock_list): + """Test that cleanup keeps the most recent backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, # newest + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, # oldest + ] + + deleted = _cleanup_old_backups(retention_count=2) + + self.assertEqual(deleted, 1) + mock_delete.assert_called_once_with('backup-1.zip') + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_does_nothing_when_under_limit(self, mock_delete, mock_list): + """Test that cleanup does nothing when under retention limit""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=5) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_zero_retention_keeps_all(self, mock_delete, mock_list): + """Test that retention_count=0 keeps all backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=0) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_success(self, mock_cleanup, mock_create): + """Test scheduled backup task success""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + mock_cleanup.return_value = 2 + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'completed') + self.assertEqual(result['filename'], 'scheduled-backup.zip') + self.assertEqual(result['size'], 1024) + self.assertEqual(result['deleted_count'], 2) + mock_cleanup.assert_called_once_with(5) + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_no_cleanup_when_retention_zero(self, mock_cleanup, mock_create): + """Test scheduled backup skips cleanup when retention is 0""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + + result = scheduled_backup_task(retention_count=0) + + 
self.assertEqual(result['status'], 'completed') + self.assertEqual(result['deleted_count'], 0) + mock_cleanup.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + def test_scheduled_backup_task_failure(self, mock_create): + """Test scheduled backup task handles failure""" + from .tasks import scheduled_backup_task + + mock_create.side_effect = Exception("Backup failed") + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'failed') + self.assertIn('Backup failed', result['error']) diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index 289c67942..39f814db9 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -216,6 +216,13 @@ MEDIA_ROOT = BASE_DIR / "media" MEDIA_URL = "/media/" +# Backup settings +BACKUP_ROOT = os.environ.get("BACKUP_ROOT", "/data/backups") +BACKUP_DATA_DIRS = [ + os.environ.get("LOGOS_DIR", "/data/logos"), + os.environ.get("UPLOADS_DIR", "/data/uploads"), + os.environ.get("PLUGINS_DIR", "/data/plugins"), +] SERVER_IP = "127.0.0.1" diff --git a/docker/nginx.conf b/docker/nginx.conf index 5e754d20c..be2210369 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -34,6 +34,13 @@ server { root /data; } + # Internal location for X-Accel-Redirect backup downloads + # Django handles auth, nginx serves the file directly + location /protected-backups/ { + internal; + alias /data/backups/; + } + location /api/logos/(?\d+)/cache/ { proxy_pass http://127.0.0.1:5656; proxy_cache logo_cache; diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index b35ea5bfc..3814aaf6a 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -20,6 +20,7 @@ module = dispatcharr.wsgi:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings +env = USE_NGINX_ACCEL=true socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/frontend/src/api.js b/frontend/src/api.js index 01186bf6f..b11b59a62 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1290,6 +1290,183 @@ export default class API { } } + // Backup API (async with Celery task polling) + static async listBackups() { + try { + const response = await request(`${host}/api/backups/`); + return response || []; + } catch (e) { + errorNotification('Failed to load backups', e); + throw e; + } + } + + static async getBackupStatus(taskId, token = null) { + try { + let url = `${host}/api/backups/status/${taskId}/`; + if (token) { + url += `?token=${encodeURIComponent(token)}`; + } + const response = await request(url, { auth: !token }); + return response; + } catch (e) { + throw e; + } + } + + static async waitForBackupTask(taskId, onProgress, token = null) { + const pollInterval = 2000; // Poll every 2 seconds + const maxAttempts = 300; // Max 10 minutes (300 * 2s) + + for (let attempt = 0; attempt < maxAttempts; attempt++) { + try { + const status = await API.getBackupStatus(taskId, token); + + if (onProgress) { + onProgress(status); + } + + if (status.state === 'completed') { + return status.result; + } else if (status.state === 'failed') { + throw new Error(status.error || 'Task failed'); + } + } catch (e) { + throw e; + } + + // Wait before next poll + await new Promise((resolve) => setTimeout(resolve, pollInterval)); + } + + throw new Error('Task timed out'); + } + + static async createBackup(onProgress) { + try { + // Start the backup task + const response = await request(`${host}/api/backups/create/`, { + method: 'POST', + }); + + // Wait for the task to complete using token for auth + 
const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to create backup', e); + throw e; + } + } + + static async uploadBackup(file) { + try { + const formData = new FormData(); + formData.append('file', file); + + const response = await request( + `${host}/api/backups/upload/`, + { + method: 'POST', + body: formData, + } + ); + return response; + } catch (e) { + errorNotification('Failed to upload backup', e); + throw e; + } + } + + static async deleteBackup(filename) { + try { + const encodedFilename = encodeURIComponent(filename); + await request(`${host}/api/backups/${encodedFilename}/delete/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification('Failed to delete backup', e); + throw e; + } + } + + static async getDownloadToken(filename) { + // Get a download token from the server + try { + const response = await request(`${host}/api/backups/${encodeURIComponent(filename)}/download-token/`); + return response.token; + } catch (e) { + throw e; + } + } + + static async downloadBackup(filename) { + try { + // Get a download token first (requires auth) + const token = await API.getDownloadToken(filename); + const encodedFilename = encodeURIComponent(filename); + + // Build the download URL with token + const downloadUrl = `${host}/api/backups/${encodedFilename}/download/?token=${encodeURIComponent(token)}`; + + // Use direct browser navigation instead of fetch to avoid CORS issues + const link = document.createElement('a'); + link.href = downloadUrl; + link.download = filename; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + return { filename }; + } catch (e) { + errorNotification('Failed to download backup', e); + throw e; + } + } + + static async restoreBackup(filename, onProgress) { + try { + // Start the restore task + const encodedFilename = encodeURIComponent(filename); + const response = await request( + `${host}/api/backups/${encodedFilename}/restore/`, + { + method: 'POST', + } + ); + + // Wait for the task to complete using token for auth + // Token-based auth allows status polling even after DB restore invalidates user sessions + const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to restore backup', e); + throw e; + } + } + + static async getBackupSchedule() { + try { + const response = await request(`${host}/api/backups/schedule/`); + return response; + } catch (e) { + errorNotification('Failed to get backup schedule', e); + throw e; + } + } + + static async updateBackupSchedule(settings) { + try { + const response = await request(`${host}/api/backups/schedule/update/`, { + method: 'PUT', + body: settings, + }); + return response; + } catch (e) { + errorNotification('Failed to update backup schedule', e); + throw e; + } + } + static async getVersion() { try { const response = await request(`${host}/api/core/version/`, { diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx new file mode 100644 index 000000000..468bcdf99 --- /dev/null +++ b/frontend/src/components/backups/BackupManager.jsx @@ -0,0 +1,496 @@ +import { useEffect, useState } from 'react'; +import { + Alert, + Button, + Card, + Divider, + FileInput, + Group, + Loader, + Modal, + NumberInput, + Select, + Stack, + Switch, + Table, + Text, + Tooltip, +} from '@mantine/core'; +import { TimeInput } from 
'@mantine/dates'; +import { + Download, + PlayCircle, + RefreshCcw, + UploadCloud, + Trash2, + Clock, + Save, +} from 'lucide-react'; +import { notifications } from '@mantine/notifications'; + +import API from '../../api'; +import ConfirmationDialog from '../ConfirmationDialog'; + +const DAYS_OF_WEEK = [ + { value: '0', label: 'Sunday' }, + { value: '1', label: 'Monday' }, + { value: '2', label: 'Tuesday' }, + { value: '3', label: 'Wednesday' }, + { value: '4', label: 'Thursday' }, + { value: '5', label: 'Friday' }, + { value: '6', label: 'Saturday' }, +]; + +function formatBytes(bytes) { + if (bytes === 0) return '0 B'; + const k = 1024; + const sizes = ['B', 'KB', 'MB', 'GB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}`; +} + +function formatDate(dateString) { + const date = new Date(dateString); + return date.toLocaleString(); +} + +export default function BackupManager() { + const [backups, setBackups] = useState([]); + const [loading, setLoading] = useState(false); + const [creating, setCreating] = useState(false); + const [downloading, setDownloading] = useState(null); + const [uploadFile, setUploadFile] = useState(null); + const [uploadModalOpen, setUploadModalOpen] = useState(false); + const [restoreConfirmOpen, setRestoreConfirmOpen] = useState(false); + const [deleteConfirmOpen, setDeleteConfirmOpen] = useState(false); + const [selectedBackup, setSelectedBackup] = useState(null); + + // Schedule state + const [schedule, setSchedule] = useState({ + enabled: false, + frequency: 'daily', + time: '03:00', + day_of_week: 0, + retention_count: 0, + }); + const [scheduleLoading, setScheduleLoading] = useState(false); + const [scheduleSaving, setScheduleSaving] = useState(false); + const [scheduleChanged, setScheduleChanged] = useState(false); + + const loadBackups = async () => { + setLoading(true); + try { + const backupList = await API.listBackups(); + setBackups(backupList); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to load backups', + color: 'red', + }); + } finally { + setLoading(false); + } + }; + + const loadSchedule = async () => { + setScheduleLoading(true); + try { + const settings = await API.getBackupSchedule(); + setSchedule(settings); + setScheduleChanged(false); + } catch (error) { + // Ignore errors on initial load - settings may not exist yet + } finally { + setScheduleLoading(false); + } + }; + + useEffect(() => { + loadBackups(); + loadSchedule(); + }, []); + + const handleScheduleChange = (field, value) => { + setSchedule((prev) => ({ ...prev, [field]: value })); + setScheduleChanged(true); + }; + + const handleSaveSchedule = async () => { + setScheduleSaving(true); + try { + const updated = await API.updateBackupSchedule(schedule); + setSchedule(updated); + setScheduleChanged(false); + notifications.show({ + title: 'Success', + message: 'Backup schedule saved', + color: 'green', + }); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to save schedule', + color: 'red', + }); + } finally { + setScheduleSaving(false); + } + }; + + const handleCreateBackup = async () => { + setCreating(true); + try { + await API.createBackup(); + notifications.show({ + title: 'Success', + message: 'Backup created successfully', + color: 'green', + }); + await loadBackups(); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to create backup', + color: 'red', + }); + 
} finally { + setCreating(false); + } + }; + + const handleDownload = async (filename) => { + setDownloading(filename); + try { + await API.downloadBackup(filename); + notifications.show({ + title: 'Download Started', + message: `Downloading ${filename}...`, + color: 'blue', + }); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to download backup', + color: 'red', + }); + } finally { + setDownloading(null); + } + }; + + const handleDeleteClick = (backup) => { + setSelectedBackup(backup); + setDeleteConfirmOpen(true); + }; + + const handleDeleteConfirm = async () => { + try { + await API.deleteBackup(selectedBackup.name); + notifications.show({ + title: 'Success', + message: 'Backup deleted successfully', + color: 'green', + }); + await loadBackups(); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to delete backup', + color: 'red', + }); + } finally { + setDeleteConfirmOpen(false); + setSelectedBackup(null); + } + }; + + const handleRestoreClick = (backup) => { + setSelectedBackup(backup); + setRestoreConfirmOpen(true); + }; + + const handleRestoreConfirm = async () => { + try { + await API.restoreBackup(selectedBackup.name); + notifications.show({ + title: 'Success', + message: 'Backup restored successfully. You may need to refresh the page.', + color: 'green', + }); + setTimeout(() => window.location.reload(), 2000); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to restore backup', + color: 'red', + }); + } finally { + setRestoreConfirmOpen(false); + setSelectedBackup(null); + } + }; + + const handleUploadSubmit = async () => { + if (!uploadFile) return; + + try { + await API.uploadBackup(uploadFile); + notifications.show({ + title: 'Success', + message: 'Backup uploaded successfully', + color: 'green', + }); + setUploadModalOpen(false); + setUploadFile(null); + await loadBackups(); + } catch (error) { + notifications.show({ + title: 'Error', + message: error?.message || 'Failed to upload backup', + color: 'red', + }); + } + }; + + return ( + + + Backups include your database and configured data directories. Use the + create button to generate a new backup, or upload an existing backup to + restore. + + + {/* Schedule Settings */} + + + + + Scheduled Backups + + handleScheduleChange('enabled', e.currentTarget.checked)} + label={schedule.enabled ? 'Enabled' : 'Disabled'} + /> + + + {scheduleLoading ? ( + + ) : ( + <> + + handleScheduleChange('day_of_week', parseInt(value, 10))} + data={DAYS_OF_WEEK} + disabled={!schedule.enabled} + /> + )} + handleScheduleChange('retention_count', value || 0)} + min={0} + disabled={!schedule.enabled} + /> + + + + + + )} + + + + + + + Backups + + + + + + + + + {loading ? ( + + + + ) : backups.length === 0 ? ( + No backups found. Create one to get started! + ) : ( + + + + + + + + + + + {backups.map((backup) => ( + + + + + + + ))} + +
FilenameSizeCreatedActions
+ + {backup.name} + + + {formatBytes(backup.size)} + + {formatDate(backup.created)} + + + + + + + + + + + + +
+ )} + + { + setUploadModalOpen(false); + setUploadFile(null); + }} + title="Upload Backup" + > + + + + + + + + + + { + setRestoreConfirmOpen(false); + setSelectedBackup(null); + }} + onConfirm={handleRestoreConfirm} + title="Restore Backup" + message={`Are you sure you want to restore from "${selectedBackup?.name}"? This will replace all current data with the backup data. This action cannot be undone.`} + confirmLabel="Restore" + cancelLabel="Cancel" + color="orange" + /> + + { + setDeleteConfirmOpen(false); + setSelectedBackup(null); + }} + onConfirm={handleDeleteConfirm} + title="Delete Backup" + message={`Are you sure you want to delete "${selectedBackup?.name}"? This action cannot be undone.`} + confirmLabel="Delete" + cancelLabel="Cancel" + color="red" + /> +
+ ); +} diff --git a/frontend/src/pages/Settings.jsx b/frontend/src/pages/Settings.jsx index df7a755a3..62e7f3ce6 100644 --- a/frontend/src/pages/Settings.jsx +++ b/frontend/src/pages/Settings.jsx @@ -22,6 +22,7 @@ import { import { isNotEmpty, useForm } from '@mantine/form'; import UserAgentsTable from '../components/tables/UserAgentsTable'; import StreamProfilesTable from '../components/tables/StreamProfilesTable'; +import BackupManager from '../components/backups/BackupManager'; import useLocalStorage from '../hooks/useLocalStorage'; import useAuthStore from '../store/auth'; import { @@ -862,6 +863,13 @@ const SettingsPage = () => { + + + Backup & Restore + + + + )} From 3fb18ecce8ac70fe23983eace7484a1cdb1f5def Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Thu, 27 Nov 2025 08:49:29 -0600 Subject: [PATCH 02/70] Enhancement: Respect user's 12h/24h time format preference in backup scheduler - Read time-format setting from UI Settings via useLocalStorage - Show 12-hour time input with AM/PM selector when user prefers 12h - Show 24-hour time input when user prefers 24h - Backend always stores 24-hour format (no API changes) --- .../src/components/backups/BackupManager.jsx | 96 +++++++++++++++++-- 1 file changed, 89 insertions(+), 7 deletions(-) diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx index 468bcdf99..6ba487b55 100644 --- a/frontend/src/components/backups/BackupManager.jsx +++ b/frontend/src/components/backups/BackupManager.jsx @@ -14,9 +14,9 @@ import { Switch, Table, Text, + TextInput, Tooltip, } from '@mantine/core'; -import { TimeInput } from '@mantine/dates'; import { Download, PlayCircle, @@ -30,6 +30,32 @@ import { notifications } from '@mantine/notifications'; import API from '../../api'; import ConfirmationDialog from '../ConfirmationDialog'; +import useLocalStorage from '../../hooks/useLocalStorage'; + +// Convert 24h time string to 12h format with period +function to12Hour(time24) { + if (!time24) return { time: '12:00', period: 'AM' }; + const [hours, minutes] = time24.split(':').map(Number); + const period = hours >= 12 ? 
'PM' : 'AM'; + const hours12 = hours % 12 || 12; + return { + time: `${hours12}:${String(minutes).padStart(2, '0')}`, + period, + }; +} + +// Convert 12h time + period to 24h format +function to24Hour(time12, period) { + if (!time12) return '00:00'; + const [hours, minutes] = time12.split(':').map(Number); + let hours24 = hours; + if (period === 'PM' && hours !== 12) { + hours24 = hours + 12; + } else if (period === 'AM' && hours === 12) { + hours24 = 0; + } + return `${String(hours24).padStart(2, '0')}:${String(minutes).padStart(2, '0')}`; +} const DAYS_OF_WEEK = [ { value: '0', label: 'Sunday' }, @@ -65,6 +91,10 @@ export default function BackupManager() { const [deleteConfirmOpen, setDeleteConfirmOpen] = useState(false); const [selectedBackup, setSelectedBackup] = useState(null); + // Read user's time format preference from settings + const [timeFormat] = useLocalStorage('time-format', '12h'); + const is12Hour = timeFormat === '12h'; + // Schedule state const [schedule, setSchedule] = useState({ enabled: false, @@ -77,6 +107,10 @@ export default function BackupManager() { const [scheduleSaving, setScheduleSaving] = useState(false); const [scheduleChanged, setScheduleChanged] = useState(false); + // For 12-hour display mode + const [displayTime, setDisplayTime] = useState('3:00'); + const [timePeriod, setTimePeriod] = useState('AM'); + const loadBackups = async () => { setLoading(true); try { @@ -99,6 +133,10 @@ export default function BackupManager() { const settings = await API.getBackupSchedule(); setSchedule(settings); setScheduleChanged(false); + // Initialize 12-hour display values from the loaded time + const { time, period } = to12Hour(settings.time); + setDisplayTime(time); + setTimePeriod(period); } catch (error) { // Ignore errors on initial load - settings may not exist yet } finally { @@ -116,6 +154,26 @@ export default function BackupManager() { setScheduleChanged(true); }; + // Handle time changes in 12-hour mode + const handleTimeChange12h = (newTime, newPeriod) => { + const time = newTime ?? displayTime; + const period = newPeriod ?? timePeriod; + setDisplayTime(time); + setTimePeriod(period); + // Convert to 24h and update schedule + const time24 = to24Hour(time, period); + handleScheduleChange('time', time24); + }; + + // Handle time changes in 24-hour mode + const handleTimeChange24h = (value) => { + handleScheduleChange('time', value); + // Also update 12h display state in case user switches formats + const { time, period } = to12Hour(value); + setDisplayTime(time); + setTimePeriod(period); + }; + const handleSaveSchedule = async () => { setScheduleSaving(true); try { @@ -290,12 +348,36 @@ export default function BackupManager() { ]} disabled={!schedule.enabled} /> - handleScheduleChange('time', e.currentTarget.value)} - disabled={!schedule.enabled} - /> + {is12Hour ? ( + + handleTimeChange12h(e.currentTarget.value, null)} + placeholder="3:00" + disabled={!schedule.enabled} + style={{ flex: 2 }} + /> + Date: Sun, 30 Nov 2025 00:39:30 +1100 Subject: [PATCH 03/70] fix: allow all IPv6 CIDRs by default This change ensures that by default, IPv6 clients can connect to the service unless explicitly denied. 
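As a minimal illustration of the new default (a sketch only, not the project's actual network_access_allowed helper in dispatcharr/utils.py — the function name and settings shape below are assumptions), allowing both "0.0.0.0/0" and "::/0" means clients of either IP family pass the CIDR check unless an operator overrides the list:

    import ipaddress

    # Illustrative default allow-list: every IPv4 and every IPv6 client.
    DEFAULT_CIDRS = ["0.0.0.0/0", "::/0"]

    def client_allowed(client_ip: str, cidrs=None) -> bool:
        """Return True if client_ip falls inside any configured CIDR."""
        addr = ipaddress.ip_address(client_ip)
        for cidr in (cidrs or DEFAULT_CIDRS):
            net = ipaddress.ip_network(cidr.strip(), strict=False)
            # Same-family check: an IPv6 address is never "in" an IPv4 network,
            # so with only "0.0.0.0/0" configured an IPv6 client is rejected.
            if addr.version == net.version and addr in net:
                return True
        return False

    # client_allowed("2001:db8::1", ["0.0.0.0/0"]) -> False
    # client_allowed("2001:db8::1", ["0.0.0.0/0", "::/0"]) -> True
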
Fixes #593 --- dispatcharr/utils.py | 2 +- frontend/src/pages/Settings.jsx | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dispatcharr/utils.py b/dispatcharr/utils.py index 260515fc8..56243b7ac 100644 --- a/dispatcharr/utils.py +++ b/dispatcharr/utils.py @@ -44,7 +44,7 @@ def network_access_allowed(request, settings_key): cidrs = ( network_access[settings_key].split(",") if settings_key in network_access - else ["0.0.0.0/0"] + else ["0.0.0.0/0", "::/0"] ) network_allowed = False diff --git a/frontend/src/pages/Settings.jsx b/frontend/src/pages/Settings.jsx index 10f6f5a2a..5c25897a8 100644 --- a/frontend/src/pages/Settings.jsx +++ b/frontend/src/pages/Settings.jsx @@ -278,7 +278,7 @@ const SettingsPage = () => { const networkAccessForm = useForm({ mode: 'controlled', initialValues: Object.keys(NETWORK_ACCESS_OPTIONS).reduce((acc, key) => { - acc[key] = '0.0.0.0/0,::0/0'; + acc[key] = '0.0.0.0/0,::/0'; return acc; }, {}), validate: Object.keys(NETWORK_ACCESS_OPTIONS).reduce((acc, key) => { @@ -358,7 +358,7 @@ const SettingsPage = () => { ); networkAccessForm.setValues( Object.keys(NETWORK_ACCESS_OPTIONS).reduce((acc, key) => { - acc[key] = networkAccessSettings[key] || '0.0.0.0/0,::0/0'; + acc[key] = networkAccessSettings[key] || '0.0.0.0/0,::/0'; return acc; }, {}) ); From 43949c3ef432d14fb9add2c7dca56aa3709959a6 Mon Sep 17 00:00:00 2001 From: 3l3m3nt Date: Sun, 30 Nov 2025 19:30:47 +1300 Subject: [PATCH 04/70] Added IPv6 port bind to nginx.conf --- docker/nginx.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/nginx.conf b/docker/nginx.conf index 5e754d20c..020bc99ad 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -3,6 +3,7 @@ proxy_cache_path /app/logo_cache levels=1:2 keys_zone=logo_cache:10m server { listen NGINX_PORT; + listen [::]:NGINX_PORT; proxy_connect_timeout 75; proxy_send_timeout 300; From 641dcfc21e40cb05d42123f0276ebcc705c46608 Mon Sep 17 00:00:00 2001 From: GitHub Copilot Date: Sun, 30 Nov 2025 19:20:25 +0000 Subject: [PATCH 05/70] Add sorting functionality to Group and M3U columns in Streams table - Added m3u_account__name to backend ordering_fields in StreamViewSet - Implemented field mapping in frontend to convert column IDs to backend field names - Added sort buttons to both Group and M3U columns with proper icons - Sort buttons show current sort state (ascending/descending/none) - Maintains consistent UX with existing Name column sorting --- apps/channels/api_views.py | 2 +- .../src/components/tables/StreamsTable.jsx | 87 ++++++++++++------- 2 files changed, 56 insertions(+), 33 deletions(-) diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index bc9205379..eccc50280 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -124,7 +124,7 @@ class StreamViewSet(viewsets.ModelViewSet): filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] filterset_class = StreamFilter search_fields = ["name", "channel_group__name"] - ordering_fields = ["name", "channel_group__name"] + ordering_fields = ["name", "channel_group__name", "m3u_account__name"] ordering = ["-name"] def get_permissions(self): diff --git a/frontend/src/components/tables/StreamsTable.jsx b/frontend/src/components/tables/StreamsTable.jsx index d309552ca..a0ae1f5e2 100644 --- a/frontend/src/components/tables/StreamsTable.jsx +++ b/frontend/src/components/tables/StreamsTable.jsx @@ -385,7 +385,14 @@ const StreamsTable = () => { // Apply sorting if (sorting.length > 0) { - const sortField = sorting[0].id; + const 
columnId = sorting[0].id; + // Map frontend column IDs to backend field names + const fieldMapping = { + name: 'name', + group: 'channel_group__name', + m3u: 'm3u_account__name', + }; + const sortField = fieldMapping[columnId] || columnId; const sortDirection = sorting[0].desc ? '-' : ''; params.append('ordering', `${sortDirection}${sortField}`); } @@ -747,41 +754,57 @@ const StreamsTable = () => { case 'group': return ( - - - + + + + +
+ {React.createElement(sortingIcon, { + onClick: () => onSortingChange('group'), + size: 14, + })} +
+
); case 'm3u': return ( - - ({ + label: playlist.name, + value: `${playlist.id}`, + }))} + variant="unstyled" + className="table-input-header" + /> + +
+ {React.createElement(sortingIcon, { + onClick: () => onSortingChange('m3u'), + size: 14, + })} +
+ ); } }; From cf08e54bd822e38e42779e7702d774807a92571f Mon Sep 17 00:00:00 2001 From: root Date: Mon, 1 Dec 2025 18:11:58 +0000 Subject: [PATCH 06/70] Fix sorting functionality for Group and M3U columns - Add missing header properties to group and m3u columns - Fix layout issues with sort buttons (proper flex layout, remove blocking onClick) - Fix sorting state initialization (use boolean instead of empty string) - Fix sorting comparison operators (use strict equality) - Fix 3rd click behavior to return to default sort instead of clearing - Map frontend column IDs to backend field names for proper API requests --- .../src/components/tables/StreamsTable.jsx | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/frontend/src/components/tables/StreamsTable.jsx b/frontend/src/components/tables/StreamsTable.jsx index a0ae1f5e2..ca3ee21f2 100644 --- a/frontend/src/components/tables/StreamsTable.jsx +++ b/frontend/src/components/tables/StreamsTable.jsx @@ -182,7 +182,7 @@ const StreamsTable = () => { const [pageCount, setPageCount] = useState(0); const [paginationString, setPaginationString] = useState(''); const [isLoading, setIsLoading] = useState(true); - const [sorting, setSorting] = useState([{ id: 'name', desc: '' }]); + const [sorting, setSorting] = useState([{ id: 'name', desc: true }]); const [selectedStreamIds, setSelectedStreamIds] = useState([]); // Channel numbering modal state @@ -298,6 +298,7 @@ const StreamsTable = () => { ), }, { + header: 'Group', id: 'group', accessorFn: (row) => channelGroups[row.channel_group] @@ -319,6 +320,7 @@ const StreamsTable = () => { ), }, { + header: 'M3U', id: 'm3u', size: columnSizing.m3u || 150, accessorFn: (row) => @@ -698,8 +700,8 @@ const StreamsTable = () => { const sortField = sorting[0]?.id; const sortDirection = sorting[0]?.desc; - if (sortField == column) { - if (sortDirection == false) { + if (sortField === column) { + if (sortDirection === false) { setSorting([ { id: column, @@ -707,7 +709,8 @@ const StreamsTable = () => { }, ]); } else { - setSorting([]); + // Reset to default sort (name descending) instead of clearing + setSorting([{ id: 'name', desc: true }]); } } else { setSorting([ @@ -754,8 +757,8 @@ const StreamsTable = () => { case 'group': return ( - - + + { clearable /> -
+
{React.createElement(sortingIcon, { onClick: () => onSortingChange('group'), size: 14, @@ -780,8 +783,8 @@ const StreamsTable = () => { case 'm3u': return ( - - + + )} handleScheduleChange('retention_count', value || 0)} min={0} disabled={!schedule.enabled} /> - + - + )} - - - - - - - Backups - - - - - + + + + + + + + + + + + + - Create Backup - - - - - {loading ? ( - - - - ) : backups.length === 0 ? ( - No backups found. Create one to get started! - ) : ( - - - - - - - - - - - {backups.map((backup) => ( - - - - - - - ))} - -
FilenameSizeCreatedActions
- - {backup.name} - - - {formatBytes(backup.size)} - - {formatDate(backup.created)} - - - - - - - - - - - - -
- )} + {loading ? ( + + + + ) : backups.length === 0 ? ( + + No backups found. Create one to get started. + + ) : ( + + + + Filename + Size + Created + Actions + + + + {backups.map((backup) => ( + + + + {backup.name} + + + + {formatBytes(backup.size)} + + + {formatDate(backup.created)} + + + + + handleDownload(backup.name)} + loading={downloading === backup.name} + disabled={downloading !== null} + > + + + + + handleRestoreClick(backup)} + > + + + + + handleDeleteClick(backup)} + > + + + + + + + ))} + +
+ )} +
+
+ - + - @@ -557,7 +592,6 @@ export default function BackupManager() { message={`Are you sure you want to restore from "${selectedBackup?.name}"? This will replace all current data with the backup data. This action cannot be undone.`} confirmLabel="Restore" cancelLabel="Cancel" - color="orange" /> ); From 70cf8928c457e162bb738480ac2ac80a1c4084af Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Tue, 2 Dec 2025 22:01:59 -0600 Subject: [PATCH 12/70] Use CustomTable component for backup list --- .../src/components/backups/BackupManager.jsx | 174 +++++++++++------- 1 file changed, 112 insertions(+), 62 deletions(-) diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx index f538d7daf..1e4d4fb8b 100644 --- a/frontend/src/components/backups/BackupManager.jsx +++ b/frontend/src/components/backups/BackupManager.jsx @@ -1,8 +1,9 @@ -import { useEffect, useState } from 'react'; +import { useEffect, useMemo, useState } from 'react'; import { ActionIcon, Box, Button, + Center, FileInput, Flex, Group, @@ -13,7 +14,6 @@ import { Select, Stack, Switch, - Table, Text, TextInput, Tooltip, @@ -31,6 +31,7 @@ import { notifications } from '@mantine/notifications'; import API from '../../api'; import ConfirmationDialog from '../ConfirmationDialog'; import useLocalStorage from '../../hooks/useLocalStorage'; +import { CustomTable, useTable } from '../tables/CustomTable'; // Convert 24h time string to 12h format with period function to12Hour(time24) { @@ -111,6 +112,112 @@ export default function BackupManager() { const [displayTime, setDisplayTime] = useState('3:00'); const [timePeriod, setTimePeriod] = useState('AM'); + const columns = useMemo( + () => [ + { + header: 'Filename', + accessorKey: 'name', + size: 250, + cell: ({ cell }) => ( + + {cell.getValue()} + + ), + }, + { + header: 'Size', + accessorKey: 'size', + size: 100, + cell: ({ cell }) => ( + {formatBytes(cell.getValue())} + ), + }, + { + header: 'Created', + accessorKey: 'created', + size: 175, + cell: ({ cell }) => ( + {formatDate(cell.getValue())} + ), + }, + { + id: 'actions', + header: 'Actions', + size: 150, + }, + ], + [] + ); + + const renderHeaderCell = (header) => { + if (header.id === 'actions') { + return ( +
+ {header.column.columnDef.header} +
+ ); + } + return ( + + {header.column.columnDef.header} + + ); + }; + + const renderBodyCell = ({ cell, row }) => { + if (cell.column.id === 'actions') { + return ( +
+ + handleDownload(row.original.name)} + loading={downloading === row.original.name} + disabled={downloading !== null} + > + + + + + handleRestoreClick(row.original)} + > + + + + + handleDeleteClick(row.original)} + > + + + +
+ ); + } + return null; + }; + + const table = useTable({ + columns, + data: backups, + allRowIds: backups.map((b) => b.name), + bodyCellRenderFns: { + actions: renderBodyCell, + }, + headerCellRenderFns: { + name: renderHeaderCell, + size: renderHeaderCell, + created: renderHeaderCell, + actions: renderHeaderCell, + }, + }); + const loadBackups = async () => { setLoading(true); try { @@ -483,66 +590,9 @@ export default function BackupManager() { No backups found. Create one to get started. ) : ( - - - - Filename - Size - Created - Actions - - - - {backups.map((backup) => ( - - - - {backup.name} - - - - {formatBytes(backup.size)} - - - {formatDate(backup.created)} - - - - - handleDownload(backup.name)} - loading={downloading === backup.name} - disabled={downloading !== null} - > - - - - - handleRestoreClick(backup)} - > - - - - - handleDeleteClick(backup)} - > - - - - - - - ))} - -
+
+ +
)} From e64002dfc4bb6de00d38256e77520b6fd83c8630 Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Tue, 2 Dec 2025 22:19:20 -0600 Subject: [PATCH 13/70] Refactor BackupManager to match app table conventions --- .../src/components/backups/BackupManager.jsx | 143 ++++++++++-------- 1 file changed, 77 insertions(+), 66 deletions(-) diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx index 1e4d4fb8b..46bd06890 100644 --- a/frontend/src/components/backups/BackupManager.jsx +++ b/frontend/src/components/backups/BackupManager.jsx @@ -3,7 +3,6 @@ import { ActionIcon, Box, Button, - Center, FileInput, Flex, Group, @@ -33,6 +32,45 @@ import ConfirmationDialog from '../ConfirmationDialog'; import useLocalStorage from '../../hooks/useLocalStorage'; import { CustomTable, useTable } from '../tables/CustomTable'; +const RowActions = ({ row, handleDownload, handleRestoreClick, handleDeleteClick, downloading }) => { + return ( + + + handleDownload(row.original.name)} + loading={downloading === row.original.name} + disabled={downloading !== null} + > + + + + + handleRestoreClick(row.original)} + > + + + + + handleDeleteClick(row.original)} + > + + + + + ); +}; + // Convert 24h time string to 12h format with period function to12Hour(time24) { if (!time24) return { time: '12:00', period: 'AM' }; @@ -94,6 +132,7 @@ export default function BackupManager() { // Read user's time format preference from settings const [timeFormat] = useLocalStorage('time-format', '12h'); + const [tableSize] = useLocalStorage('table-size', 'default'); const is12Hour = timeFormat === '12h'; // Schedule state @@ -117,17 +156,23 @@ export default function BackupManager() { { header: 'Filename', accessorKey: 'name', - size: 250, + grow: true, cell: ({ cell }) => ( - +
{cell.getValue()} - +
), }, { header: 'Size', accessorKey: 'size', - size: 100, + size: 80, cell: ({ cell }) => ( {formatBytes(cell.getValue())} ), @@ -135,7 +180,7 @@ export default function BackupManager() { { header: 'Created', accessorKey: 'created', - size: 175, + size: 160, cell: ({ cell }) => ( {formatDate(cell.getValue())} ), @@ -143,20 +188,13 @@ export default function BackupManager() { { id: 'actions', header: 'Actions', - size: 150, + size: tableSize === 'compact' ? 75 : 100, }, ], - [] + [tableSize] ); const renderHeaderCell = (header) => { - if (header.id === 'actions') { - return ( -
- {header.column.columnDef.header} -
- ); - } return ( {header.column.columnDef.header} @@ -165,42 +203,18 @@ export default function BackupManager() { }; const renderBodyCell = ({ cell, row }) => { - if (cell.column.id === 'actions') { - return ( -
- - handleDownload(row.original.name)} - loading={downloading === row.original.name} - disabled={downloading !== null} - > - - - - - handleRestoreClick(row.original)} - > - - - - - handleDeleteClick(row.original)} - > - - - -
- ); + switch (cell.column.id) { + case 'actions': + return ( + + ); } - return null; }; const table = useTable({ @@ -435,7 +449,7 @@ export default function BackupManager() { ) : ( <> - + handleScheduleChange('day_of_week', parseInt(value, 10))} + data={DAYS_OF_WEEK} + disabled={!schedule.enabled} + /> + )} {is12Hour ? ( handleTimeChange12h(e.currentTarget.value, null)} placeholder="3:00" disabled={!schedule.enabled} - style={{ flex: 2 }} /> handleScheduleChange('day_of_week', parseInt(value, 10))} - data={DAYS_OF_WEEK} - disabled={!schedule.enabled} - /> - )} handleScheduleChange('retention_count', value || 0)} min={0} disabled={!schedule.enabled} /> - - - + )} From 81b657036611633a9828232a3cf3fd297c06604f Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Wed, 3 Dec 2025 17:03:58 -0600 Subject: [PATCH 14/70] Fix name not sorting. --- frontend/src/components/tables/StreamsTable.jsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/src/components/tables/StreamsTable.jsx b/frontend/src/components/tables/StreamsTable.jsx index db8b43bca..3e497f99a 100644 --- a/frontend/src/components/tables/StreamsTable.jsx +++ b/frontend/src/components/tables/StreamsTable.jsx @@ -183,7 +183,7 @@ const StreamsTable = () => { const [pageCount, setPageCount] = useState(0); const [paginationString, setPaginationString] = useState(''); const [isLoading, setIsLoading] = useState(true); - const [sorting, setSorting] = useState([{ id: 'name', desc: true }]); + const [sorting, setSorting] = useState([{ id: 'name', desc: false }]); const [selectedStreamIds, setSelectedStreamIds] = useState([]); // Channel numbering modal state @@ -710,8 +710,8 @@ const StreamsTable = () => { }, ]); } else { - // Reset to default sort (name descending) instead of clearing - setSorting([{ id: 'name', desc: true }]); + // Reset to default sort (name ascending) instead of clearing + setSorting([{ id: 'name', desc: false }]); } } else { setSorting([ From 5fce83fb5126e9a76df5bbd12c03ef60d333b2a8 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Wed, 3 Dec 2025 17:13:50 -0600 Subject: [PATCH 15/70] style: Adjust table header and input components for consistent width --- .../tables/CustomTable/CustomTableHeader.jsx | 1 + .../src/components/tables/StreamsTable.jsx | 102 ++++++++++-------- 2 files changed, 56 insertions(+), 47 deletions(-) diff --git a/frontend/src/components/tables/CustomTable/CustomTableHeader.jsx b/frontend/src/components/tables/CustomTable/CustomTableHeader.jsx index 92643fc93..004687dd9 100644 --- a/frontend/src/components/tables/CustomTable/CustomTableHeader.jsx +++ b/frontend/src/components/tables/CustomTable/CustomTableHeader.jsx @@ -105,6 +105,7 @@ const CustomTableHeader = ({ ...(header.column.columnDef.style && header.column.columnDef.style), height: '100%', + width: '100%', paddingRight: header.column.getCanResize() ? '8px' : '0px', // Add padding for resize handle }} > diff --git a/frontend/src/components/tables/StreamsTable.jsx b/frontend/src/components/tables/StreamsTable.jsx index 3e497f99a..f3f4dc20e 100644 --- a/frontend/src/components/tables/StreamsTable.jsx +++ b/frontend/src/components/tables/StreamsTable.jsx @@ -736,7 +736,7 @@ const StreamsTable = () => { switch (header.id) { case 'name': return ( - + { variant="unstyled" className="table-input-header" leftSection={} - /> -
- {React.createElement(sortingIcon, { - onClick: () => onSortingChange('name'), + style={{ flex: 1, minWidth: 0 }} + rightSectionPointerEvents="auto" + rightSection={React.createElement(sortingIcon, { + onClick: (e) => { + e.stopPropagation(); + onSortingChange('name'); + }, size: 14, + style: { cursor: 'pointer' }, })} -
+ />
); case 'group': return ( - - - - -
- {React.createElement(sortingIcon, { - onClick: () => onSortingChange('group'), + + { + e.stopPropagation(); + onSortingChange('group'); + }, size: 14, + style: { cursor: 'pointer' }, })} -
+ />
); case 'm3u': return ( - - - ({ + label: playlist.name, + value: `${playlist.id}`, + }))} + variant="unstyled" + className="table-input-header" + style={{ flex: 1, minWidth: 0 }} + rightSectionPointerEvents="auto" + rightSection={React.createElement(sortingIcon, { + onClick: (e) => { + e.stopPropagation(); + onSortingChange('m3u'); + }, size: 14, + style: { cursor: 'pointer' }, })} -
+ /> ); } From 2de6ac5da1802566f220a7abb359943b8a2c61f3 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Wed, 3 Dec 2025 17:31:16 -0600 Subject: [PATCH 16/70] changelog: Add sort buttons for 'Group' and 'M3U' columns in Streams table --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8501b1229..501c0aea4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Sort buttons for 'Group' and 'M3U' columns in Streams table for improved stream organization and filtering - Thanks [@bobey6](https://github.com/bobey6) + ### Changed - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) From 2a8ba9125c053de988bb5a7a446118f83e3c5223 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 4 Dec 2025 14:07:28 -0600 Subject: [PATCH 17/70] perf: optimize EPG program parsing for multi-channel sources Dramatically improve EPG refresh performance by parsing the XML file once per source instead of once per channel. The new implementation: - Pre-filters to only process EPG entries mapped to actual channels - Parses the entire XML file in a single pass - Uses O(1) set lookups to skip unmapped channel programmes - Skips non-mapped channels entirely with minimal overhead For EPG sources with many channels but few mapped (e.g., 10,000 channels with 100 mapped to channels), this provides approximately: - 99% reduction in file open operations - 99% reduction in XML file scans - Proportional reduction in CPU and I/O overhead The parse_programs_for_tvg_id() function is retained for single-channel use cases (e.g., when a new channel is mapped via signals). Fixes inefficient repeated file parsing that was occurring with large EPG sources. --- CHANGELOG.md | 1 + apps/epg/tasks.py | 277 ++++++++++++++++++++++++++++++++++------------ 2 files changed, 208 insertions(+), 70 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 501c0aea4..651005394 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed +- **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 59d658b19..3ed222d93 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1393,11 +1393,23 @@ def parse_programs_for_tvg_id(epg_id): def parse_programs_for_source(epg_source, tvg_id=None): + """ + Parse programs for all MAPPED channels from an EPG source in a single pass. + + This is an optimized version that: + 1. Only processes EPG entries that are actually mapped to channels + 2. Parses the XML file ONCE instead of once per channel + 3. Skips programmes for unmapped channels entirely during parsing + + This dramatically improves performance when an EPG source has many channels + but only a fraction are mapped. 
+ """ # Send initial programs parsing notification send_epg_update(epg_source.id, "parsing_programs", 0) should_log_memory = False process = None initial_memory = 0 + source_file = None # Add memory tracking only in trace mode or higher try: @@ -1417,82 +1429,199 @@ def parse_programs_for_source(epg_source, tvg_id=None): should_log_memory = False try: - # Process EPG entries in batches rather than all at once - batch_size = 20 # Process fewer channels at once to reduce memory usage - epg_count = EPGData.objects.filter(epg_source=epg_source).count() + # Only get EPG entries that are actually mapped to channels + mapped_epg_ids = set( + Channel.objects.filter( + epg_data__epg_source=epg_source, + epg_data__isnull=False + ).values_list('epg_data_id', flat=True) + ) - if epg_count == 0: - logger.info(f"No EPG entries found for source: {epg_source.name}") - # Update status - this is not an error, just no entries + if not mapped_epg_ids: + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} " + f"(source has {total_epg_count} EPG entries, 0 mapped)") + # Update status - this is not an error, just no mapped entries epg_source.status = 'success' - epg_source.save(update_fields=['status']) + epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)" + epg_source.save(update_fields=['status', 'last_message']) send_epg_update(epg_source.id, "parsing_programs", 100, status="success") return True - logger.info(f"Parsing programs for {epg_count} EPG entries from source: {epg_source.name}") + # Get the mapped EPG entries with their tvg_ids + mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id') + tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']} + mapped_tvg_ids = set(tvg_id_to_epg_id.keys()) - failed_entries = [] - program_count = 0 - channel_count = 0 - updated_count = 0 - processed = 0 - # Process in batches using cursor-based approach to limit memory usage - last_id = 0 - while True: - # Get a batch of EPG entries - batch_entries = list(EPGData.objects.filter( - epg_source=epg_source, - id__gt=last_id - ).order_by('id')[:batch_size]) + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + mapped_count = len(mapped_tvg_ids) - if not batch_entries: - break # No more entries to process + logger.info(f"Parsing programs for {mapped_count} MAPPED channels from source: {epg_source.name} " + f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)") - # Update last_id for next iteration - last_id = batch_entries[-1].id + # Get the file path + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + if not file_path: + file_path = epg_source.get_cache_file() - # Process this batch - for epg in batch_entries: - if epg.tvg_id: - try: - result = parse_programs_for_tvg_id(epg.id) - if result == "Task already running": - logger.info(f"Program parse for {epg.id} already in progress, skipping") + # Check if the file exists + if not os.path.exists(file_path): + logger.error(f"EPG file not found at: {file_path}") - processed += 1 - progress = min(95, int((processed / epg_count) * 100)) if epg_count > 0 else 50 - send_epg_update(epg_source.id, "parsing_programs", progress) - except Exception as e: - logger.error(f"Error parsing programs for tvg_id={epg.tvg_id}: {e}", exc_info=True) - failed_entries.append(f"{epg.tvg_id}: 
{str(e)}") + if epg_source.url: + # Update the file path in the database + new_path = epg_source.get_cache_file() + logger.info(f"Updating file_path from '{file_path}' to '{new_path}'") + epg_source.file_path = new_path + epg_source.save(update_fields=['file_path']) + logger.info(f"Fetching new EPG data from URL: {epg_source.url}") - # Force garbage collection after each batch - batch_entries = None # Remove reference to help garbage collection - gc.collect() + # Fetch new data before continuing + fetch_success = fetch_xmltv(epg_source) + + if not fetch_success: + logger.error(f"Failed to fetch EPG data for source: {epg_source.name}") + epg_source.status = 'error' + epg_source.last_message = f"Failed to download EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file") + return False + + # Update file_path with the new location + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + else: + logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data") + epg_source.status = 'error' + epg_source.last_message = f"No URL provided, cannot fetch EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided") + return False + + # Delete existing programs for all mapped EPGs in one query + logger.info(f"Deleting existing programs for {mapped_count} mapped EPG entries...") + deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0] + logger.info(f"Deleted {deleted_count} existing programs") - # If there were failures, include them in the message but continue - if failed_entries: - epg_source.status = EPGSource.STATUS_SUCCESS # Still mark as success if some processed - error_summary = f"Failed to parse {len(failed_entries)} of {epg_count} entries" - stats_summary = f"Processed {program_count} programs across {channel_count} channels. Updated: {updated_count}." 
- epg_source.last_message = f"{stats_summary} Warning: {error_summary}" - epg_source.updated_at = timezone.now() - epg_source.save(update_fields=['status', 'last_message', 'updated_at']) - - # Send completion notification with mixed status - send_epg_update(epg_source.id, "parsing_programs", 100, - status="success", - message=epg_source.last_message) - - # Explicitly release memory of large lists before returning - del failed_entries + # SINGLE PASS PARSING: Parse the XML file once and process all mapped channels + programs_to_create = [] + programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel + total_programs = 0 + skipped_programs = 0 + batch_size = 1000 + last_progress_update = 0 + + try: + logger.debug(f"Opening file for single-pass parsing: {file_path}") + source_file = open(file_path, 'rb') + + # Stream parse the file using lxml's iterparse + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) + + for _, elem in program_parser: + channel_id = elem.get('channel') + + # Skip programmes for unmapped channels immediately + if channel_id not in mapped_tvg_ids: + skipped_programs += 1 + # Clear element to free memory + clear_element(elem) + continue + + # This programme is for a mapped channel - process it + try: + start_time = parse_xmltv_time(elem.get('start')) + end_time = parse_xmltv_time(elem.get('stop')) + title = None + desc = None + sub_title = None + + # Efficiently process child elements + for child in elem: + if child.tag == 'title': + title = child.text or 'No Title' + elif child.tag == 'desc': + desc = child.text or '' + elif child.tag == 'sub-title': + sub_title = child.text or '' + + if not title: + title = 'No Title' + + # Extract custom properties + custom_props = extract_custom_properties(elem) + custom_properties_json = custom_props if custom_props else None + + epg_id = tvg_id_to_epg_id[channel_id] + programs_to_create.append(ProgramData( + epg_id=epg_id, + start_time=start_time, + end_time=end_time, + title=title, + description=desc, + sub_title=sub_title, + tvg_id=channel_id, + custom_properties=custom_properties_json + )) + total_programs += 1 + programs_by_channel[channel_id] += 1 + + # Clear the element to free memory + clear_element(elem) + + # Batch processing + if len(programs_to_create) >= batch_size: + ProgramData.objects.bulk_create(programs_to_create) + logger.debug(f"Saved batch of {len(programs_to_create)} programs (total: {total_programs})") + programs_to_create = [] + + # Send progress update (estimate based on programs processed) + # We don't know total programs upfront, so use a rough estimate + if total_programs - last_progress_update >= 5000: + last_progress_update = total_programs + # Cap at 90% until we're done + progress = min(90, 10 + int((total_programs / max(total_programs + 10000, 1)) * 80)) + send_epg_update(epg_source.id, "parsing_programs", progress, + processed=total_programs, channels=mapped_count) + + # Periodic garbage collection + if total_programs % (batch_size * 5) == 0: + gc.collect() + + except Exception as e: + logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True) + clear_element(elem) + continue + + # Process any remaining items + if programs_to_create: + ProgramData.objects.bulk_create(programs_to_create) + logger.debug(f"Saved final batch of {len(programs_to_create)} programs") + + except etree.XMLSyntaxError as xml_error: + logger.error(f"XML syntax error parsing program data: {xml_error}") + 
epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"XML parsing error: {str(xml_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error)) + return False + except Exception as e: + logger.error(f"Error parsing XML for programs: {e}", exc_info=True) + raise + finally: + if source_file: + source_file.close() + source_file = None gc.collect() - return True + # Count channels that actually got programs + channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0) - # If all successful, set a comprehensive success message + # Success message epg_source.status = EPGSource.STATUS_SUCCESS - epg_source.last_message = f"Successfully processed {program_count} programs across {channel_count} channels. Updated: {updated_count}." + epg_source.last_message = ( + f"Parsed {total_programs:,} programs for {channels_with_programs} channels " + f"(skipped {skipped_programs:,} programmes for {total_epg_count - mapped_count} unmapped channels)" + ) epg_source.updated_at = timezone.now() epg_source.save(update_fields=['status', 'last_message', 'updated_at']) @@ -1500,9 +1629,10 @@ def parse_programs_for_source(epg_source, tvg_id=None): log_system_event( event_type='epg_refresh', source_name=epg_source.name, - programs=program_count, - channels=channel_count, - updated=updated_count, + programs=total_programs, + channels=channels_with_programs, + skipped_programs=skipped_programs, + unmapped_channels=total_epg_count - mapped_count, ) # Send completion notification with status @@ -1510,7 +1640,9 @@ def parse_programs_for_source(epg_source, tvg_id=None): status="success", message=epg_source.last_message) - logger.info(f"Completed parsing all programs for source: {epg_source.name}") + logger.info(f"Completed parsing programs for source: {epg_source.name} - " + f"{total_programs:,} programs for {channels_with_programs} channels, " + f"skipped {skipped_programs:,} programmes for unmapped channels") return True except Exception as e: @@ -1525,14 +1657,19 @@ def parse_programs_for_source(epg_source, tvg_id=None): return False finally: # Final memory cleanup and tracking - + if source_file: + try: + source_file.close() + except: + pass + source_file = None # Explicitly release any remaining large data structures - failed_entries = None - program_count = None - channel_count = None - updated_count = None - processed = None + programs_to_create = None + programs_by_channel = None + mapped_epg_ids = None + mapped_tvg_ids = None + tvg_id_to_epg_id = None gc.collect() # Add comprehensive memory cleanup at the end From 256ac2f55ab64a6f56f041957f1ae73f8ef6357f Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 4 Dec 2025 14:25:44 -0600 Subject: [PATCH 18/70] Enhancement: Clean up orphaned programs for unmapped EPG entries --- CHANGELOG.md | 2 +- apps/epg/tasks.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 651005394..9bc21c7c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed -- **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. 
For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. +- **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 3ed222d93..4a4adddd6 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1502,6 +1502,17 @@ def parse_programs_for_source(epg_source, tvg_id=None): deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0] logger.info(f"Deleted {deleted_count} existing programs") + # Clean up orphaned programs for unmapped EPG entries + # These accumulate if a channel is unmapped after being mapped + unmapped_epg_ids = EPGData.objects.filter( + epg_source=epg_source + ).exclude(id__in=mapped_epg_ids).values_list('id', flat=True) + + if unmapped_epg_ids: + orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0] + if orphaned_count > 0: + logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries") + # SINGLE PASS PARSING: Parse the XML file once and process all mapped channels programs_to_create = [] programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel From 5693ee7f9e3431d1b1fe86df48bc5e9951439a2a Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 4 Dec 2025 14:57:57 -0600 Subject: [PATCH 19/70] perf: optimize EPG program parsing and implement atomic database updates to reduce I/O overhead and prevent partial data visibility --- CHANGELOG.md | 2 +- apps/epg/tasks.py | 105 +++++++++++++++++++++++++++------------------- 2 files changed, 63 insertions(+), 44 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bc21c7c7..e46bffe19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed -- **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. +- **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. 
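The atomic update called out in that changelog entry is, at its core, a parse-first / swap-later pattern: collect every new program row in memory, then delete and re-insert inside a single transaction so API clients never observe a half-empty guide while the refresh runs. A minimal sketch of that pattern, assuming Django's `transaction.atomic` and a `ProgramData` model importable from `apps.epg.models` as in this repo; this is an illustration of the idea, not the exact code applied in the patch below:

```python
from django.db import transaction

from apps.epg.models import ProgramData  # assumed import path


def atomic_replace_programs(epg_ids, new_programs, batch_size=1000):
    """Replace all programs for the given EPG entries in one transaction."""
    with transaction.atomic():
        # At the default isolation level, other connections keep seeing the
        # old rows until this transaction commits, so there is no window
        # where the guide appears empty or partially loaded.
        ProgramData.objects.filter(epg_id__in=epg_ids).delete()
        # Insert in batches to keep individual INSERT statements small.
        for i in range(0, len(new_programs), batch_size):
            ProgramData.objects.bulk_create(new_programs[i:i + batch_size])
```

The trade-off is memory: all new rows are held in RAM before the swap, which the actual patch mitigates with streaming iterparse, periodic `gc.collect()`, and batched `bulk_create` inside the transaction.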
- IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 4a4adddd6..9fa999cd1 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1497,28 +1497,13 @@ def parse_programs_for_source(epg_source, tvg_id=None): send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided") return False - # Delete existing programs for all mapped EPGs in one query - logger.info(f"Deleting existing programs for {mapped_count} mapped EPG entries...") - deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0] - logger.info(f"Deleted {deleted_count} existing programs") - - # Clean up orphaned programs for unmapped EPG entries - # These accumulate if a channel is unmapped after being mapped - unmapped_epg_ids = EPGData.objects.filter( - epg_source=epg_source - ).exclude(id__in=mapped_epg_ids).values_list('id', flat=True) - - if unmapped_epg_ids: - orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0] - if orphaned_count > 0: - logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries") - - # SINGLE PASS PARSING: Parse the XML file once and process all mapped channels - programs_to_create = [] + # SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory + # We parse FIRST, then do an atomic delete+insert to avoid race conditions + # where clients might see empty/partial EPG data during the transition + all_programs_to_create = [] programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel total_programs = 0 skipped_programs = 0 - batch_size = 1000 last_progress_update = 0 try: @@ -1563,7 +1548,7 @@ def parse_programs_for_source(epg_source, tvg_id=None): custom_properties_json = custom_props if custom_props else None epg_id = tvg_id_to_epg_id[channel_id] - programs_to_create.append(ProgramData( + all_programs_to_create.append(ProgramData( epg_id=epg_id, start_time=start_time, end_time=end_time, @@ -1579,35 +1564,23 @@ def parse_programs_for_source(epg_source, tvg_id=None): # Clear the element to free memory clear_element(elem) - # Batch processing - if len(programs_to_create) >= batch_size: - ProgramData.objects.bulk_create(programs_to_create) - logger.debug(f"Saved batch of {len(programs_to_create)} programs (total: {total_programs})") - programs_to_create = [] - - # Send progress update (estimate based on programs processed) - # We don't know total programs upfront, so use a rough estimate - if total_programs - last_progress_update >= 5000: - last_progress_update = total_programs - # Cap at 90% until we're done - progress = min(90, 10 + int((total_programs / max(total_programs + 10000, 1)) * 80)) - send_epg_update(epg_source.id, "parsing_programs", progress, - processed=total_programs, channels=mapped_count) - - # Periodic garbage collection - if total_programs % (batch_size * 5) == 0: - gc.collect() + # Send progress update (estimate based on programs processed) + if total_programs - last_progress_update >= 5000: + last_progress_update = total_programs + # Cap at 70% during parsing phase (save 30% for DB operations) + progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60)) + send_epg_update(epg_source.id, "parsing_programs", progress, + 
processed=total_programs, channels=mapped_count) + + # Periodic garbage collection during parsing + if total_programs % 5000 == 0: + gc.collect() except Exception as e: logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True) clear_element(elem) continue - # Process any remaining items - if programs_to_create: - ProgramData.objects.bulk_create(programs_to_create) - logger.debug(f"Saved final batch of {len(programs_to_create)} programs") - except etree.XMLSyntaxError as xml_error: logger.error(f"XML syntax error parsing program data: {xml_error}") epg_source.status = EPGSource.STATUS_ERROR @@ -1622,6 +1595,52 @@ def parse_programs_for_source(epg_source, tvg_id=None): if source_file: source_file.close() source_file = None + + # Now perform atomic delete + bulk insert + # This ensures clients never see empty/partial EPG data + logger.info(f"Parsed {total_programs} programs, performing atomic database update...") + send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...") + + batch_size = 1000 + try: + with transaction.atomic(): + # Delete existing programs for mapped EPGs + deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0] + logger.debug(f"Deleted {deleted_count} existing programs") + + # Clean up orphaned programs for unmapped EPG entries + unmapped_epg_ids = list(EPGData.objects.filter( + epg_source=epg_source + ).exclude(id__in=mapped_epg_ids).values_list('id', flat=True)) + + if unmapped_epg_ids: + orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0] + if orphaned_count > 0: + logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries") + + # Bulk insert all new programs in batches within the same transaction + for i in range(0, len(all_programs_to_create), batch_size): + batch = all_programs_to_create[i:i + batch_size] + ProgramData.objects.bulk_create(batch) + + # Update progress during insertion + progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95 + if i % (batch_size * 5) == 0: + send_epg_update(epg_source.id, "parsing_programs", min(95, progress), + message=f"Inserting programs... 
{i}/{len(all_programs_to_create)}") + + logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs") + + except Exception as db_error: + logger.error(f"Database error during atomic update: {db_error}", exc_info=True) + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"Database error: {str(db_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error)) + return False + finally: + # Clear the large list to free memory + all_programs_to_create = None gc.collect() # Count channels that actually got programs From 6c8270d0e582ea7667db75036b354de72e972673 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 4 Dec 2025 15:28:21 -0600 Subject: [PATCH 20/70] Enhancement: Add support for 'extracting' status and display additional progress information in EPGsTable --- CHANGELOG.md | 1 + frontend/src/components/tables/EPGsTable.jsx | 63 +++++++++++++++++--- 2 files changed, 56 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e46bffe19..0de26314b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. 
+- EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) diff --git a/frontend/src/components/tables/EPGsTable.jsx b/frontend/src/components/tables/EPGsTable.jsx index 71e920e07..b8dfeb6d1 100644 --- a/frontend/src/components/tables/EPGsTable.jsx +++ b/frontend/src/components/tables/EPGsTable.jsx @@ -160,6 +160,9 @@ const EPGsTable = () => { case 'downloading': label = 'Downloading'; break; + case 'extracting': + label = 'Extracting'; + break; case 'parsing_channels': label = 'Parsing Channels'; break; @@ -170,6 +173,22 @@ const EPGsTable = () => { return null; } + // Build additional info string from progress data + let additionalInfo = ''; + if (progress.message) { + additionalInfo = progress.message; + } else if ( + progress.processed !== undefined && + progress.channels !== undefined + ) { + additionalInfo = `${progress.processed.toLocaleString()} programs for ${progress.channels} channels`; + } else if ( + progress.processed !== undefined && + progress.total !== undefined + ) { + additionalInfo = `${progress.processed.toLocaleString()} / ${progress.total.toLocaleString()}`; + } + return ( @@ -181,7 +200,14 @@ const EPGsTable = () => { style={{ margin: '2px 0' }} /> {progress.speed && ( - Speed: {parseInt(progress.speed)} KB/s + + Speed: {parseInt(progress.speed)} KB/s + + )} + {additionalInfo && ( + + {additionalInfo} + )} ); @@ -286,14 +312,35 @@ const EPGsTable = () => { // Show success message for successful sources if (data.status === 'success') { + const successMessage = + data.last_message || 'EPG data refreshed successfully'; return ( - - EPG data refreshed successfully - + + + {successMessage} + + + ); + } + + // Show last_message for idle sources (from previous refresh) + if (data.status === 'idle' && data.last_message) { + return ( + + + {data.last_message} + + ); } From 3b34fb11ef0d401c07be61ec8f0c76228fdf3485 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 4 Dec 2025 15:43:33 -0600 Subject: [PATCH 21/70] Fix: Fixes bug where Updated column wouldn't update in the EPG table without a webui refresh. --- CHANGELOG.md | 3 ++- apps/epg/tasks.py | 3 ++- frontend/src/WebSocket.jsx | 10 ++++++++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0de26314b..8a77b7b72 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. 
-- EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) +- EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) (Closes #214) +- EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 9fa999cd1..c565dbf50 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1668,7 +1668,8 @@ def parse_programs_for_source(epg_source, tvg_id=None): # Send completion notification with status send_epg_update(epg_source.id, "parsing_programs", 100, status="success", - message=epg_source.last_message) + message=epg_source.last_message, + updated_at=epg_source.updated_at.isoformat()) logger.info(f"Completed parsing programs for source: {epg_source.name} - " f"{total_programs:,} programs for {channels_with_programs} channels, " diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index f2e28ae93..40035d33b 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -574,7 +574,7 @@ export const WebsocketProvider = ({ children }) => { const sourceId = parsedEvent.data.source || parsedEvent.data.account; const epg = epgs[sourceId]; - + // Only update progress if the EPG still exists in the store // This prevents crashes when receiving updates for deleted EPGs if (epg) { @@ -582,7 +582,9 @@ export const WebsocketProvider = ({ children }) => { updateEPGProgress(parsedEvent.data); } else { // EPG was deleted, ignore this update - console.debug(`Ignoring EPG refresh update for deleted EPG ${sourceId}`); + console.debug( + `Ignoring EPG refresh update for deleted EPG ${sourceId}` + ); break; } @@ -621,6 +623,10 @@ export const WebsocketProvider = ({ children }) => { status: parsedEvent.data.status || 'success', last_message: parsedEvent.data.message || epg.last_message, + // Use the timestamp from the backend if provided + ...(parsedEvent.data.updated_at && { + updated_at: parsedEvent.data.updated_at, + }), }); // Only show success notification if we've finished parsing programs and had no errors From 0d177e44f8ce8cdb2e7dba10bc9266f2b973849e Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Thu, 4 Dec 2025 15:45:09 -0600 Subject: [PATCH 22/70] changelog: Change updated change to bug fix instead of change. --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a77b7b72..0b95749ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,10 +15,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. 
Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. - EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) (Closes #214) -- EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) +### Fixed + +- EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh + ## [0.13.0] - 2025-12-02 ### Added From c1d960138e4f455543caac34e0b6ef8ca16911e8 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 5 Dec 2025 09:02:03 -0600 Subject: [PATCH 23/70] Fix: Bulk channel editor confirmation dialog now shows the correct stream profile that will be set. --- CHANGELOG.md | 1 + frontend/src/components/forms/ChannelBatch.jsx | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b95749ed..0b0223f9e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh +- Bulk channel editor confirmation dialog now displays the correct stream profile name that will be applied to the selected channels. ## [0.13.0] - 2025-12-02 diff --git a/frontend/src/components/forms/ChannelBatch.jsx b/frontend/src/components/forms/ChannelBatch.jsx index e42d418cc..a1cebe54b 100644 --- a/frontend/src/components/forms/ChannelBatch.jsx +++ b/frontend/src/components/forms/ChannelBatch.jsx @@ -135,8 +135,10 @@ const ChannelBatchForm = ({ channelIds, isOpen, onClose }) => { if (values.stream_profile_id === '0') { changes.push(`• Stream Profile: Use Default`); } else { - const profileName = - streamProfiles[values.stream_profile_id]?.name || 'Selected Profile'; + const profile = streamProfiles.find( + (p) => `${p.id}` === `${values.stream_profile_id}` + ); + const profileName = profile?.name || 'Selected Profile'; changes.push(`• Stream Profile: ${profileName}`); } } From 759569b871973253c89dd7b625e1272fe5e9c7eb Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Fri, 5 Dec 2025 09:54:11 -0600 Subject: [PATCH 24/70] Enhancement: Add a priority field to EPGSource and prefer higher-priority sources when matching channels. 
Also ignore EPG sources where is_active is false during matching, and update serializers/forms/frontend accordingly.(Closes #603, #672) --- CHANGELOG.md | 3 ++ apps/channels/tasks.py | 37 ++++++++++++++----- .../epg/migrations/0021_epgsource_priority.py | 18 +++++++++ apps/epg/models.py | 4 ++ apps/epg/serializers.py | 1 + frontend/src/components/forms/EPG.jsx | 28 ++++++++++---- 6 files changed, 73 insertions(+), 18 deletions(-) create mode 100644 apps/epg/migrations/0021_epgsource_priority.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b0223f9e..d58c0ce1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Sort buttons for 'Group' and 'M3U' columns in Streams table for improved stream organization and filtering - Thanks [@bobey6](https://github.com/bobey6) +- EPG source priority field for controlling which EPG source is preferred when multiple sources have matching entries for a channel (higher numbers = higher priority) (Closes #603) ### Changed @@ -17,6 +18,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) (Closes #214) - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) +- EPG matching now respects source priority and only uses active (enabled) EPG sources (Closes #672) +- EPG form API Key field now only visible when Schedules Direct source type is selected ### Fixed diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 5a9528a78..7ca73ac28 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -295,7 +295,11 @@ def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True if score > 50: # Only show decent matches logger.debug(f" EPG '{row['name']}' (norm: '{row['norm_name']}') => score: {score} (base: {base_score}, bonus: {bonus})") - if score > best_score: + # When scores are equal, prefer higher priority EPG source + row_priority = row.get('epg_source_priority', 0) + best_priority = best_epg.get('epg_source_priority', 0) if best_epg else -1 + + if score > best_score or (score == best_score and row_priority > best_priority): best_score = score best_epg = row @@ -471,9 +475,9 @@ def match_epg_channels(): "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! 
}) - # Get all EPG data + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches epg_data = [] - for epg in EPGData.objects.all(): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data.append({ 'id': epg.id, @@ -482,9 +486,13 @@ def match_epg_channels(): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) - logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries") + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries (from active sources only)") # Run EPG matching with progress updates - automatically uses conservative thresholds for bulk operations result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) @@ -618,9 +626,9 @@ def match_selected_channels_epg(channel_ids): "norm_chan": normalize_name(channel.name) }) - # Get all EPG data + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches epg_data = [] - for epg in EPGData.objects.all(): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data.append({ 'id': epg.id, @@ -629,9 +637,13 @@ def match_selected_channels_epg(channel_ids): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) - logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries") + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries (from active sources only)") # Run EPG matching with progress updates - automatically uses appropriate thresholds result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) @@ -749,9 +761,10 @@ def match_single_channel_epg(channel_id): test_normalized = normalize_name(test_name) logger.debug(f"DEBUG normalization example: '{test_name}' → '{test_normalized}' (call sign preserved)") - # Get all EPG data for matching - must include norm_name field + # Get all EPG data for matching from active sources - must include norm_name field + # Ordered by source priority (highest first) so we prefer higher priority matches epg_data_list = [] - for epg in EPGData.objects.filter(name__isnull=False).exclude(name=''): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True, name__isnull=False).exclude(name=''): normalized_epg_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data_list.append({ 'id': epg.id, @@ -760,10 +773,14 @@ def match_single_channel_epg(channel_id): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 
'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data_list.sort(key=lambda x: x['epg_source_priority'], reverse=True) + if not epg_data_list: - return {"matched": False, "message": "No EPG data available for matching"} + return {"matched": False, "message": "No EPG data available for matching (from active sources)"} logger.info(f"Matching single channel '{channel.name}' against {len(epg_data_list)} EPG entries") diff --git a/apps/epg/migrations/0021_epgsource_priority.py b/apps/epg/migrations/0021_epgsource_priority.py new file mode 100644 index 000000000..f2696d674 --- /dev/null +++ b/apps/epg/migrations/0021_epgsource_priority.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-12-05 15:24 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0020_migrate_time_to_starttime_placeholders'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='priority', + field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'), + ), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index e5f3847bd..b3696edc3 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -45,6 +45,10 @@ class EPGSource(models.Model): null=True, help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)" ) + priority = models.PositiveIntegerField( + default=0, + help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel." + ) status = models.CharField( max_length=20, choices=STATUS_CHOICES, diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index bfb750fc0..e4d5f4668 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -24,6 +24,7 @@ class Meta: 'is_active', 'file_path', 'refresh_interval', + 'priority', 'status', 'last_message', 'created_at', diff --git a/frontend/src/components/forms/EPG.jsx b/frontend/src/components/forms/EPG.jsx index db4f8310a..50c8553c8 100644 --- a/frontend/src/components/forms/EPG.jsx +++ b/frontend/src/components/forms/EPG.jsx @@ -29,6 +29,7 @@ const EPG = ({ epg = null, isOpen, onClose }) => { api_key: '', is_active: true, refresh_interval: 24, + priority: 0, }, validate: { @@ -69,6 +70,7 @@ const EPG = ({ epg = null, isOpen, onClose }) => { api_key: epg.api_key, is_active: epg.is_active, refresh_interval: epg.refresh_interval, + priority: epg.priority ?? 
0, }; form.setValues(values); setSourceType(epg.source_type); @@ -148,14 +150,24 @@ const EPG = ({ epg = null, isOpen, onClose }) => { key={form.key('url')} /> - + )} + + {/* Put checkbox at the same level as Refresh Interval */} From f3a901cb3a50f16f104598ec615cd6bd1a2ffc35 Mon Sep 17 00:00:00 2001 From: dekzter Date: Sat, 6 Dec 2025 13:40:10 -0500 Subject: [PATCH 25/70] Security Fix - generate JWT on application init --- dispatcharr/settings.py | 2 +- docker/entrypoint.sh | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index d6c29dd9c..5f8c23e27 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -4,7 +4,7 @@ BASE_DIR = Path(__file__).resolve().parent.parent -SECRET_KEY = "REPLACE_ME_WITH_A_REAL_SECRET" +SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY") REDIS_HOST = os.environ.get("REDIS_HOST", "localhost") REDIS_DB = os.environ.get("REDIS_DB", "0") diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index fa0eea01f..9c3ec88c0 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -40,6 +40,22 @@ export REDIS_DB=${REDIS_DB:-0} export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' export LD_LIBRARY_PATH='/usr/local/lib' +export SECRET_FILE="/data/jwt" + +if [ ! -f "$SECRET_FILE" ]; then + umask 077 + tmpfile="$(mktemp "${SECRET_FILE}.XXXXXX")" || { echo "mktemp failed"; exit 1; } + python3 - <<'PY' >"$tmpfile" || { echo "secret generation failed"; rm -f "$tmpfile"; exit 1; } +import secrets +print(secrets.token_urlsafe(64)) +PY + mv -f "$tmpfile" "$SECRET_FILE" || { echo "move failed"; rm -f "$tmpfile"; exit 1; } +fi + +chown $PUID:$PGID "$SECRET_FILE" || true +chmod 600 "$SECRET_FILE" || true + +export DJANGO_SECRET_KEY="$(cat "$SECRET_FILE")" # Process priority configuration # UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority) @@ -90,7 +106,7 @@ if [[ ! 
-f /etc/profile.d/dispatcharr.sh ]]; then DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH - CELERY_NICE_LEVEL UWSGI_NICE_LEVEL + CELERY_NICE_LEVEL UWSGI_NICE_LEVEL DJANGO_SECRET_KEY ) # Process each variable for both profile.d and environment From 10f329d67380eca9a619d1208d51ab88920c9601 Mon Sep 17 00:00:00 2001 From: dekzter Date: Sat, 6 Dec 2025 13:42:48 -0500 Subject: [PATCH 26/70] release notes for built --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f082eb81..f9b7b4504 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Fixed + +- JWT token generated so is unique for each deployment + ## [0.13.0] - 2025-12-02 ### Added From a9120552551860b40ecff123fc68fc3a803234cc Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Sat, 6 Dec 2025 18:43:16 +0000 Subject: [PATCH 27/70] Release v0.13.1 --- CHANGELOG.md | 2 ++ version.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9b7b4504..bf381879a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.13.1] - 2025-12-06 + ### Fixed - JWT token generated so is unique for each deployment diff --git a/version.py b/version.py index b27fed864..f017df852 100644 --- a/version.py +++ b/version.py @@ -1,5 +1,5 @@ """ Dispatcharr version information. """ -__version__ = '0.13.0' # Follow semantic versioning (MAJOR.MINOR.PATCH) +__version__ = '0.13.1' # Follow semantic versioning (MAJOR.MINOR.PATCH) __timestamp__ = None # Set during CI/CD build process From d0edc3fa072f726b3f6a6117a1ea16b38f8eeda3 Mon Sep 17 00:00:00 2001 From: dekzter Date: Sun, 7 Dec 2025 07:54:30 -0500 Subject: [PATCH 28/70] remove permission lines to see if this resolves lack of django secret key in environment profile.d --- docker/entrypoint.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 9c3ec88c0..df1584b09 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -52,9 +52,6 @@ PY mv -f "$tmpfile" "$SECRET_FILE" || { echo "move failed"; rm -f "$tmpfile"; exit 1; } fi -chown $PUID:$PGID "$SECRET_FILE" || true -chmod 600 "$SECRET_FILE" || true - export DJANGO_SECRET_KEY="$(cat "$SECRET_FILE")" # Process priority configuration From 3512c3a6233844b41ed2c8132bb0fdcfdf3f3740 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sun, 7 Dec 2025 19:05:31 -0600 Subject: [PATCH 29/70] Add DJANGO_SECRET_KEY environment variable to uwsgi configuration files --- docker/uwsgi.debug.ini | 2 +- docker/uwsgi.dev.ini | 1 + docker/uwsgi.ini | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index 3de890a53..1d7cca936 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -20,7 +20,7 @@ module = scripts.debug_wrapper:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings - +env = DJANGO_SECRET_KEY=$(DJANGO_SECRET_KEY) socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini index e476e216c..1ef9709ec 100644 --- a/docker/uwsgi.dev.ini +++ b/docker/uwsgi.dev.ini @@ -22,6 +22,7 @@ module 
= dispatcharr.wsgi:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings +env = DJANGO_SECRET_KEY=$(DJANGO_SECRET_KEY) socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index f8fe8ab7c..bb359b064 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -21,6 +21,7 @@ module = dispatcharr.wsgi:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings +env = DJANGO_SECRET_KEY=$(DJANGO_SECRET_KEY) socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true From cf37c6fd9869e8210589a8a99331da702030c2db Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sun, 7 Dec 2025 19:06:45 -0600 Subject: [PATCH 30/70] changelog: Updated changelog for 0.13.1 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb8324f15..90db90c62 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed +- JWT token generated so is unique for each deployment + ## [0.13.0] - 2025-12-02 ### Added From 2155229d7f0ce6ad079e3b802588822f1ef1b6b4 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sun, 7 Dec 2025 19:40:32 -0600 Subject: [PATCH 31/70] Fix uwsgi command path in entrypoint script --- docker/entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index df1584b09..088bcd1ef 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -200,7 +200,7 @@ fi # Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose # Start with nice as root, then use setpriv to drop privileges to dispatch user # This preserves both the nice value and environment variables -nice -n $UWSGI_NICE_LEVEL su -p - "$POSTGRES_USER" -c "cd /app && exec uwsgi $uwsgi_args" & uwsgi_pid=$! +nice -n $UWSGI_NICE_LEVEL su - "$POSTGRES_USER" -c "cd /app && exec /dispatcharrpy/bin/uwsgi $uwsgi_args" & uwsgi_pid=$! echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)" pids+=("$uwsgi_pid") From e2736babaae4db7393560f7c1ae50d49a611baf9 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sun, 7 Dec 2025 20:04:58 -0600 Subject: [PATCH 32/70] Reset umask after creating secret file. --- docker/entrypoint.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 088bcd1ef..72eb59282 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -41,8 +41,10 @@ export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' export LD_LIBRARY_PATH='/usr/local/lib' export SECRET_FILE="/data/jwt" - +# Ensure Django secret key exists or generate a new one if [ ! -f "$SECRET_FILE" ]; then + echo "Generating new Django secret key..." 
+ old_umask=$(umask) umask 077 tmpfile="$(mktemp "${SECRET_FILE}.XXXXXX")" || { echo "mktemp failed"; exit 1; } python3 - <<'PY' >"$tmpfile" || { echo "secret generation failed"; rm -f "$tmpfile"; exit 1; } @@ -50,8 +52,8 @@ import secrets print(secrets.token_urlsafe(64)) PY mv -f "$tmpfile" "$SECRET_FILE" || { echo "move failed"; rm -f "$tmpfile"; exit 1; } + umask $old_umask fi - export DJANGO_SECRET_KEY="$(cat "$SECRET_FILE")" # Process priority configuration From ce70b04097cb5fa0e52f500035fad4f7dcab73f5 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sun, 7 Dec 2025 20:56:59 -0600 Subject: [PATCH 33/70] changelog: update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90db90c62..8efdc30cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh - Bulk channel editor confirmation dialog now displays the correct stream profile name that will be applied to the selected channels. +- uWSGI not found and 502 bad gateway on first startup ## [0.13.1] - 2025-12-06 From c03ddf60a09175631e868bd5d647ba1484426ad2 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Sun, 7 Dec 2025 21:28:04 -0600 Subject: [PATCH 34/70] Fixed verbiage for epg parsing status. --- apps/epg/tasks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index c565dbf50..bd78c6a33 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1650,7 +1650,7 @@ def parse_programs_for_source(epg_source, tvg_id=None): epg_source.status = EPGSource.STATUS_SUCCESS epg_source.last_message = ( f"Parsed {total_programs:,} programs for {channels_with_programs} channels " - f"(skipped {skipped_programs:,} programmes for {total_epg_count - mapped_count} unmapped channels)" + f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)" ) epg_source.updated_at = timezone.now() epg_source.save(update_fields=['status', 'last_message', 'updated_at']) @@ -1672,8 +1672,8 @@ def parse_programs_for_source(epg_source, tvg_id=None): updated_at=epg_source.updated_at.isoformat()) logger.info(f"Completed parsing programs for source: {epg_source.name} - " - f"{total_programs:,} programs for {channels_with_programs} channels, " - f"skipped {skipped_programs:,} programmes for unmapped channels") + f"{total_programs:,} programs for {channels_with_programs} channels, " + f"skipped {skipped_programs:,} programs for unmapped channels") return True except Exception as e: From 43b55e2d9913a71ffec5a1998079e7ea2dd4be3c Mon Sep 17 00:00:00 2001 From: dekzter Date: Mon, 8 Dec 2025 08:38:39 -0500 Subject: [PATCH 35/70] first run at hiding disabled channels in channel profiles --- apps/channels/api_views.py | 21 ++++++++++++++++ .../src/components/tables/ChannelsTable.jsx | 24 +++++++++++++++---- .../ChannelsTable/ChannelTableHeader.jsx | 14 +++++++++++ 3 files changed, 54 insertions(+), 5 deletions(-) diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index eccc50280..4cfe97772 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -419,6 +419,27 @@ def get_queryset(self): group_names = channel_group.split(",") qs = qs.filter(channel_group__name__in=group_names) + channel_profile_id = self.request.query_params.get("channel_profile_id") + 
show_disabled_param = self.request.query_params.get("show_disabled", None) + + if channel_profile_id: + try: + profile_id_int = int(channel_profile_id) + # If show_disabled is present, include all memberships for that profile. + # If absent, restrict to enabled=True. + if show_disabled_param is None: + qs = qs.filter( + channelprofilemembership__channel_profile_id=profile_id_int, + channelprofilemembership__enabled=True, + ) + else: + qs = qs.filter( + channelprofilemembership__channel_profile_id=profile_id_int + ) + except (ValueError, TypeError): + # Ignore invalid profile id values + pass + if self.request.user.user_level < 10: qs = qs.filter(user_level__lte=self.request.user.user_level) diff --git a/frontend/src/components/tables/ChannelsTable.jsx b/frontend/src/components/tables/ChannelsTable.jsx index 9b9958f73..949b97606 100644 --- a/frontend/src/components/tables/ChannelsTable.jsx +++ b/frontend/src/components/tables/ChannelsTable.jsx @@ -68,7 +68,7 @@ const epgUrlBase = `${window.location.protocol}//${window.location.host}/output/ const hdhrUrlBase = `${window.location.protocol}//${window.location.host}/hdhr`; const ChannelEnabledSwitch = React.memo( - ({ rowId, selectedProfileId, selectedTableIds }) => { + ({ rowId, selectedProfileId, selectedTableIds, setSelectedTableIds }) => { // Directly extract the channels set once to avoid re-renders on every change. const isEnabled = useChannelsStore( useCallback( @@ -79,16 +79,20 @@ const ChannelEnabledSwitch = React.memo( ) ); - const handleToggle = () => { + const handleToggle = async () => { if (selectedTableIds.length > 1) { - API.updateProfileChannels( + await API.updateProfileChannels( selectedTableIds, selectedProfileId, !isEnabled ); } else { - API.updateProfileChannel(rowId, selectedProfileId, !isEnabled); + await API.updateProfileChannel(rowId, selectedProfileId, !isEnabled); } + + setSelectedTableIds([]); + + return API.requeryChannels(); }; return ( @@ -289,6 +293,7 @@ const ChannelsTable = ({}) => { const [selectedProfile, setSelectedProfile] = useState( profiles[selectedProfileId] ); + const [showDisabled, setShowDisabled] = useState(true); const [paginationString, setPaginationString] = useState(''); const [filters, setFilters] = useState({ @@ -369,6 +374,12 @@ const ChannelsTable = ({}) => { params.append('page', pagination.pageIndex + 1); params.append('page_size', pagination.pageSize); params.append('include_streams', 'true'); + if (selectedProfileId !== '0') { + params.append('channel_profile_id', selectedProfileId); + } + if (showDisabled === true) { + params.append('show_disabled', true); + } // Apply sorting if (sorting.length > 0) { @@ -401,7 +412,7 @@ const ChannelsTable = ({}) => { pageSize: pagination.pageSize, }); setAllRowIds(ids); - }, [pagination, sorting, debouncedFilters]); + }, [pagination, sorting, debouncedFilters, showDisabled, selectedProfileId]); const stopPropagation = useCallback((e) => { e.stopPropagation(); @@ -728,6 +739,7 @@ const ChannelsTable = ({}) => { rowId={row.original.id} selectedProfileId={selectedProfileId} selectedTableIds={table.getState().selectedTableIds} + setSelectedTableIds={table.setSelectedTableIds} /> ); }, @@ -1326,6 +1338,8 @@ const ChannelsTable = ({}) => { deleteChannels={deleteChannels} selectedTableIds={table.selectedTableIds} table={table} + showDisabled={showDisabled} + setShowDisabled={setShowDisabled} /> {/* Table or ghost empty state inside Paper */} diff --git a/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx 
b/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx index b7e04d7dd..d3376b4d5 100644 --- a/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx +++ b/frontend/src/components/tables/ChannelsTable/ChannelTableHeader.jsx @@ -26,6 +26,8 @@ import { SquarePen, SquarePlus, Settings, + Eye, + EyeOff, } from 'lucide-react'; import API from '../../../api'; import { notifications } from '@mantine/notifications'; @@ -102,6 +104,8 @@ const ChannelTableHeader = ({ editChannel, deleteChannels, selectedTableIds, + showDisabled, + setShowDisabled, }) => { const theme = useMantineTheme(); @@ -208,6 +212,10 @@ const ChannelTableHeader = ({ ); }; + const toggleShowDisabled = () => { + setShowDisabled(!showDisabled); + }; + return ( @@ -226,6 +234,12 @@ const ChannelTableHeader = ({ + + + + Date: Mon, 8 Dec 2025 17:27:07 -0600 Subject: [PATCH 36/70] Remove DJANGO_SECRET_KEY environment variable from uwsgi configuration files --- docker/uwsgi.debug.ini | 1 - docker/uwsgi.dev.ini | 1 - docker/uwsgi.ini | 1 - 3 files changed, 3 deletions(-) diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index 1d7cca936..69c040f22 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -20,7 +20,6 @@ module = scripts.debug_wrapper:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings -env = DJANGO_SECRET_KEY=$(DJANGO_SECRET_KEY) socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/docker/uwsgi.dev.ini b/docker/uwsgi.dev.ini index 1ef9709ec..e476e216c 100644 --- a/docker/uwsgi.dev.ini +++ b/docker/uwsgi.dev.ini @@ -22,7 +22,6 @@ module = dispatcharr.wsgi:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings -env = DJANGO_SECRET_KEY=$(DJANGO_SECRET_KEY) socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index bb359b064..f8fe8ab7c 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -21,7 +21,6 @@ module = dispatcharr.wsgi:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings -env = DJANGO_SECRET_KEY=$(DJANGO_SECRET_KEY) socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true From 98b29f97a1df25395cf72cbb9612c54c06367870 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 8 Dec 2025 17:49:40 -0600 Subject: [PATCH 37/70] changelog: Update verbiage --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8efdc30cb..347bb22ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed -- **Performance**: EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. +- EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. 
For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. - EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) (Closes #214) - IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) - nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) From 4df4e5f963606c66c137cc1b969355cdcf47e2bd Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Tue, 9 Dec 2025 00:01:50 +0000 Subject: [PATCH 38/70] Release v0.14.0 --- CHANGELOG.md | 2 ++ version.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 347bb22ae..4716c250f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.14.0] - 2025-12-09 + ### Added - Sort buttons for 'Group' and 'M3U' columns in Streams table for improved stream organization and filtering - Thanks [@bobey6](https://github.com/bobey6) diff --git a/version.py b/version.py index f017df852..807fc629a 100644 --- a/version.py +++ b/version.py @@ -1,5 +1,5 @@ """ Dispatcharr version information. """ -__version__ = '0.13.1' # Follow semantic versioning (MAJOR.MINOR.PATCH) +__version__ = '0.14.0' # Follow semantic versioning (MAJOR.MINOR.PATCH) __timestamp__ = None # Set during CI/CD build process From 69f9ecd93c7868f57fd9e58c8339cc7adef969b9 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 8 Dec 2025 20:12:44 -0600 Subject: [PATCH 39/70] Bug Fix: Remove ipv6 binding from nginx config if ipv6 is not available. 
--- CHANGELOG.md | 4 ++++ docker/init/03-init-dispatcharr.sh | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4716c250f..389bb8ad5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Fixed + +- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup + ## [0.14.0] - 2025-12-09 ### Added diff --git a/docker/init/03-init-dispatcharr.sh b/docker/init/03-init-dispatcharr.sh index 5fbef23d1..da7d44840 100644 --- a/docker/init/03-init-dispatcharr.sh +++ b/docker/init/03-init-dispatcharr.sh @@ -29,9 +29,17 @@ if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then chown $PUID:$PGID /app fi fi - +# Configure nginx port sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default +# Configure nginx based on IPv6 availability +if ip -6 addr show | grep -q "inet6"; then + echo "✅ IPv6 is available, enabling IPv6 in nginx" +else + echo "⚠️ IPv6 not available, disabling IPv6 in nginx" + sed -i '/listen \[::\]:/d' /etc/nginx/sites-enabled/default +fi + # NOTE: mac doesn't run as root, so only manage permissions # if this script is running as root if [ "$(id -u)" = "0" ]; then From 514e7e06e4dfcdb8d24ed0eddfd3cf67cc2a7a49 Mon Sep 17 00:00:00 2001 From: SergeantPanda Date: Mon, 8 Dec 2025 20:50:50 -0600 Subject: [PATCH 40/70] Bug fix: EPG API now returns correct date/time format for start/end fields and proper string types for timestamps and channel_id --- CHANGELOG.md | 1 + apps/output/views.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 389bb8ad5..e363135f4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Fixed - nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup +- XtreamCodes EPG API now returns correct date/time format for start/end fields and proper string types for timestamps and channel_id ## [0.14.0] - 2025-12-09 diff --git a/apps/output/views.py b/apps/output/views.py index bc2bace5f..3a8406cb7 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -2316,18 +2316,18 @@ def xc_get_epg(request, user, short=False): "epg_id": f"{epg_id}", "title": base64.b64encode(title.encode()).decode(), "lang": "", - "start": start.strftime("%Y%m%d%H%M%S"), - "end": end.strftime("%Y%m%d%H%M%S"), + "start": start.strftime("%Y-%m-%d %H:%M:%S"), + "end": end.strftime("%Y-%m-%d %H:%M:%S"), "description": base64.b64encode(description.encode()).decode(), - "channel_id": channel_num_int, - "start_timestamp": int(start.timestamp()), - "stop_timestamp": int(end.timestamp()), + "channel_id": str(channel_num_int), + "start_timestamp": str(int(start.timestamp())), + "stop_timestamp": str(int(end.timestamp())), "stream_id": f"{channel_id}", } if short == False: program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0 - program_output["has_archive"] = "0" + program_output["has_archive"] = 0 output['epg_listings'].append(program_output) From e8fb01ebdd23071818da88040ec8bee7b08cebfc Mon Sep 17 00:00:00 2001 From: DawtCom Date: Mon, 8 Dec 2025 21:50:13 -0600 Subject: [PATCH 41/70] Removing unreachable code --- apps/output/views.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/apps/output/views.py 
b/apps/output/views.py index bc2bace5f..07098268e 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -161,18 +161,7 @@ def generate_m3u(request, profile_name=None, user=None): channelprofilemembership__enabled=True ).order_by('channel_number') else: - if profile_name is not None: - try: - channel_profile = ChannelProfile.objects.get(name=profile_name) - except ChannelProfile.DoesNotExist: - logger.warning("Requested channel profile (%s) during m3u generation does not exist", profile_name) - raise Http404(f"Channel profile '{profile_name}' not found") - channels = Channel.objects.filter( - channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True, - ).order_by("channel_number") - else: - channels = Channel.objects.order_by("channel_number") + channels = Channel.objects.order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' From 806f78244df976bef7abd4f0402d763d1a1c36ee Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Tue, 9 Dec 2025 07:49:31 -0600 Subject: [PATCH 42/70] Add proper ConfirmationDialog usage to BackupManager - Import useWarningsStore from warnings store - Add suppressWarning hook to component - Add actionKey props to restore and delete confirmation dialogs - Add onSuppressChange callback to enable "Don't ask again" functionality This aligns BackupManager with the project's standard confirmation dialog pattern used throughout the codebase (ChannelsTable, EPGsTable, etc). --- frontend/src/components/backups/BackupManager.jsx | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx index 46bd06890..0246d47f0 100644 --- a/frontend/src/components/backups/BackupManager.jsx +++ b/frontend/src/components/backups/BackupManager.jsx @@ -30,6 +30,7 @@ import { notifications } from '@mantine/notifications'; import API from '../../api'; import ConfirmationDialog from '../ConfirmationDialog'; import useLocalStorage from '../../hooks/useLocalStorage'; +import useWarningsStore from '../../store/warnings'; import { CustomTable, useTable } from '../tables/CustomTable'; const RowActions = ({ row, handleDownload, handleRestoreClick, handleDeleteClick, downloading }) => { @@ -135,6 +136,9 @@ export default function BackupManager() { const [tableSize] = useLocalStorage('table-size', 'default'); const is12Hour = timeFormat === '12h'; + // Warning suppression for confirmation dialogs + const suppressWarning = useWarningsStore((s) => s.suppressWarning); + // Schedule state const [schedule, setSchedule] = useState({ enabled: false, @@ -653,6 +657,8 @@ export default function BackupManager() { message={`Are you sure you want to restore from "${selectedBackup?.name}"? This will replace all current data with the backup data. 
This action cannot be undone.`} confirmLabel="Restore" cancelLabel="Cancel" + actionKey="restore-backup" + onSuppressChange={suppressWarning} /> ); From d718e5a142c7374748efe5d0de2446c13ca56808 Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Tue, 9 Dec 2025 07:52:53 -0600 Subject: [PATCH 43/70] Implement timezone-aware backup scheduling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add timezone conversion functions (utcToLocal, localToUtc) - Use user's configured timezone from Settings (localStorage 'time-zone') - Convert times to UTC when saving to backend - Convert times from UTC to local when loading from backend - Display timezone info showing user's timezone and scheduled time - Helper text shows: "Timezone: America/New_York • Backup will run at 03:00" This addresses maintainer feedback to handle timezone properly: backend stores/schedules in UTC, frontend displays/edits in user's local time. --- .../src/components/backups/BackupManager.jsx | 73 +++++++++++++++++-- 1 file changed, 67 insertions(+), 6 deletions(-) diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx index 0246d47f0..3732ae094 100644 --- a/frontend/src/components/backups/BackupManager.jsx +++ b/frontend/src/components/backups/BackupManager.jsx @@ -97,6 +97,47 @@ function to24Hour(time12, period) { return `${String(hours24).padStart(2, '0')}:${String(minutes).padStart(2, '0')}`; } +// Convert UTC time (HH:MM) to local time (HH:MM) +function utcToLocal(utcTime) { + if (!utcTime) return '00:00'; + const [hours, minutes] = utcTime.split(':').map(Number); + + // Create a date in UTC + const date = new Date(); + date.setUTCHours(hours, minutes, 0, 0); + + // Get local time components + const localHours = date.getHours(); + const localMinutes = date.getMinutes(); + + return `${String(localHours).padStart(2, '0')}:${String(localMinutes).padStart(2, '0')}`; +} + +// Convert local time (HH:MM) to UTC time (HH:MM) +function localToUtc(localTime) { + if (!localTime) return '00:00'; + const [hours, minutes] = localTime.split(':').map(Number); + + // Create a date in local time + const date = new Date(); + date.setHours(hours, minutes, 0, 0); + + // Get UTC time components + const utcHours = date.getUTCHours(); + const utcMinutes = date.getUTCMinutes(); + + return `${String(utcHours).padStart(2, '0')}:${String(utcMinutes).padStart(2, '0')}`; +} + +// Get default timezone (same as Settings page) +function getDefaultTimeZone() { + try { + return Intl.DateTimeFormat().resolvedOptions().timeZone || 'UTC'; + } catch { + return 'UTC'; + } +} + const DAYS_OF_WEEK = [ { value: '0', label: 'Sunday' }, { value: '1', label: 'Monday' }, @@ -131,9 +172,10 @@ export default function BackupManager() { const [deleteConfirmOpen, setDeleteConfirmOpen] = useState(false); const [selectedBackup, setSelectedBackup] = useState(null); - // Read user's time format preference from settings + // Read user's preferences from settings const [timeFormat] = useLocalStorage('time-format', '12h'); const [tableSize] = useLocalStorage('table-size', 'default'); + const [userTimezone] = useLocalStorage('time-zone', getDefaultTimeZone()); const is12Hour = timeFormat === '12h'; // Warning suppression for confirmation dialogs @@ -256,10 +298,16 @@ export default function BackupManager() { setScheduleLoading(true); try { const settings = await API.getBackupSchedule(); - setSchedule(settings); + + // Convert UTC time from backend to local time + const localTime = 
utcToLocal(settings.time); + + // Store with local time for display + setSchedule({ ...settings, time: localTime }); setScheduleChanged(false); - // Initialize 12-hour display values from the loaded time - const { time, period } = to12Hour(settings.time); + + // Initialize 12-hour display values from the local time + const { time, period } = to12Hour(localTime); setDisplayTime(time); setTimePeriod(period); } catch (error) { @@ -302,9 +350,17 @@ export default function BackupManager() { const handleSaveSchedule = async () => { setScheduleSaving(true); try { - const updated = await API.updateBackupSchedule(schedule); - setSchedule(updated); + // Convert local time to UTC before sending to backend + const utcTime = localToUtc(schedule.time); + const scheduleToSave = { ...schedule, time: utcTime }; + + const updated = await API.updateBackupSchedule(scheduleToSave); + + // Convert UTC time from backend response back to local time + const localTime = utcToLocal(updated.time); + setSchedule({ ...updated, time: localTime }); setScheduleChanged(false); + notifications.show({ title: 'Success', message: 'Backup schedule saved', @@ -518,6 +574,11 @@ export default function BackupManager() { Save + {schedule.enabled && schedule.time && ( + + Timezone: {userTimezone} • Backup will run at {schedule.time} + + )} )} From 5fbcaa91e0906069c95f5ae166a2a75a774cc26d Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Tue, 9 Dec 2025 07:55:47 -0600 Subject: [PATCH 44/70] Add custom cron expression support for backup scheduling Frontend changes: - Add advanced mode toggle switch for cron expressions - Show cron expression input with helpful examples when enabled - Display format hints: "minute hour day month weekday" - Provide common examples (daily, weekly, every 6 hours, etc.) - Conditionally render simple or advanced scheduling UI - Support switching between simple and advanced modes Backend changes: - Add cron_expression to schedule settings (SETTING_KEYS, DEFAULTS) - Update get_schedule_settings to include cron_expression - Update update_schedule_settings to handle cron_expression - Extend _sync_periodic_task to parse and use cron expressions - Parse 5-part cron format: minute hour day_of_month month_of_year day_of_week - Create CrontabSchedule from cron expression or simple frequency - Add validation and error handling for invalid cron expressions This addresses maintainer feedback for "custom scheduler (cron style) for more control". Users can now schedule backups with full cron flexibility beyond daily/weekly. 
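In practice the 5-field expression maps directly onto django_celery_beat's CrontabSchedule fields, as the scheduler change below shows. A minimal standalone sketch of that mapping (the helper name crontab_from_expression is illustrative, not part of this patch; the validation message matches the one added in scheduler.py):

    from django_celery_beat.models import CrontabSchedule

    def crontab_from_expression(expression: str) -> CrontabSchedule:
        # Split "minute hour day month weekday" and hand the parts to
        # django_celery_beat; get_or_create avoids duplicate schedule rows.
        parts = expression.split()
        if len(parts) != 5:
            raise ValueError(
                "Cron expression must have 5 parts: minute hour day month weekday"
            )
        minute, hour, day_of_month, month_of_year, day_of_week = parts
        crontab, _ = CrontabSchedule.objects.get_or_create(
            minute=minute,
            hour=hour,
            day_of_week=day_of_week,
            day_of_month=day_of_month,
            month_of_year=month_of_year,
        )
        return crontab

For example, "0 */6 * * *" becomes a CrontabSchedule that fires at the top of every sixth hour, while the simple daily/weekly mode keeps building its crontab from the time and day_of_week settings as before.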
--- apps/backups/scheduler.py | 67 +++++--- .../src/components/backups/BackupManager.jsx | 160 +++++++++++++----- 2 files changed, 162 insertions(+), 65 deletions(-) diff --git a/apps/backups/scheduler.py b/apps/backups/scheduler.py index 52186e90d..b0b37567c 100644 --- a/apps/backups/scheduler.py +++ b/apps/backups/scheduler.py @@ -15,6 +15,7 @@ "time": "backup_schedule_time", "day_of_week": "backup_schedule_day_of_week", "retention_count": "backup_retention_count", + "cron_expression": "backup_schedule_cron_expression", } DEFAULTS = { @@ -23,6 +24,7 @@ "time": "03:00", "day_of_week": 0, # Sunday "retention_count": 0, + "cron_expression": "", } @@ -60,6 +62,7 @@ def get_schedule_settings() -> dict: "time": _get_setting("time"), "day_of_week": _get_setting("day_of_week"), "retention_count": _get_setting("retention_count"), + "cron_expression": _get_setting("cron_expression"), } @@ -88,7 +91,7 @@ def update_schedule_settings(data: dict) -> dict: raise ValueError("retention_count must be >= 0") # Update settings - for key in ("enabled", "frequency", "time", "day_of_week", "retention_count"): + for key in ("enabled", "frequency", "time", "day_of_week", "retention_count", "cron_expression"): if key in data: _set_setting(key, data[key]) @@ -108,26 +111,48 @@ def _sync_periodic_task() -> None: logger.info("Backup schedule disabled, removed periodic task") return - # Parse time - hour, minute = settings["time"].split(":") - - # Build crontab based on frequency - if settings["frequency"] == "daily": - crontab, _ = CrontabSchedule.objects.get_or_create( - minute=minute, - hour=hour, - day_of_week="*", - day_of_month="*", - month_of_year="*", - ) - else: # weekly - crontab, _ = CrontabSchedule.objects.get_or_create( - minute=minute, - hour=hour, - day_of_week=str(settings["day_of_week"]), - day_of_month="*", - month_of_year="*", - ) + # Check if using cron expression (advanced mode) + if settings["cron_expression"]: + # Parse cron expression: "minute hour day month weekday" + try: + parts = settings["cron_expression"].split() + if len(parts) != 5: + raise ValueError("Cron expression must have 5 parts: minute hour day month weekday") + + minute, hour, day_of_month, month_of_year, day_of_week = parts + + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=day_of_week, + day_of_month=day_of_month, + month_of_year=month_of_year, + ) + except Exception as e: + logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}") + raise ValueError(f"Invalid cron expression: {e}") + else: + # Use simple frequency-based scheduling + # Parse time + hour, minute = settings["time"].split(":") + + # Build crontab based on frequency + if settings["frequency"] == "daily": + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week="*", + day_of_month="*", + month_of_year="*", + ) + else: # weekly + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=str(settings["day_of_week"]), + day_of_month="*", + month_of_year="*", + ) # Create or update the periodic task task, created = PeriodicTask.objects.update_or_create( diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx index 3732ae094..dd47f7326 100644 --- a/frontend/src/components/backups/BackupManager.jsx +++ b/frontend/src/components/backups/BackupManager.jsx @@ -188,10 +188,12 @@ export default function BackupManager() { time: '03:00', day_of_week: 0, retention_count: 
0, + cron_expression: '', }); const [scheduleLoading, setScheduleLoading] = useState(false); const [scheduleSaving, setScheduleSaving] = useState(false); const [scheduleChanged, setScheduleChanged] = useState(false); + const [advancedMode, setAdvancedMode] = useState(false); // For 12-hour display mode const [displayTime, setDisplayTime] = useState('3:00'); @@ -299,17 +301,24 @@ export default function BackupManager() { try { const settings = await API.getBackupSchedule(); - // Convert UTC time from backend to local time - const localTime = utcToLocal(settings.time); + // Check if using cron expression (advanced mode) + if (settings.cron_expression) { + setAdvancedMode(true); + setSchedule(settings); + } else { + // Convert UTC time from backend to local time + const localTime = utcToLocal(settings.time); - // Store with local time for display - setSchedule({ ...settings, time: localTime }); - setScheduleChanged(false); + // Store with local time for display + setSchedule({ ...settings, time: localTime }); + + // Initialize 12-hour display values from the local time + const { time, period } = to12Hour(localTime); + setDisplayTime(time); + setTimePeriod(period); + } - // Initialize 12-hour display values from the local time - const { time, period } = to12Hour(localTime); - setDisplayTime(time); - setTimePeriod(period); + setScheduleChanged(false); } catch (error) { // Ignore errors on initial load - settings may not exist yet } finally { @@ -350,15 +359,27 @@ export default function BackupManager() { const handleSaveSchedule = async () => { setScheduleSaving(true); try { - // Convert local time to UTC before sending to backend - const utcTime = localToUtc(schedule.time); - const scheduleToSave = { ...schedule, time: utcTime }; + let scheduleToSave; + + if (advancedMode) { + // In advanced mode, send cron expression as-is + scheduleToSave = schedule; + } else { + // Convert local time to UTC before sending to backend + const utcTime = localToUtc(schedule.time); + scheduleToSave = { ...schedule, time: utcTime, cron_expression: '' }; + } const updated = await API.updateBackupSchedule(scheduleToSave); - // Convert UTC time from backend response back to local time - const localTime = utcToLocal(updated.time); - setSchedule({ ...updated, time: localTime }); + if (advancedMode) { + setSchedule(updated); + } else { + // Convert UTC time from backend response back to local time + const localTime = utcToLocal(updated.time); + setSchedule({ ...updated, time: localTime }); + } + setScheduleChanged(false); notifications.show({ @@ -509,17 +530,65 @@ export default function BackupManager() { ) : ( <> - - handleScheduleChange('frequency', value)} + data={[ + { value: 'daily', label: 'Daily' }, + { value: 'weekly', label: 'Weekly' }, + ]} + disabled={!schedule.enabled} + /> {schedule.frequency === 'weekly' && ( { + const minute = displayTime ? displayTime.split(':')[1] : '00'; + handleTimeChange12h(`${value}:${minute}`, null); + }} + data={Array.from({ length: 12 }, (_, i) => ({ + value: String(i + 1), + label: String(i + 1), + }))} + disabled={!schedule.enabled} + searchable + /> + handleTimeChange12h(null, value)} data={[ @@ -709,13 +680,36 @@ export default function BackupManager() { /> ) : ( - handleTimeChange24h(e.currentTarget.value)} - placeholder="03:00" - disabled={!schedule.enabled} - /> + + { + const hour = schedule.time ? 
schedule.time.split(':')[0] : '00'; + handleTimeChange24h(`${hour}:${value}`); + }} + data={Array.from({ length: 60 }, (_, i) => ({ + value: String(i).padStart(2, '0'), + label: String(i).padStart(2, '0'), + }))} + disabled={!schedule.enabled} + searchable + /> + )} - Timezone: {userTimezone} • Backup will run at {schedule.time} + System Timezone: {userTimezone} • Backup will run at {schedule.time} {userTimezone} )} From 662c5ff89a02049a6990661839df571bbde9c834 Mon Sep 17 00:00:00 2001 From: Jim McBride Date: Sat, 13 Dec 2025 18:49:36 -0600 Subject: [PATCH 60/70] Reorganize simple mode backup scheduler layout - Row 1: Frequency, Day (if weekly), Hour, Minute, Period (if 12h) - Row 2: Retention, Save button - Use wrap=nowrap to keep time selectors on same row --- apps/backups/tests.py | 35 +++ .../src/components/backups/BackupManager.jsx | 204 +++++++++--------- 2 files changed, 139 insertions(+), 100 deletions(-) diff --git a/apps/backups/tests.py b/apps/backups/tests.py index cded0ba48..dc8a51363 100644 --- a/apps/backups/tests.py +++ b/apps/backups/tests.py @@ -1016,6 +1016,41 @@ def test_periodic_task_timezone_updates_with_schedule(self): scheduler.update_schedule_settings({'enabled': False}) CoreSettings.set_system_time_zone(original_tz) + def test_orphaned_crontab_cleanup(self): + """Test that old CrontabSchedule is deleted when schedule changes""" + from . import scheduler + from django_celery_beat.models import PeriodicTask, CrontabSchedule + + # Create initial daily schedule + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + first_crontab_id = task.crontab.id + initial_count = CrontabSchedule.objects.count() + + # Change to weekly schedule (different crontab) + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'day_of_week': 3, + 'time': '03:00', + }) + + task.refresh_from_db() + second_crontab_id = task.crontab.id + + # Verify old crontab was deleted + self.assertNotEqual(first_crontab_id, second_crontab_id) + self.assertFalse(CrontabSchedule.objects.filter(id=first_crontab_id).exists()) + self.assertEqual(CrontabSchedule.objects.count(), initial_count) + + # Cleanup + scheduler.update_schedule_settings({'enabled': False}) + class BackupTasksTestCase(TestCase): """Test cases for backup Celery tasks""" diff --git a/frontend/src/components/backups/BackupManager.jsx b/frontend/src/components/backups/BackupManager.jsx index 2376aa147..fed0dcfaf 100644 --- a/frontend/src/components/backups/BackupManager.jsx +++ b/frontend/src/components/backups/BackupManager.jsx @@ -618,116 +618,120 @@ export default function BackupManager() { ) : ( - - handleScheduleChange('day_of_week', parseInt(value, 10))} - data={DAYS_OF_WEEK} - disabled={!schedule.enabled} - /> - )} - {is12Hour ? ( - - { - const hour = displayTime ? displayTime.split(':')[0] : '12'; - handleTimeChange12h(`${hour}:${value}`, null); - }} - data={Array.from({ length: 60 }, (_, i) => ({ - value: String(i).padStart(2, '0'), - label: String(i).padStart(2, '0'), - }))} - disabled={!schedule.enabled} - searchable - /> + + handleScheduleChange('day_of_week', parseInt(value, 10))} + data={DAYS_OF_WEEK} + disabled={!schedule.enabled} + /> + )} + {is12Hour ? ( + <> + { + const hour = displayTime ? 
displayTime.split(':')[0] : '12'; + handleTimeChange12h(`${hour}:${value}`, null); + }} + data={Array.from({ length: 60 }, (_, i) => ({ + value: String(i).padStart(2, '0'), + label: String(i).padStart(2, '0'), + }))} + disabled={!schedule.enabled} + searchable + /> + { + const minute = schedule.time ? schedule.time.split(':')[1] : '00'; + handleTimeChange24h(`${value}:${minute}`); + }} + data={Array.from({ length: 24 }, (_, i) => ({ + value: String(i).padStart(2, '0'), + label: String(i).padStart(2, '0'), + }))} + disabled={!schedule.enabled} + searchable + /> + { - const minute = schedule.time ? schedule.time.split(':')[1] : '00'; - handleTimeChange24h(`${value}:${minute}`); - }} - data={Array.from({ length: 24 }, (_, i) => ({ - value: String(i).padStart(2, '0'), - label: String(i).padStart(2, '0'), - }))} - disabled={!schedule.enabled} - searchable - /> -