diff --git a/.dockerignore b/.dockerignore index c79ca7b4d..296537de4 100755 --- a/.dockerignore +++ b/.dockerignore @@ -31,3 +31,4 @@ LICENSE README.md data/ +docker/data/ diff --git a/.github/workflows/base-image.yml b/.github/workflows/base-image.yml index f926d892f..72d263d09 100644 --- a/.github/workflows/base-image.yml +++ b/.github/workflows/base-image.yml @@ -101,6 +101,28 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. + org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=DispatcharrBase version: ${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }} + - name: Build and push Docker base image uses: docker/build-push-action@v4 with: @@ -113,6 +135,7 @@ jobs: ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} REPO_NAME=${{ needs.prepare.outputs.repo_name }} @@ -154,18 +177,74 @@ jobs: # GitHub Container Registry manifests # branch tag (e.g. base or base-dev) - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 # branch + timestamp tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 # Docker Hub manifests # branch tag (e.g. base or base-dev) - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 # branch + timestamp tag - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${BRANCH_TAG}-${TIMESTAMP}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=DispatcharrBase version: ${BRANCH_TAG}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5da4118c8..d8cd7079e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,6 +3,8 @@ name: CI Pipeline on: push: branches: [dev] + paths-ignore: + - '**.md' pull_request: branches: [dev] workflow_dispatch: @@ -117,7 +119,27 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - # use metadata from the prepare job + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. 
+ org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=Dispatcharr version: ${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }} - name: Build and push Docker image uses: docker/build-push-action@v4 @@ -135,6 +157,7 @@ jobs: ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.branch_tag }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.version }}-${{ needs.prepare.outputs.timestamp }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} REPO_NAME=${{ needs.prepare.outputs.repo_name }} @@ -179,16 +202,72 @@ jobs: echo "Creating multi-arch manifest for ${OWNER}/${REPO}" # branch tag (e.g. latest or dev) - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${BRANCH_TAG}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG} \ ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-amd64 ghcr.io/${OWNER}/${REPO}:${BRANCH_TAG}-arm64 # version + timestamp tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP} \ ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 # also create Docker Hub manifests using the same username - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${BRANCH_TAG}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${BRANCH_TAG}-arm64 - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${VERSION}-${TIMESTAMP}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION}-${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-${TIMESTAMP}-arm64 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e9734eb48..06b5e44f5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -25,6 +25,7 @@ jobs: new_version: ${{ steps.update_version.outputs.new_version }} repo_owner: ${{ steps.meta.outputs.repo_owner }} repo_name: ${{ steps.meta.outputs.repo_name }} + timestamp: ${{ steps.timestamp.outputs.timestamp }} steps: - uses: actions/checkout@v3 with: @@ -56,6 +57,12 @@ jobs: REPO_NAME=$(echo "${{ github.repository }}" | cut -d '/' -f 2 | tr '[:upper:]' '[:lower:]') echo "repo_name=${REPO_NAME}" >> $GITHUB_OUTPUT + - name: Generate timestamp for build + id: timestamp + run: | + TIMESTAMP=$(date -u +'%Y%m%d%H%M%S') + echo "timestamp=${TIMESTAMP}" >> $GITHUB_OUTPUT + - name: Commit and Tag run: | git add version.py CHANGELOG.md @@ -104,6 +111,28 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Extract metadata for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }} + docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }} + labels: | + org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }} + org.opencontainers.image.description=Your ultimate IPTV & stream Management companion. 
+ org.opencontainers.image.url=https://github.com/${{ github.repository }} + org.opencontainers.image.source=https://github.com/${{ github.repository }} + org.opencontainers.image.version=${{ needs.prepare.outputs.new_version }} + org.opencontainers.image.created=${{ needs.prepare.outputs.timestamp }} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.licenses=See repository + org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/ + org.opencontainers.image.vendor=${{ needs.prepare.outputs.repo_owner }} + org.opencontainers.image.authors=${{ github.actor }} + maintainer=${{ github.actor }} + build_version=Dispatcharr version: ${{ needs.prepare.outputs.new_version }} Build date: ${{ needs.prepare.outputs.timestamp }} + - name: Build and push Docker image uses: docker/build-push-action@v4 with: @@ -115,6 +144,7 @@ jobs: ghcr.io/${{ needs.prepare.outputs.repo_owner }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:latest-${{ matrix.platform }} docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${{ needs.prepare.outputs.repo_name }}:${{ needs.prepare.outputs.new_version }}-${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} build-args: | REPO_OWNER=${{ needs.prepare.outputs.repo_owner }} REPO_NAME=${{ needs.prepare.outputs.repo_name }} @@ -149,25 +179,82 @@ jobs: OWNER=${{ needs.prepare.outputs.repo_owner }} REPO=${{ needs.prepare.outputs.repo_name }} VERSION=${{ needs.prepare.outputs.new_version }} + TIMESTAMP=${{ needs.prepare.outputs.timestamp }} echo "Creating multi-arch manifest for ${OWNER}/${REPO}" # GitHub Container Registry manifests # latest tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:latest \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=latest" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:latest \ ghcr.io/${OWNER}/${REPO}:latest-amd64 ghcr.io/${OWNER}/${REPO}:latest-arm64 # version tag - docker buildx imagetools create --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${VERSION}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag ghcr.io/${OWNER}/${REPO}:${VERSION} \ ghcr.io/${OWNER}/${REPO}:${VERSION}-amd64 ghcr.io/${OWNER}/${REPO}:${VERSION}-arm64 # Docker Hub manifests # latest tag - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." \ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=latest" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:latest-arm64 # version tag - docker buildx imagetools create --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \ + docker buildx imagetools create \ + --annotation "org.opencontainers.image.title=${{ needs.prepare.outputs.repo_name }}" \ + --annotation "org.opencontainers.image.description=Your ultimate IPTV & stream Management companion." 
\ + --annotation "org.opencontainers.image.url=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.source=https://github.com/${{ github.repository }}" \ + --annotation "org.opencontainers.image.version=${VERSION}" \ + --annotation "org.opencontainers.image.created=${TIMESTAMP}" \ + --annotation "org.opencontainers.image.revision=${{ github.sha }}" \ + --annotation "org.opencontainers.image.licenses=See repository" \ + --annotation "org.opencontainers.image.documentation=https://dispatcharr.github.io/Dispatcharr-Docs/" \ + --annotation "org.opencontainers.image.vendor=${OWNER}" \ + --annotation "org.opencontainers.image.authors=${{ github.actor }}" \ + --annotation "maintainer=${{ github.actor }}" \ + --annotation "build_version=Dispatcharr version: ${VERSION} Build date: ${TIMESTAMP}" \ + --tag docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION} \ docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-amd64 docker.io/${{ secrets.DOCKERHUB_ORGANIZATION }}/${REPO}:${VERSION}-arm64 create-release: diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f679a24c..ed69c9d30 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,54 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- VOD client stop button in Stats page: Users can now disconnect individual VOD clients from the Stats view, similar to the existing channel client disconnect functionality. +- Automated configuration backup/restore system with scheduled backups, retention policies, and async task processing - Thanks [@stlalpha](https://github.com/stlalpha) (Closes #153) + +### Changed + +- Removed unreachable code path in m3u output - Thanks [@DawtCom](https://github.com/DawtCom) + +### Fixed + +- VOD episode processing now correctly handles duplicate episodes (same episode in multiple languages/qualities) by reusing Episode records across multiple M3UEpisodeRelation entries instead of attempting to create duplicates (Fixes #556) +- XtreamCodes series streaming endpoint now correctly handles episodes with multiple streams (different languages/qualities) by selecting the best available stream based on account priority (Fixes #569) +- XtreamCodes series info API now returns unique episodes instead of duplicate entries when multiple streams exist for the same episode (different languages/qualities) +- nginx now gracefully handles hosts without IPv6 support by automatically disabling IPv6 binding at startup (Fixes #744) +- XtreamCodes EPG API now returns correct date/time format for start/end fields and proper string types for timestamps and channel_id +- XtreamCodes EPG API now handles None values for title and description fields to prevent AttributeError + +## [0.14.0] - 2025-12-09 + +### Added + +- Sort buttons for 'Group' and 'M3U' columns in Streams table for improved stream organization and filtering - Thanks [@bobey6](https://github.com/bobey6) +- EPG source priority field for controlling which EPG source is preferred when multiple sources have matching entries for a channel (higher numbers = higher priority) (Closes #603) + +### Changed + +- EPG program parsing optimized for sources with many channels but only a fraction mapped. Now parses XML file once per source instead of once per channel, dramatically reducing I/O and CPU overhead. For sources with 10,000 channels and 100 mapped, this results in ~99x fewer file opens and ~100x fewer full file scans. 
Orphaned programs for unmapped channels are also cleaned up during refresh to prevent database bloat. Database updates are now atomic to prevent clients from seeing empty/partial EPG data during refresh. +- EPG table now displays detailed status messages including refresh progress, success messages, and last message for idle sources (matching M3U table behavior) (Closes #214) +- IPv6 access now allowed by default with all IPv6 CIDRs accepted - Thanks [@adrianmace](https://github.com/adrianmace) +- nginx.conf updated to bind to both IPv4 and IPv6 ports - Thanks [@jordandalley](https://github.com/jordandalley) +- EPG matching now respects source priority and only uses active (enabled) EPG sources (Closes #672) +- EPG form API Key field now only visible when Schedules Direct source type is selected + +### Fixed + +- EPG table "Updated" column now updates in real-time via WebSocket using the actual backend timestamp instead of requiring a page refresh +- Bulk channel editor confirmation dialog now displays the correct stream profile name that will be applied to the selected channels. +- uWSGI not found error and 502 Bad Gateway on first startup + +## [0.13.1] - 2025-12-06 + +### Fixed + +- JWT token is now generated so that it is unique for each deployment + +## [0.13.0] - 2025-12-02 + +### Added + - `CHANGELOG.md` file following Keep a Changelog format to document all notable changes and project history - System event logging and viewer: Comprehensive logging system that tracks internal application events (M3U refreshes, EPG updates, stream switches, errors) with a dedicated UI viewer for filtering and reviewing historical events. Improves monitoring, troubleshooting, and understanding system behavior - M3U/EPG endpoint caching: Implements intelligent caching for frequently requested M3U playlists and EPG data to reduce database load and improve response times for clients.
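The endpoint-caching entry above only describes the idea at changelog level. As a rough sketch of the pattern, not Dispatcharr's actual implementation (the cache-key scheme, TTL, and payload builder below are invented for the example), the gist is to memoize the generated playlist in Django's cache and invalidate it whenever the underlying data is refreshed:

```python
# Hypothetical sketch of M3U endpoint caching; key scheme, TTL, and the
# payload builder are illustrative assumptions, not the project's real code.
from django.core.cache import cache
from django.http import HttpResponse

M3U_CACHE_KEY = "output:m3u:{profile_id}"  # assumed cache-key scheme
M3U_CACHE_TTL = 300  # seconds; assumed time-to-live


def build_m3u_payload(profile_id: int) -> str:
    """Stand-in for the expensive, database-backed playlist generation."""
    return f"#EXTM3U\n# playlist for profile {profile_id}\n"


def m3u_endpoint(request, profile_id: int) -> HttpResponse:
    """Serve a cached playlist, regenerating it only on a cache miss."""
    key = M3U_CACHE_KEY.format(profile_id=profile_id)
    payload = cache.get(key)
    if payload is None:
        payload = build_m3u_payload(profile_id)
        cache.set(key, payload, M3U_CACHE_TTL)
    return HttpResponse(payload, content_type="application/x-mpegurl")


def invalidate_m3u_cache(profile_id: int) -> None:
    """Call after an M3U refresh so clients never see a stale playlist."""
    cache.delete(M3U_CACHE_KEY.format(profile_id=profile_id))
```

EPG output can follow the same shape with its own key namespace and a TTL tuned to the refresh interval.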
@@ -24,7 +72,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - XC player API now returns server_info for unknown actions to align with provider behavior - XC player API refactored to streamline action handling and ensure consistent responses - Date parsing logic in generate_custom_dummy_programs improved to handle empty or invalid inputs -UI now reflects date and time formats chosen by user - Thanks [@Biologisten](https://github.com/Biologisten) +- DVR cards now reflect date and time formats chosen by user - Thanks [@Biologisten](https://github.com/Biologisten) - "Uncategorized" categories and relations now automatically created for VOD accounts to improve content management (#627) - Improved minimum horizontal size in the stats page for better usability on smaller displays - M3U and EPG generation now handles missing channel profiles with appropriate error logging diff --git a/apps/api/urls.py b/apps/api/urls.py index 7d9edb523..4c92c70a4 100644 --- a/apps/api/urls.py +++ b/apps/api/urls.py @@ -27,6 +27,7 @@ path('core/', include(('core.api_urls', 'core'), namespace='core')), path('plugins/', include(('apps.plugins.api_urls', 'plugins'), namespace='plugins')), path('vod/', include(('apps.vod.api_urls', 'vod'), namespace='vod')), + path('backups/', include(('apps.backups.api_urls', 'backups'), namespace='backups')), # path('output/', include(('apps.output.api_urls', 'output'), namespace='output')), #path('player/', include(('apps.player.api_urls', 'player'), namespace='player')), #path('settings/', include(('apps.settings.api_urls', 'settings'), namespace='settings')), diff --git a/apps/backups/__init__.py b/apps/backups/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/apps/backups/api_urls.py b/apps/backups/api_urls.py new file mode 100644 index 000000000..226758cc0 --- /dev/null +++ b/apps/backups/api_urls.py @@ -0,0 +1,18 @@ +from django.urls import path + +from . import api_views + +app_name = "backups" + +urlpatterns = [ + path("", api_views.list_backups, name="backup-list"), + path("create/", api_views.create_backup, name="backup-create"), + path("upload/", api_views.upload_backup, name="backup-upload"), + path("schedule/", api_views.get_schedule, name="backup-schedule-get"), + path("schedule/update/", api_views.update_schedule, name="backup-schedule-update"), + path("status/<str:task_id>/", api_views.backup_status, name="backup-status"), + path("<str:filename>/download-token/", api_views.get_download_token, name="backup-download-token"), + path("<str:filename>/download/", api_views.download_backup, name="backup-download"), + path("<str:filename>/delete/", api_views.delete_backup, name="backup-delete"), + path("<str:filename>/restore/", api_views.restore_backup, name="backup-restore"), +] diff --git a/apps/backups/api_views.py b/apps/backups/api_views.py new file mode 100644 index 000000000..c6ff7d269 --- /dev/null +++ b/apps/backups/api_views.py @@ -0,0 +1,364 @@ +import hashlib +import hmac +import logging +import os +from pathlib import Path + +from celery.result import AsyncResult +from django.conf import settings +from django.http import HttpResponse, StreamingHttpResponse, Http404 +from rest_framework import status +from rest_framework.decorators import api_view, permission_classes, parser_classes +from rest_framework.permissions import IsAdminUser, AllowAny +from rest_framework.parsers import MultiPartParser, FormParser +from rest_framework.response import Response + +from .
import services +from .tasks import create_backup_task, restore_backup_task +from .scheduler import get_schedule_settings, update_schedule_settings + +logger = logging.getLogger(__name__) + + +def _generate_task_token(task_id: str) -> str: + """Generate a signed token for task status access without auth.""" + secret = settings.SECRET_KEY.encode() + return hmac.new(secret, task_id.encode(), hashlib.sha256).hexdigest()[:32] + + +def _verify_task_token(task_id: str, token: str) -> bool: + """Verify a task token is valid.""" + expected = _generate_task_token(task_id) + return hmac.compare_digest(expected, token) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def list_backups(request): + """List all available backup files.""" + try: + backups = services.list_backups() + return Response(backups, status=status.HTTP_200_OK) + except Exception as e: + return Response( + {"detail": f"Failed to list backups: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +def create_backup(request): + """Create a new backup (async via Celery).""" + try: + task = create_backup_task.delay() + return Response( + { + "detail": "Backup started", + "task_id": task.id, + "task_token": _generate_task_token(task.id), + }, + status=status.HTTP_202_ACCEPTED, + ) + except Exception as e: + return Response( + {"detail": f"Failed to start backup: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([AllowAny]) +def backup_status(request, task_id): + """Check the status of a backup/restore task. + + Requires either: + - Valid admin authentication, OR + - Valid task_token query parameter + """ + # Check for token-based auth (for restore when session is invalidated) + token = request.query_params.get("token") + if token: + if not _verify_task_token(task_id, token): + return Response( + {"detail": "Invalid task token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + result = AsyncResult(task_id) + + if result.ready(): + task_result = result.get() + if task_result.get("status") == "completed": + return Response({ + "state": "completed", + "result": task_result, + }) + else: + return Response({ + "state": "failed", + "error": task_result.get("error", "Unknown error"), + }) + elif result.failed(): + return Response({ + "state": "failed", + "error": str(result.result), + }) + else: + return Response({ + "state": result.state.lower(), + }) + except Exception as e: + return Response( + {"detail": f"Failed to get task status: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_download_token(request, filename): + """Get a signed token for downloading a backup file.""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + token = _generate_task_token(filename) + return Response({"token": token}) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to generate token: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([AllowAny]) +def download_backup(request, filename): + """Download a backup file. + + Requires either: + - Valid admin authentication, OR + - Valid download_token query parameter + """ + # Check for token-based auth (avoids CORS preflight issues) + token = request.query_params.get("token") + if token: + if not _verify_task_token(filename, token): + return Response( + {"detail": "Invalid download token"}, + status=status.HTTP_403_FORBIDDEN, + ) + else: + # Fall back to admin auth check + if not request.user.is_authenticated or not request.user.is_staff: + return Response( + {"detail": "Authentication required"}, + status=status.HTTP_401_UNAUTHORIZED, + ) + + try: + # Security: prevent path traversal by checking for suspicious characters + if ".." in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = (backup_dir / filename).resolve() + + # Security: ensure the resolved path is still within backup_dir + if not str(backup_file).startswith(str(backup_dir.resolve())): + raise Http404("Invalid filename") + + if not backup_file.exists() or not backup_file.is_file(): + raise Http404("Backup file not found") + + file_size = backup_file.stat().st_size + + # Use X-Accel-Redirect for nginx (AIO container) - nginx serves file directly + # Fall back to streaming for non-nginx deployments + use_nginx_accel = os.environ.get("USE_NGINX_ACCEL", "").lower() == "true" + logger.info(f"[DOWNLOAD] File: {filename}, Size: {file_size}, USE_NGINX_ACCEL: {use_nginx_accel}") + + if use_nginx_accel: + # X-Accel-Redirect: Django returns immediately, nginx serves file + logger.info(f"[DOWNLOAD] Using X-Accel-Redirect: /protected-backups/{filename}") + response = HttpResponse() + response["X-Accel-Redirect"] = f"/protected-backups/{filename}" + response["Content-Type"] = "application/zip" + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + else: + # Streaming fallback for non-nginx deployments + logger.info(f"[DOWNLOAD] Using streaming fallback (no nginx)") + def file_iterator(file_path, chunk_size=2 * 1024 * 1024): + with open(file_path, "rb") as f: + while chunk := f.read(chunk_size): + yield chunk + + response = StreamingHttpResponse( + file_iterator(backup_file), + content_type="application/zip", + ) + response["Content-Length"] = file_size + response["Content-Disposition"] = f'attachment; filename="{filename}"' + return response + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Download failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["DELETE"]) +@permission_classes([IsAdminUser]) +def delete_backup(request, filename): + """Delete a backup file.""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + services.delete_backup(filename) + return Response( + {"detail": "Backup deleted successfully"}, + status=status.HTTP_204_NO_CONTENT, + ) + except FileNotFoundError: + raise Http404("Backup file not found") + except Exception as e: + return Response( + {"detail": f"Delete failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +@parser_classes([MultiPartParser, FormParser]) +def upload_backup(request): + """Upload a backup file for restoration.""" + uploaded = request.FILES.get("file") + if not uploaded: + return Response( + {"detail": "No file uploaded"}, + status=status.HTTP_400_BAD_REQUEST, + ) + + try: + backup_dir = services.get_backup_dir() + filename = uploaded.name or "uploaded-backup.zip" + + # Ensure unique filename + backup_file = backup_dir / filename + counter = 1 + while backup_file.exists(): + name_parts = filename.rsplit(".", 1) + if len(name_parts) == 2: + backup_file = backup_dir / f"{name_parts[0]}-{counter}.{name_parts[1]}" + else: + backup_file = backup_dir / f"{filename}-{counter}" + counter += 1 + + # Save uploaded file + with backup_file.open("wb") as f: + for chunk in uploaded.chunks(): + f.write(chunk) + + return Response( + { + "detail": "Backup uploaded successfully", + "filename": backup_file.name, + }, + status=status.HTTP_201_CREATED, + ) + except Exception as e: + return Response( + {"detail": f"Upload failed: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["POST"]) +@permission_classes([IsAdminUser]) +def restore_backup(request, filename): + """Restore from a backup file (async via Celery). WARNING: This will flush the database!""" + try: + # Security: prevent path traversal + if ".." 
in filename or "/" in filename or "\\" in filename: + raise Http404("Invalid filename") + + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise Http404("Backup file not found") + + task = restore_backup_task.delay(filename) + return Response( + { + "detail": "Restore started", + "task_id": task.id, + "task_token": _generate_task_token(task.id), + }, + status=status.HTTP_202_ACCEPTED, + ) + except Http404: + raise + except Exception as e: + return Response( + {"detail": f"Failed to start restore: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["GET"]) +@permission_classes([IsAdminUser]) +def get_schedule(request): + """Get backup schedule settings.""" + try: + settings = get_schedule_settings() + return Response(settings) + except Exception as e: + return Response( + {"detail": f"Failed to get schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + + +@api_view(["PUT"]) +@permission_classes([IsAdminUser]) +def update_schedule(request): + """Update backup schedule settings.""" + try: + settings = update_schedule_settings(request.data) + return Response(settings) + except ValueError as e: + return Response( + {"detail": str(e)}, + status=status.HTTP_400_BAD_REQUEST, + ) + except Exception as e: + return Response( + {"detail": f"Failed to update schedule: {str(e)}"}, + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + ) diff --git a/apps/backups/apps.py b/apps/backups/apps.py new file mode 100644 index 000000000..ee644149b --- /dev/null +++ b/apps/backups/apps.py @@ -0,0 +1,7 @@ +from django.apps import AppConfig + + +class BackupsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "apps.backups" + verbose_name = "Backups" diff --git a/apps/backups/migrations/__init__.py b/apps/backups/migrations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/apps/backups/models.py b/apps/backups/models.py new file mode 100644 index 000000000..e69de29bb diff --git a/apps/backups/scheduler.py b/apps/backups/scheduler.py new file mode 100644 index 000000000..2dd9e8289 --- /dev/null +++ b/apps/backups/scheduler.py @@ -0,0 +1,203 @@ +import json +import logging + +from django_celery_beat.models import PeriodicTask, CrontabSchedule + +from core.models import CoreSettings + +logger = logging.getLogger(__name__) + +BACKUP_SCHEDULE_TASK_NAME = "backup-scheduled-task" + +SETTING_KEYS = { + "enabled": "backup_schedule_enabled", + "frequency": "backup_schedule_frequency", + "time": "backup_schedule_time", + "day_of_week": "backup_schedule_day_of_week", + "retention_count": "backup_retention_count", + "cron_expression": "backup_schedule_cron_expression", +} + +DEFAULTS = { + "enabled": False, + "frequency": "daily", + "time": "03:00", + "day_of_week": 0, # Sunday + "retention_count": 0, + "cron_expression": "", +} + + +def _get_setting(key: str, default=None): + """Get a backup setting from CoreSettings.""" + try: + setting = CoreSettings.objects.get(key=SETTING_KEYS[key]) + value = setting.value + if key == "enabled": + return value.lower() == "true" + elif key in ("day_of_week", "retention_count"): + return int(value) + return value + except CoreSettings.DoesNotExist: + return default if default is not None else DEFAULTS.get(key) + + +def _set_setting(key: str, value) -> None: + """Set a backup setting in CoreSettings.""" + str_value = str(value).lower() if isinstance(value, bool) else str(value) + CoreSettings.objects.update_or_create( + 
key=SETTING_KEYS[key], + defaults={ + "name": f"Backup {key.replace('_', ' ').title()}", + "value": str_value, + }, + ) + + +def get_schedule_settings() -> dict: + """Get all backup schedule settings.""" + return { + "enabled": _get_setting("enabled"), + "frequency": _get_setting("frequency"), + "time": _get_setting("time"), + "day_of_week": _get_setting("day_of_week"), + "retention_count": _get_setting("retention_count"), + "cron_expression": _get_setting("cron_expression"), + } + + +def update_schedule_settings(data: dict) -> dict: + """Update backup schedule settings and sync the PeriodicTask.""" + # Validate + if "frequency" in data and data["frequency"] not in ("daily", "weekly"): + raise ValueError("frequency must be 'daily' or 'weekly'") + + if "time" in data: + try: + hour, minute = data["time"].split(":") + int(hour) + int(minute) + except (ValueError, AttributeError): + raise ValueError("time must be in HH:MM format") + + if "day_of_week" in data: + day = int(data["day_of_week"]) + if day < 0 or day > 6: + raise ValueError("day_of_week must be 0-6 (Sunday-Saturday)") + + if "retention_count" in data: + count = int(data["retention_count"]) + if count < 0: + raise ValueError("retention_count must be >= 0") + + # Update settings + for key in ("enabled", "frequency", "time", "day_of_week", "retention_count", "cron_expression"): + if key in data: + _set_setting(key, data[key]) + + # Sync the periodic task + _sync_periodic_task() + + return get_schedule_settings() + + +def _sync_periodic_task() -> None: + """Create, update, or delete the scheduled backup task based on settings.""" + settings = get_schedule_settings() + + if not settings["enabled"]: + # Delete the task if it exists + task = PeriodicTask.objects.filter(name=BACKUP_SCHEDULE_TASK_NAME).first() + if task: + old_crontab = task.crontab + task.delete() + _cleanup_orphaned_crontab(old_crontab) + logger.info("Backup schedule disabled, removed periodic task") + return + + # Get old crontab before creating new one + old_crontab = None + try: + old_task = PeriodicTask.objects.get(name=BACKUP_SCHEDULE_TASK_NAME) + old_crontab = old_task.crontab + except PeriodicTask.DoesNotExist: + pass + + # Check if using cron expression (advanced mode) + if settings["cron_expression"]: + # Parse cron expression: "minute hour day month weekday" + try: + parts = settings["cron_expression"].split() + if len(parts) != 5: + raise ValueError("Cron expression must have 5 parts: minute hour day month weekday") + + minute, hour, day_of_month, month_of_year, day_of_week = parts + + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=day_of_week, + day_of_month=day_of_month, + month_of_year=month_of_year, + timezone=CoreSettings.get_system_time_zone(), + ) + except Exception as e: + logger.error(f"Invalid cron expression '{settings['cron_expression']}': {e}") + raise ValueError(f"Invalid cron expression: {e}") + else: + # Use simple frequency-based scheduling + # Parse time + hour, minute = settings["time"].split(":") + + # Build crontab based on frequency + system_tz = CoreSettings.get_system_time_zone() + if settings["frequency"] == "daily": + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week="*", + day_of_month="*", + month_of_year="*", + timezone=system_tz, + ) + else: # weekly + crontab, _ = CrontabSchedule.objects.get_or_create( + minute=minute, + hour=hour, + day_of_week=str(settings["day_of_week"]), + day_of_month="*", + month_of_year="*", + timezone=system_tz, 
+ ) + + # Create or update the periodic task + task, created = PeriodicTask.objects.update_or_create( + name=BACKUP_SCHEDULE_TASK_NAME, + defaults={ + "task": "apps.backups.tasks.scheduled_backup_task", + "crontab": crontab, + "enabled": True, + "kwargs": json.dumps({"retention_count": settings["retention_count"]}), + }, + ) + + # Clean up old crontab if it changed and is orphaned + if old_crontab and old_crontab.id != crontab.id: + _cleanup_orphaned_crontab(old_crontab) + + action = "Created" if created else "Updated" + logger.info(f"{action} backup schedule: {settings['frequency']} at {settings['time']}") + + +def _cleanup_orphaned_crontab(crontab_schedule): + """Delete old CrontabSchedule if no other tasks are using it.""" + if crontab_schedule is None: + return + + # Check if any other tasks are using this crontab + if PeriodicTask.objects.filter(crontab=crontab_schedule).exists(): + logger.debug(f"CrontabSchedule {crontab_schedule.id} still in use, not deleting") + return + + logger.debug(f"Cleaning up orphaned CrontabSchedule: {crontab_schedule.id}") + crontab_schedule.delete() diff --git a/apps/backups/services.py b/apps/backups/services.py new file mode 100644 index 000000000..b99fab6dd --- /dev/null +++ b/apps/backups/services.py @@ -0,0 +1,320 @@ +import datetime +import json +import os +import shutil +import subprocess +import tempfile +from pathlib import Path +from zipfile import ZipFile, ZIP_DEFLATED +import logging +import pytz + +from django.conf import settings +from core.models import CoreSettings + +logger = logging.getLogger(__name__) + + +def get_backup_dir() -> Path: + """Get the backup directory, creating it if necessary.""" + backup_dir = Path(settings.BACKUP_ROOT) + backup_dir.mkdir(parents=True, exist_ok=True) + return backup_dir + + +def _is_postgresql() -> bool: + """Check if we're using PostgreSQL.""" + return settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql" + + +def _get_pg_env() -> dict: + """Get environment variables for PostgreSQL commands.""" + db_config = settings.DATABASES["default"] + env = os.environ.copy() + env["PGPASSWORD"] = db_config.get("PASSWORD", "") + return env + + +def _get_pg_args() -> list[str]: + """Get common PostgreSQL command arguments.""" + db_config = settings.DATABASES["default"] + return [ + "-h", db_config.get("HOST", "localhost"), + "-p", str(db_config.get("PORT", 5432)), + "-U", db_config.get("USER", "postgres"), + "-d", db_config.get("NAME", "dispatcharr"), + ] + + +def _dump_postgresql(output_file: Path) -> None: + """Dump PostgreSQL database using pg_dump.""" + logger.info("Dumping PostgreSQL database with pg_dump...") + + cmd = [ + "pg_dump", + *_get_pg_args(), + "-Fc", # Custom format for pg_restore + "-v", # Verbose + "-f", str(output_file), + ] + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"pg_dump failed: {result.stderr}") + raise RuntimeError(f"pg_dump failed: {result.stderr}") + + logger.debug(f"pg_dump output: {result.stderr}") + + +def _restore_postgresql(dump_file: Path) -> None: + """Restore PostgreSQL database using pg_restore.""" + logger.info("[PG_RESTORE] Starting pg_restore...") + logger.info(f"[PG_RESTORE] Dump file: {dump_file}") + + pg_args = _get_pg_args() + logger.info(f"[PG_RESTORE] Connection args: {pg_args}") + + cmd = [ + "pg_restore", + "--clean", # Clean (drop) database objects before recreating + *pg_args, + "-v", # Verbose + str(dump_file), + ] + + logger.info(f"[PG_RESTORE] 
Running command: {' '.join(cmd)}") + + result = subprocess.run( + cmd, + env=_get_pg_env(), + capture_output=True, + text=True, + ) + + logger.info(f"[PG_RESTORE] Return code: {result.returncode}") + + # pg_restore may return non-zero even on partial success + # Check for actual errors vs warnings + if result.returncode != 0: + # Some errors during restore are expected (e.g., "does not exist" when cleaning) + # Only fail on critical errors + stderr = result.stderr.lower() + if "fatal" in stderr or "could not connect" in stderr: + logger.error(f"[PG_RESTORE] Failed critically: {result.stderr}") + raise RuntimeError(f"pg_restore failed: {result.stderr}") + else: + logger.warning(f"[PG_RESTORE] Completed with warnings: {result.stderr[:500]}...") + + logger.info("[PG_RESTORE] Completed successfully") + + +def _dump_sqlite(output_file: Path) -> None: + """Dump SQLite database using sqlite3 .backup command.""" + logger.info("Dumping SQLite database with sqlite3 .backup...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + + if not db_path.exists(): + raise FileNotFoundError(f"SQLite database not found: {db_path}") + + # Use sqlite3 .backup command via stdin for reliable execution + result = subprocess.run( + ["sqlite3", str(db_path)], + input=f".backup '{output_file}'\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 backup failed: {result.stderr}") + raise RuntimeError(f"sqlite3 backup failed: {result.stderr}") + + # Verify the backup file was created + if not output_file.exists(): + raise RuntimeError("sqlite3 backup failed: output file not created") + + logger.info(f"sqlite3 backup completed successfully: {output_file}") + + +def _restore_sqlite(dump_file: Path) -> None: + """Restore SQLite database by replacing the database file.""" + logger.info("Restoring SQLite database...") + db_path = Path(settings.DATABASES["default"]["NAME"]) + backup_current = None + + # Backup current database before overwriting + if db_path.exists(): + backup_current = db_path.with_suffix(".db.bak") + shutil.copy2(db_path, backup_current) + logger.info(f"Backed up current database to {backup_current}") + + # Ensure parent directory exists + db_path.parent.mkdir(parents=True, exist_ok=True) + + # The backup file from _dump_sqlite is a complete SQLite database file + # We can simply copy it over the existing database + shutil.copy2(dump_file, db_path) + + # Verify the restore worked by checking if sqlite3 can read it + result = subprocess.run( + ["sqlite3", str(db_path)], + input=".tables\n", + capture_output=True, + text=True, + ) + + if result.returncode != 0: + logger.error(f"sqlite3 verification failed: {result.stderr}") + # Try to restore from backup + if backup_current and backup_current.exists(): + shutil.copy2(backup_current, db_path) + logger.info("Restored original database from backup") + raise RuntimeError(f"sqlite3 restore verification failed: {result.stderr}") + + logger.info("sqlite3 restore completed successfully") + + +def create_backup() -> Path: + """ + Create a backup archive containing database dump and data directories. + Returns the path to the created backup file. 
+ """ + backup_dir = get_backup_dir() + + # Use system timezone for filename (user-friendly), but keep internal timestamps as UTC + system_tz_name = CoreSettings.get_system_time_zone() + try: + system_tz = pytz.timezone(system_tz_name) + now_local = datetime.datetime.now(datetime.UTC).astimezone(system_tz) + timestamp = now_local.strftime("%Y.%m.%d.%H.%M.%S") + except Exception as e: + logger.warning(f"Failed to use system timezone {system_tz_name}: {e}, falling back to UTC") + timestamp = datetime.datetime.now(datetime.UTC).strftime("%Y.%m.%d.%H.%M.%S") + + backup_name = f"dispatcharr-backup-{timestamp}.zip" + backup_file = backup_dir / backup_name + + logger.info(f"Creating backup: {backup_name}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-backup-") as temp_dir: + temp_path = Path(temp_dir) + + # Determine database type and dump accordingly + if _is_postgresql(): + db_dump_file = temp_path / "database.dump" + _dump_postgresql(db_dump_file) + db_type = "postgresql" + else: + db_dump_file = temp_path / "database.sqlite3" + _dump_sqlite(db_dump_file) + db_type = "sqlite" + + # Create ZIP archive with compression and ZIP64 support for large files + with ZipFile(backup_file, "w", compression=ZIP_DEFLATED, allowZip64=True) as zip_file: + # Add database dump + zip_file.write(db_dump_file, db_dump_file.name) + + # Add metadata + metadata = { + "format": "dispatcharr-backup", + "version": 2, + "database_type": db_type, + "database_file": db_dump_file.name, + "created_at": datetime.datetime.now(datetime.UTC).isoformat(), + } + zip_file.writestr("metadata.json", json.dumps(metadata, indent=2)) + + logger.info(f"Backup created successfully: {backup_file}") + return backup_file + + +def restore_backup(backup_file: Path) -> None: + """ + Restore from a backup archive. + WARNING: This will overwrite the database! 
+ """ + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {backup_file}") + + logger.info(f"Restoring from backup: {backup_file}") + + with tempfile.TemporaryDirectory(prefix="dispatcharr-restore-") as temp_dir: + temp_path = Path(temp_dir) + + # Extract backup + logger.debug("Extracting backup archive...") + with ZipFile(backup_file, "r") as zip_file: + zip_file.extractall(temp_path) + + # Read metadata + metadata_file = temp_path / "metadata.json" + if not metadata_file.exists(): + raise ValueError("Invalid backup: missing metadata.json") + + with open(metadata_file) as f: + metadata = json.load(f) + + # Restore database + _restore_database(temp_path, metadata) + + logger.info("Restore completed successfully") + + +def _restore_database(temp_path: Path, metadata: dict) -> None: + """Restore database from backup.""" + db_type = metadata.get("database_type", "postgresql") + db_file = metadata.get("database_file", "database.dump") + dump_file = temp_path / db_file + + if not dump_file.exists(): + raise ValueError(f"Invalid backup: missing {db_file}") + + current_db_type = "postgresql" if _is_postgresql() else "sqlite" + + if db_type != current_db_type: + raise ValueError( + f"Database type mismatch: backup is {db_type}, " + f"but current database is {current_db_type}" + ) + + if db_type == "postgresql": + _restore_postgresql(dump_file) + else: + _restore_sqlite(dump_file) + + +def list_backups() -> list[dict]: + """List all available backup files with metadata.""" + backup_dir = get_backup_dir() + backups = [] + + for backup_file in sorted(backup_dir.glob("dispatcharr-backup-*.zip"), reverse=True): + # Use UTC timezone so frontend can convert to user's local time + created_time = datetime.datetime.fromtimestamp(backup_file.stat().st_mtime, datetime.UTC) + backups.append({ + "name": backup_file.name, + "size": backup_file.stat().st_size, + "created": created_time.isoformat(), + }) + + return backups + + +def delete_backup(filename: str) -> None: + """Delete a backup file.""" + backup_dir = get_backup_dir() + backup_file = backup_dir / filename + + if not backup_file.exists(): + raise FileNotFoundError(f"Backup file not found: {filename}") + + if not backup_file.is_file(): + raise ValueError(f"Invalid backup file: {filename}") + + backup_file.unlink() + logger.info(f"Deleted backup: {filename}") diff --git a/apps/backups/tasks.py b/apps/backups/tasks.py new file mode 100644 index 000000000..f531fef81 --- /dev/null +++ b/apps/backups/tasks.py @@ -0,0 +1,106 @@ +import logging +import traceback +from celery import shared_task + +from . import services + +logger = logging.getLogger(__name__) + + +def _cleanup_old_backups(retention_count: int) -> int: + """Delete old backups, keeping only the most recent N. 
Returns count deleted.""" + if retention_count <= 0: + return 0 + + backups = services.list_backups() + if len(backups) <= retention_count: + return 0 + + # Backups are sorted newest first, so delete from the end + to_delete = backups[retention_count:] + deleted = 0 + + for backup in to_delete: + try: + services.delete_backup(backup["name"]) + deleted += 1 + logger.info(f"[CLEANUP] Deleted old backup: {backup['name']}") + except Exception as e: + logger.error(f"[CLEANUP] Failed to delete {backup['name']}: {e}") + + return deleted + + +@shared_task(bind=True) +def create_backup_task(self): + """Celery task to create a backup asynchronously.""" + try: + logger.info(f"[BACKUP] Starting backup task {self.request.id}") + backup_file = services.create_backup() + logger.info(f"[BACKUP] Task {self.request.id} completed: {backup_file.name}") + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + } + except Exception as e: + logger.error(f"[BACKUP] Task {self.request.id} failed: {str(e)}") + logger.error(f"[BACKUP] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def restore_backup_task(self, filename: str): + """Celery task to restore a backup asynchronously.""" + try: + logger.info(f"[RESTORE] Starting restore task {self.request.id} for {filename}") + backup_dir = services.get_backup_dir() + backup_file = backup_dir / filename + logger.info(f"[RESTORE] Backup file path: {backup_file}") + services.restore_backup(backup_file) + logger.info(f"[RESTORE] Task {self.request.id} completed successfully") + return { + "status": "completed", + "filename": filename, + } + except Exception as e: + logger.error(f"[RESTORE] Task {self.request.id} failed: {str(e)}") + logger.error(f"[RESTORE] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } + + +@shared_task(bind=True) +def scheduled_backup_task(self, retention_count: int = 0): + """Celery task for scheduled backups with optional retention cleanup.""" + try: + logger.info(f"[SCHEDULED] Starting scheduled backup task {self.request.id}") + + # Create backup + backup_file = services.create_backup() + logger.info(f"[SCHEDULED] Backup created: {backup_file.name}") + + # Cleanup old backups if retention is set + deleted = 0 + if retention_count > 0: + deleted = _cleanup_old_backups(retention_count) + logger.info(f"[SCHEDULED] Cleanup complete, deleted {deleted} old backup(s)") + + return { + "status": "completed", + "filename": backup_file.name, + "size": backup_file.stat().st_size, + "deleted_count": deleted, + } + except Exception as e: + logger.error(f"[SCHEDULED] Task {self.request.id} failed: {str(e)}") + logger.error(f"[SCHEDULED] Traceback: {traceback.format_exc()}") + return { + "status": "failed", + "error": str(e), + } diff --git a/apps/backups/tests.py b/apps/backups/tests.py new file mode 100644 index 000000000..dc8a51363 --- /dev/null +++ b/apps/backups/tests.py @@ -0,0 +1,1163 @@ +import json +import tempfile +from io import BytesIO +from pathlib import Path +from zipfile import ZipFile +from unittest.mock import patch, MagicMock + +from django.test import TestCase +from django.contrib.auth import get_user_model +from rest_framework.test import APIClient +from rest_framework_simplejwt.tokens import RefreshToken + +from . 
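A worked example of the retention rule in _cleanup_old_backups above: list_backups() returns entries newest first, so everything past the first retention_count entries is deleted.

    backups = [
        {"name": "dispatcharr-backup-2025.01.03.03.00.00.zip"},  # newest, kept
        {"name": "dispatcharr-backup-2025.01.02.03.00.00.zip"},  # kept
        {"name": "dispatcharr-backup-2025.01.01.03.00.00.zip"},  # oldest, deleted
    ]
    retention_count = 2
    to_delete = backups[retention_count:]
    assert [b["name"] for b in to_delete] == ["dispatcharr-backup-2025.01.01.03.00.00.zip"]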
import services + +User = get_user_model() + + +class BackupServicesTestCase(TestCase): + """Test cases for backup services""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + @patch('apps.backups.services.settings') + def test_get_backup_dir_creates_directory(self, mock_settings): + """Test that get_backup_dir creates the directory if it doesn't exist""" + mock_settings.BACKUP_ROOT = self.temp_backup_dir + + with patch('apps.backups.services.Path') as mock_path: + mock_path_instance = MagicMock() + mock_path_instance.mkdir = MagicMock() + mock_path.return_value = mock_path_instance + + services.get_backup_dir() + mock_path_instance.mkdir.assert_called_once_with(parents=True, exist_ok=True) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_sqlite') + def test_create_backup_success_sqlite(self, mock_dump_sqlite, mock_is_pg, mock_get_backup_dir): + """Test successful backup creation with SQLite""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_is_pg.return_value = False + + # Mock SQLite dump to create a temp file + def mock_dump(output_file): + output_file.write_text("sqlite dump") + + mock_dump_sqlite.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + self.assertTrue(result.name.startswith('dispatcharr-backup-')) + self.assertTrue(result.name.endswith('.zip')) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.sqlite3', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'sqlite') + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._dump_postgresql') + def test_create_backup_success_postgresql(self, mock_dump_pg, mock_is_pg, mock_get_backup_dir): + """Test successful backup creation with PostgreSQL""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + mock_is_pg.return_value = True + + # Mock PostgreSQL dump to create a temp file + def mock_dump(output_file): + output_file.write_bytes(b"pg dump data") + + mock_dump_pg.side_effect = mock_dump + + result = services.create_backup() + + self.assertIsInstance(result, Path) + self.assertTrue(result.exists()) + + # Verify the backup contains expected files + with ZipFile(result, 'r') as zf: + names = zf.namelist() + self.assertIn('database.dump', names) + self.assertIn('metadata.json', names) + + # Check metadata + metadata = json.loads(zf.read('metadata.json')) + self.assertEqual(metadata['version'], 2) + self.assertEqual(metadata['database_type'], 'postgresql') + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_empty(self, mock_get_backup_dir): + """Test listing backups when none exist""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + result = services.list_backups() + + self.assertEqual(result, []) + + @patch('apps.backups.services.get_backup_dir') + def test_list_backups_with_files(self, mock_get_backup_dir): + """Test listing backups with existing backup files""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value 
= backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-2025.01.01.12.00.00.zip" + test_backup.write_text("fake backup content") + + result = services.list_backups() + + self.assertEqual(len(result), 1) + self.assertEqual(result[0]['name'], test_backup.name) + self.assertIn('size', result[0]) + self.assertIn('created', result[0]) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_success(self, mock_get_backup_dir): + """Test successful backup deletion""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a fake backup file + test_backup = backup_dir / "dispatcharr-backup-test.zip" + test_backup.write_text("fake backup content") + + self.assertTrue(test_backup.exists()) + + services.delete_backup(test_backup.name) + + self.assertFalse(test_backup.exists()) + + @patch('apps.backups.services.get_backup_dir') + def test_delete_backup_not_found(self, mock_get_backup_dir): + """Test deleting a non-existent backup raises error""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + with self.assertRaises(FileNotFoundError): + services.delete_backup("nonexistent-backup.zip") + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_postgresql') + def test_restore_backup_postgresql(self, mock_restore_pg, mock_is_pg, mock_get_backup_dir): + """Test successful restoration of PostgreSQL backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True + + # Create PostgreSQL backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'pg dump data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + services.restore_backup(backup_file) + + mock_restore_pg.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + @patch('apps.backups.services._restore_sqlite') + def test_restore_backup_sqlite(self, mock_restore_sqlite, mock_is_pg, mock_get_backup_dir): + """Test successful restoration of SQLite backup""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = False + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', + 'database_file': 'database.sqlite3' + })) + + services.restore_backup(backup_file) + + mock_restore_sqlite.assert_called_once() + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_database_type_mismatch(self, mock_is_pg, mock_get_backup_dir): + """Test restore fails when database type doesn't match""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True # Current system is PostgreSQL + + # Create SQLite backup file + backup_file = backup_dir / "test-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.sqlite3', 'sqlite data') + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'sqlite', # Backup is SQLite + 'database_file': 'database.sqlite3' 
+ })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('mismatch', str(context.exception).lower()) + + def test_restore_backup_not_found(self): + """Test restoring from non-existent backup file""" + fake_path = Path("/tmp/nonexistent-backup-12345.zip") + + with self.assertRaises(FileNotFoundError): + services.restore_backup(fake_path) + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_missing_metadata(self, mock_get_backup_dir): + """Test restoring from backup without metadata.json""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a backup file missing metadata.json + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('database.dump', b'fake dump data') + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('metadata.json', str(context.exception)) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.services._is_postgresql') + def test_restore_backup_missing_database(self, mock_is_pg, mock_get_backup_dir): + """Test restoring from backup missing database dump""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_is_pg.return_value = True + + # Create backup file missing database dump + backup_file = backup_dir / "invalid-backup.zip" + with ZipFile(backup_file, 'w') as zf: + zf.writestr('metadata.json', json.dumps({ + 'version': 2, + 'database_type': 'postgresql', + 'database_file': 'database.dump' + })) + + with self.assertRaises(ValueError) as context: + services.restore_backup(backup_file) + + self.assertIn('database.dump', str(context.exception)) + + +class BackupAPITestCase(TestCase): + """Test cases for backup API endpoints""" + + def setUp(self): + self.client = APIClient() + self.user = User.objects.create_user( + username='testuser', + email='test@example.com', + password='testpass123' + ) + self.admin_user = User.objects.create_superuser( + username='admin', + email='admin@example.com', + password='adminpass123' + ) + self.temp_backup_dir = tempfile.mkdtemp() + + def get_auth_header(self, user): + """Helper method to get JWT auth header for a user""" + refresh = RefreshToken.for_user(user) + return f'Bearer {str(refresh.access_token)}' + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + def test_list_backups_requires_admin(self): + """Test that listing backups requires admin privileges""" + url = '/api/backups/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.list_backups') + def test_list_backups_success(self, mock_list_backups): + """Test successful backup listing""" + mock_list_backups.return_value = [ + { + 'name': 'backup-test.zip', + 'size': 1024, + 'created': '2025-01-01T12:00:00' + } + ] + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(len(data), 1) + self.assertEqual(data[0]['name'], 'backup-test.zip') + + def test_create_backup_requires_admin(self): + 
"""Test that creating backups requires admin privileges""" + url = '/api/backups/create/' + + # Unauthenticated request + response = self.client.post(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.post(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_success(self, mock_create_task): + """Test successful backup creation via API (async task)""" + mock_task = MagicMock() + mock_task.id = 'test-task-id-123' + mock_create_task.return_value = mock_task + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-task-id-123') + + @patch('apps.backups.tasks.create_backup_task.delay') + def test_create_backup_failure(self, mock_create_task): + """Test backup creation failure handling""" + mock_create_task.side_effect = Exception("Failed to start task") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/create/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_success(self, mock_get_backup_dir): + """Test successful backup download""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + self.assertEqual(response['Content-Type'], 'application/zip') + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_not_found(self, mock_get_backup_dir): + """Test downloading non-existent backup""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_success(self, mock_delete_backup): + """Test successful backup deletion via API""" + mock_delete_backup.return_value = None + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 204) + mock_delete_backup.assert_called_once_with('test-backup.zip') + + @patch('apps.backups.services.delete_backup') + def test_delete_backup_not_found(self, mock_delete_backup): + """Test deleting non-existent backup via API""" + mock_delete_backup.side_effect = FileNotFoundError("Not found") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/delete/' + response = self.client.delete(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + def 
test_upload_backup_requires_file(self): + """Test that upload requires a file""" + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('No file uploaded', data['detail']) + + @patch('apps.backups.services.get_backup_dir') + def test_upload_backup_success(self, mock_get_backup_dir): + """Test successful backup upload""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + # Create a fake backup file + fake_backup = BytesIO(b"fake backup content") + fake_backup.name = 'uploaded-backup.zip' + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/upload/' + response = self.client.post(url, {'file': fake_backup}, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 201) + data = response.json() + self.assertIn('filename', data) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_success(self, mock_restore_task, mock_get_backup_dir): + """Test successful backup restoration via API (async task)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + mock_task = MagicMock() + mock_task.id = 'test-restore-task-456' + mock_restore_task.return_value = mock_task + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test backup content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 202) + data = response.json() + self.assertIn('task_id', data) + self.assertIn('task_token', data) + self.assertEqual(data['task_id'], 'test-restore-task-456') + + @patch('apps.backups.services.get_backup_dir') + def test_restore_backup_not_found(self, mock_get_backup_dir): + """Test restoring from non-existent backup via API""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Backup Status Endpoint Tests --- + + def test_backup_status_requires_auth_or_token(self): + """Test that backup_status requires auth or valid token""" + url = '/api/backups/status/fake-task-id/' + + # Unauthenticated request without token + response = self.client.get(url) + self.assertEqual(response.status_code, 401) + + def test_backup_status_invalid_token(self): + """Test that backup_status rejects invalid tokens""" + url = '/api/backups/status/fake-task-id/?token=invalid-token' + response = self.client.get(url) + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_with_admin_auth(self, mock_async_result): + """Test backup_status with admin authentication""" + mock_result = MagicMock() + mock_result.ready.return_value = False + mock_result.failed.return_value = False + mock_result.state = 'PENDING' + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = 
response.json() + self.assertEqual(data['state'], 'pending') + + @patch('apps.backups.api_views.AsyncResult') + @patch('apps.backups.api_views._verify_task_token') + def test_backup_status_with_valid_token(self, mock_verify, mock_async_result): + """Test backup_status with valid token""" + mock_verify.return_value = True + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'completed', 'filename': 'test.zip'} + mock_async_result.return_value = mock_result + + url = '/api/backups/status/test-task-id/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'completed') + + @patch('apps.backups.api_views.AsyncResult') + def test_backup_status_task_failed(self, mock_async_result): + """Test backup_status when task failed""" + mock_result = MagicMock() + mock_result.ready.return_value = True + mock_result.get.return_value = {'status': 'failed', 'error': 'Something went wrong'} + mock_async_result.return_value = mock_result + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/status/test-task-id/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['state'], 'failed') + self.assertIn('Something went wrong', data['error']) + + # --- Download Token Endpoint Tests --- + + def test_get_download_token_requires_admin(self): + """Test that get_download_token requires admin privileges""" + url = '/api/backups/test.zip/download-token/' + + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_success(self, mock_get_backup_dir): + """Test successful download token generation""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertIn('token', data) + self.assertEqual(len(data['token']), 32) + + @patch('apps.backups.services.get_backup_dir') + def test_get_download_token_not_found(self, mock_get_backup_dir): + """Test download token for non-existent file""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/nonexistent.zip/download-token/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 404) + + # --- Download with Token Auth Tests --- + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.api_views._verify_task_token') + def test_download_backup_with_valid_token(self, mock_verify, mock_get_backup_dir): + """Test downloading backup with valid token (no auth header)""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_verify.return_value = True + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + 
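The tests above imply a polling workflow for the asynchronous endpoints: POST returns 202 with a task_id and task_token, and the status endpoint accepts the token in place of an auth header. A client-side sketch; the endpoint paths and JSON keys come from the tests, while the base URL and the JWT placeholder are assumptions.

    import time
    import requests

    BASE = "http://localhost:9191/api/backups"          # assumed host/port
    HEADERS = {"Authorization": "Bearer <admin JWT>"}    # admin token required

    def create_backup_and_wait(poll_seconds=2):
        resp = requests.post(f"{BASE}/create/", headers=HEADERS)
        resp.raise_for_status()                          # expect 202 Accepted
        task_id = resp.json()["task_id"]
        token = resp.json()["task_token"]
        while True:
            status = requests.get(f"{BASE}/status/{task_id}/", params={"token": token}).json()
            if status["state"] in ("completed", "failed"):
                return status
            time.sleep(poll_seconds)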
backup_file.write_text("test backup content") + + url = '/api/backups/test-backup.zip/download/?token=valid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 200) + + @patch('apps.backups.services.get_backup_dir') + def test_download_backup_invalid_token(self, mock_get_backup_dir): + """Test downloading backup with invalid token""" + mock_get_backup_dir.return_value = Path(self.temp_backup_dir) + + url = '/api/backups/test-backup.zip/download/?token=invalid-token' + response = self.client.get(url) + + self.assertEqual(response.status_code, 403) + + @patch('apps.backups.services.get_backup_dir') + @patch('apps.backups.tasks.restore_backup_task.delay') + def test_restore_backup_task_start_failure(self, mock_restore_task, mock_get_backup_dir): + """Test restore task start failure via API""" + backup_dir = Path(self.temp_backup_dir) + mock_get_backup_dir.return_value = backup_dir + mock_restore_task.side_effect = Exception("Failed to start restore task") + + # Create a test backup file + backup_file = backup_dir / "test-backup.zip" + backup_file.write_text("test content") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/test-backup.zip/restore/' + response = self.client.post(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 500) + data = response.json() + self.assertIn('detail', data) + + def test_get_schedule_requires_admin(self): + """Test that getting schedule requires admin privileges""" + url = '/api/backups/schedule/' + + # Unauthenticated request + response = self.client.get(url) + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.get(url, HTTP_AUTHORIZATION=self.get_auth_header(self.user)) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.get_schedule_settings') + def test_get_schedule_success(self, mock_get_settings): + """Test successful schedule retrieval""" + mock_get_settings.return_value = { + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + 'day_of_week': 0, + 'retention_count': 5, + 'cron_expression': '', + } + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/' + response = self.client.get(url, HTTP_AUTHORIZATION=auth_header) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['enabled'], True) + self.assertEqual(data['frequency'], 'daily') + self.assertEqual(data['retention_count'], 5) + + def test_update_schedule_requires_admin(self): + """Test that updating schedule requires admin privileges""" + url = '/api/backups/schedule/update/' + + # Unauthenticated request + response = self.client.put(url, {}, content_type='application/json') + self.assertIn(response.status_code, [401, 403]) + + # Regular user request + response = self.client.put( + url, + {}, + content_type='application/json', + HTTP_AUTHORIZATION=self.get_auth_header(self.user) + ) + self.assertIn(response.status_code, [401, 403]) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_success(self, mock_update_settings): + """Test successful schedule update""" + mock_update_settings.return_value = { + 'enabled': True, + 'frequency': 'weekly', + 'time': '02:00', + 'day_of_week': 1, + 'retention_count': 10, + 'cron_expression': '', + } + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'enabled': True, 'frequency': 'weekly', 'time': 
'02:00', 'day_of_week': 1, 'retention_count': 10}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 200) + data = response.json() + self.assertEqual(data['frequency'], 'weekly') + self.assertEqual(data['day_of_week'], 1) + + @patch('apps.backups.api_views.update_schedule_settings') + def test_update_schedule_validation_error(self, mock_update_settings): + """Test schedule update with invalid data""" + mock_update_settings.side_effect = ValueError("frequency must be 'daily' or 'weekly'") + + auth_header = self.get_auth_header(self.admin_user) + url = '/api/backups/schedule/update/' + response = self.client.put( + url, + {'frequency': 'invalid'}, + content_type='application/json', + HTTP_AUTHORIZATION=auth_header + ) + + self.assertEqual(response.status_code, 400) + data = response.json() + self.assertIn('frequency', data['detail']) + + +class BackupSchedulerTestCase(TestCase): + """Test cases for backup scheduler""" + + databases = {'default'} + + @classmethod + def setUpClass(cls): + pass + + @classmethod + def tearDownClass(cls): + pass + + def setUp(self): + from core.models import CoreSettings + # Clean up any existing settings + CoreSettings.objects.filter(key__startswith='backup_').delete() + + def tearDown(self): + from core.models import CoreSettings + from django_celery_beat.models import PeriodicTask + CoreSettings.objects.filter(key__startswith='backup_').delete() + PeriodicTask.objects.filter(name='backup-scheduled-task').delete() + + def test_get_schedule_settings_defaults(self): + """Test that get_schedule_settings returns defaults when no settings exist""" + from . import scheduler + + settings = scheduler.get_schedule_settings() + + self.assertEqual(settings['enabled'], False) + self.assertEqual(settings['frequency'], 'daily') + self.assertEqual(settings['time'], '03:00') + self.assertEqual(settings['day_of_week'], 0) + self.assertEqual(settings['retention_count'], 0) + self.assertEqual(settings['cron_expression'], '') + + def test_update_schedule_settings_stores_values(self): + """Test that update_schedule_settings stores values correctly""" + from . import scheduler + + result = scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '04:30', + 'day_of_week': 3, + 'retention_count': 7, + }) + + self.assertEqual(result['enabled'], True) + self.assertEqual(result['frequency'], 'weekly') + self.assertEqual(result['time'], '04:30') + self.assertEqual(result['day_of_week'], 3) + self.assertEqual(result['retention_count'], 7) + + # Verify persistence + settings = scheduler.get_schedule_settings() + self.assertEqual(settings['enabled'], True) + self.assertEqual(settings['frequency'], 'weekly') + + def test_update_schedule_settings_invalid_frequency(self): + """Test that invalid frequency raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'frequency': 'monthly'}) + + self.assertIn('frequency', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_time(self): + """Test that invalid time raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'time': 'invalid'}) + + self.assertIn('HH:MM', str(context.exception)) + + def test_update_schedule_settings_invalid_day_of_week(self): + """Test that invalid day_of_week raises ValueError""" + from . 
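Mirroring the validation errors exercised above, a client-side sketch of the rules the schedule update endpoint appears to enforce; the exact bounds are inferred from the tests' error messages, so treat them as assumptions.

    def validate_schedule(payload: dict) -> dict:
        if payload.get("frequency", "daily") not in ("daily", "weekly"):
            raise ValueError("frequency must be 'daily' or 'weekly'")
        hh, _, mm = str(payload.get("time", "03:00")).partition(":")
        if not (hh.isdigit() and mm.isdigit() and 0 <= int(hh) < 24 and 0 <= int(mm) < 60):
            raise ValueError("time must be in HH:MM format")
        if not 0 <= int(payload.get("day_of_week", 0)) <= 6:
            raise ValueError("day_of_week must be between 0 and 6")
        if int(payload.get("retention_count", 0)) < 0:
            raise ValueError("retention_count must be >= 0")
        return payload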
import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'day_of_week': 7}) + + self.assertIn('day_of_week', str(context.exception).lower()) + + def test_update_schedule_settings_invalid_retention(self): + """Test that negative retention_count raises ValueError""" + from . import scheduler + + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({'retention_count': -1}) + + self.assertIn('retention_count', str(context.exception).lower()) + + def test_sync_creates_periodic_task_when_enabled(self): + """Test that enabling schedule creates a PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertTrue(task.enabled) + self.assertEqual(task.crontab.hour, '05') + self.assertEqual(task.crontab.minute, '00') + + def test_sync_deletes_periodic_task_when_disabled(self): + """Test that disabling schedule removes PeriodicTask""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + # First enable + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '05:00', + }) + + self.assertTrue(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + # Then disable + scheduler.update_schedule_settings({'enabled': False}) + + self.assertFalse(PeriodicTask.objects.filter(name='backup-scheduled-task').exists()) + + def test_weekly_schedule_sets_day_of_week(self): + """Test that weekly schedule sets correct day_of_week in crontab""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'time': '06:00', + 'day_of_week': 3, # Wednesday + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.day_of_week, '3') + + def test_cron_expression_stores_value(self): + """Test that cron_expression is stored and retrieved correctly""" + from . import scheduler + + result = scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '*/5 * * * *', + }) + + self.assertEqual(result['cron_expression'], '*/5 * * * *') + + # Verify persistence + settings = scheduler.get_schedule_settings() + self.assertEqual(settings['cron_expression'], '*/5 * * * *') + + def test_cron_expression_creates_correct_schedule(self): + """Test that cron expression creates correct CrontabSchedule""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '*/15 2 * * 1-5', # Every 15 mins during 2 AM hour on weekdays + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '*/15') + self.assertEqual(task.crontab.hour, '2') + self.assertEqual(task.crontab.day_of_month, '*') + self.assertEqual(task.crontab.month_of_year, '*') + self.assertEqual(task.crontab.day_of_week, '1-5') + + def test_cron_expression_invalid_format(self): + """Test that invalid cron expression raises ValueError""" + from . 
import scheduler + + # Too few parts + with self.assertRaises(ValueError) as context: + scheduler.update_schedule_settings({ + 'enabled': True, + 'cron_expression': '0 3 *', + }) + self.assertIn('5 parts', str(context.exception)) + + def test_cron_expression_empty_uses_simple_mode(self): + """Test that empty cron_expression falls back to simple frequency mode""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '04:00', + 'cron_expression': '', # Empty, should use simple mode + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '00') + self.assertEqual(task.crontab.hour, '04') + self.assertEqual(task.crontab.day_of_week, '*') + + def test_cron_expression_overrides_simple_settings(self): + """Test that cron_expression takes precedence over frequency/time""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + 'cron_expression': '0 */6 * * *', # Every 6 hours (should override daily at 3 AM) + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(task.crontab.minute, '0') + self.assertEqual(task.crontab.hour, '*/6') + self.assertEqual(task.crontab.day_of_week, '*') + + def test_periodic_task_uses_system_timezone(self): + """Test that CrontabSchedule is created with the system timezone""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + from core.models import CoreSettings + + original_tz = CoreSettings.get_system_time_zone() + + try: + # Set a non-UTC timezone + CoreSettings.set_system_time_zone('America/New_York') + + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(str(task.crontab.timezone), 'America/New_York') + finally: + scheduler.update_schedule_settings({'enabled': False}) + CoreSettings.set_system_time_zone(original_tz) + + def test_periodic_task_timezone_updates_with_schedule(self): + """Test that CrontabSchedule timezone is updated when schedule is modified""" + from . import scheduler + from django_celery_beat.models import PeriodicTask + from core.models import CoreSettings + + original_tz = CoreSettings.get_system_time_zone() + + try: + # Create initial schedule with one timezone + CoreSettings.set_system_time_zone('America/Los_Angeles') + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '02:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + self.assertEqual(str(task.crontab.timezone), 'America/Los_Angeles') + + # Change system timezone and update schedule + CoreSettings.set_system_time_zone('Europe/London') + scheduler.update_schedule_settings({ + 'enabled': True, + 'time': '04:00', + }) + + task.refresh_from_db() + self.assertEqual(str(task.crontab.timezone), 'Europe/London') + finally: + scheduler.update_schedule_settings({'enabled': False}) + CoreSettings.set_system_time_zone(original_tz) + + def test_orphaned_crontab_cleanup(self): + """Test that old CrontabSchedule is deleted when schedule changes""" + from . 
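The scheduler module itself is not part of this hunk, but the tests above pin down how a five-field cron expression is expected to map onto django-celery-beat. A sketch of that mapping; the helper name is illustrative, not the project's actual function.

    from django_celery_beat.models import CrontabSchedule

    def crontab_from_expression(expr):
        parts = expr.split()
        if len(parts) != 5:
            raise ValueError("cron_expression must have 5 parts: minute hour day-of-month month day-of-week")
        minute, hour, day_of_month, month_of_year, day_of_week = parts
        # The real scheduler also sets the crontab's timezone from the system
        # time zone setting, as the timezone tests in this file verify.
        schedule, _ = CrontabSchedule.objects.get_or_create(
            minute=minute,
            hour=hour,
            day_of_month=day_of_month,
            month_of_year=month_of_year,
            day_of_week=day_of_week,
        )
        return schedule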
import scheduler + from django_celery_beat.models import PeriodicTask, CrontabSchedule + + # Create initial daily schedule + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'daily', + 'time': '03:00', + }) + + task = PeriodicTask.objects.get(name='backup-scheduled-task') + first_crontab_id = task.crontab.id + initial_count = CrontabSchedule.objects.count() + + # Change to weekly schedule (different crontab) + scheduler.update_schedule_settings({ + 'enabled': True, + 'frequency': 'weekly', + 'day_of_week': 3, + 'time': '03:00', + }) + + task.refresh_from_db() + second_crontab_id = task.crontab.id + + # Verify old crontab was deleted + self.assertNotEqual(first_crontab_id, second_crontab_id) + self.assertFalse(CrontabSchedule.objects.filter(id=first_crontab_id).exists()) + self.assertEqual(CrontabSchedule.objects.count(), initial_count) + + # Cleanup + scheduler.update_schedule_settings({'enabled': False}) + + +class BackupTasksTestCase(TestCase): + """Test cases for backup Celery tasks""" + + def setUp(self): + self.temp_backup_dir = tempfile.mkdtemp() + + def tearDown(self): + import shutil + if Path(self.temp_backup_dir).exists(): + shutil.rmtree(self.temp_backup_dir) + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_keeps_recent(self, mock_delete, mock_list): + """Test that cleanup keeps the most recent backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, # newest + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, # oldest + ] + + deleted = _cleanup_old_backups(retention_count=2) + + self.assertEqual(deleted, 1) + mock_delete.assert_called_once_with('backup-1.zip') + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_does_nothing_when_under_limit(self, mock_delete, mock_list): + """Test that cleanup does nothing when under retention limit""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=5) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.list_backups') + @patch('apps.backups.tasks.services.delete_backup') + def test_cleanup_old_backups_zero_retention_keeps_all(self, mock_delete, mock_list): + """Test that retention_count=0 keeps all backups""" + from .tasks import _cleanup_old_backups + + mock_list.return_value = [ + {'name': 'backup-3.zip'}, + {'name': 'backup-2.zip'}, + {'name': 'backup-1.zip'}, + ] + + deleted = _cleanup_old_backups(retention_count=0) + + self.assertEqual(deleted, 0) + mock_delete.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_success(self, mock_cleanup, mock_create): + """Test scheduled backup task success""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + mock_cleanup.return_value = 2 + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'completed') + self.assertEqual(result['filename'], 'scheduled-backup.zip') + self.assertEqual(result['size'], 1024) + 
self.assertEqual(result['deleted_count'], 2) + mock_cleanup.assert_called_once_with(5) + + @patch('apps.backups.tasks.services.create_backup') + @patch('apps.backups.tasks._cleanup_old_backups') + def test_scheduled_backup_task_no_cleanup_when_retention_zero(self, mock_cleanup, mock_create): + """Test scheduled backup skips cleanup when retention is 0""" + from .tasks import scheduled_backup_task + + mock_backup_file = MagicMock() + mock_backup_file.name = 'scheduled-backup.zip' + mock_backup_file.stat.return_value.st_size = 1024 + mock_create.return_value = mock_backup_file + + result = scheduled_backup_task(retention_count=0) + + self.assertEqual(result['status'], 'completed') + self.assertEqual(result['deleted_count'], 0) + mock_cleanup.assert_not_called() + + @patch('apps.backups.tasks.services.create_backup') + def test_scheduled_backup_task_failure(self, mock_create): + """Test scheduled backup task handles failure""" + from .tasks import scheduled_backup_task + + mock_create.side_effect = Exception("Backup failed") + + result = scheduled_backup_task(retention_count=5) + + self.assertEqual(result['status'], 'failed') + self.assertIn('Backup failed', result['error']) diff --git a/apps/channels/api_urls.py b/apps/channels/api_urls.py index 7999abd93..bd53ae45d 100644 --- a/apps/channels/api_urls.py +++ b/apps/channels/api_urls.py @@ -47,7 +47,7 @@ path('series-rules/', SeriesRulesAPIView.as_view(), name='series_rules'), path('series-rules/evaluate/', EvaluateSeriesRulesAPIView.as_view(), name='evaluate_series_rules'), path('series-rules/bulk-remove/', BulkRemoveSeriesRecordingsAPIView.as_view(), name='bulk_remove_series_recordings'), - path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), + path('series-rules//', DeleteSeriesRuleAPIView.as_view(), name='delete_series_rule'), path('recordings/bulk-delete-upcoming/', BulkDeleteUpcomingRecordingsAPIView.as_view(), name='bulk_delete_upcoming_recordings'), path('dvr/comskip-config/', ComskipConfigAPIView.as_view(), name='comskip_config'), ] diff --git a/apps/channels/api_views.py b/apps/channels/api_views.py index bc9205379..aebb74a38 100644 --- a/apps/channels/api_views.py +++ b/apps/channels/api_views.py @@ -8,7 +8,9 @@ from drf_yasg import openapi from django.shortcuts import get_object_or_404, get_list_or_404 from django.db import transaction +from django.db.models import Q import os, json, requests, logging +from urllib.parse import unquote from apps.accounts.permissions import ( Authenticated, IsAdmin, @@ -124,7 +126,7 @@ class StreamViewSet(viewsets.ModelViewSet): filter_backends = [DjangoFilterBackend, SearchFilter, OrderingFilter] filterset_class = StreamFilter search_fields = ["name", "channel_group__name"] - ordering_fields = ["name", "channel_group__name"] + ordering_fields = ["name", "channel_group__name", "m3u_account__name"] ordering = ["-name"] def get_permissions(self): @@ -419,10 +421,36 @@ def get_queryset(self): group_names = channel_group.split(",") qs = qs.filter(channel_group__name__in=group_names) + filters = {} + q_filters = Q() + + channel_profile_id = self.request.query_params.get("channel_profile_id") + show_disabled_param = self.request.query_params.get("show_disabled", None) + only_streamless = self.request.query_params.get("only_streamless", None) + + if channel_profile_id: + try: + profile_id_int = int(channel_profile_id) + filters["channelprofilemembership__channel_profile_id"] = profile_id_int + + if show_disabled_param is None: + filters["channelprofilemembership__enabled"] 
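The new query parameters handled in get_queryset below can be combined in a single request. The parameter names come from the code; the endpoint path, port, and JWT placeholder are assumptions for illustration only.

    import requests

    params = {
        "channel_profile_id": 3,     # only memberships of this channel profile
        "only_streamless": "true",   # only channels with no streams attached
        # omit show_disabled so only enabled memberships are returned
    }
    resp = requests.get(
        "http://localhost:9191/api/channels/channels/",   # assumed path
        params=params,
        headers={"Authorization": "Bearer <admin JWT>"},
    )
    channels = resp.json()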
= True + except (ValueError, TypeError): + # Ignore invalid profile id values + pass + + if only_streamless: + q_filters &= Q(streams__isnull=True) + if self.request.user.user_level < 10: - qs = qs.filter(user_level__lte=self.request.user.user_level) + filters["user_level__lte"] = self.request.user.user_level - return qs + if filters: + qs = qs.filter(**filters) + if q_filters: + qs = qs.filter(q_filters) + + return qs.distinct() def get_serializer_context(self): context = super().get_serializer_context() @@ -2026,7 +2054,7 @@ def get_permissions(self): return [Authenticated()] def delete(self, request, tvg_id): - tvg_id = str(tvg_id) + tvg_id = unquote(str(tvg_id or "")) rules = [r for r in CoreSettings.get_dvr_series_rules() if str(r.get("tvg_id")) != tvg_id] CoreSettings.set_dvr_series_rules(rules) return Response({"success": True, "rules": rules}) diff --git a/apps/channels/tasks.py b/apps/channels/tasks.py index 5a9528a78..7ca73ac28 100755 --- a/apps/channels/tasks.py +++ b/apps/channels/tasks.py @@ -295,7 +295,11 @@ def match_channels_to_epg(channels_data, epg_data, region_code=None, use_ml=True if score > 50: # Only show decent matches logger.debug(f" EPG '{row['name']}' (norm: '{row['norm_name']}') => score: {score} (base: {base_score}, bonus: {bonus})") - if score > best_score: + # When scores are equal, prefer higher priority EPG source + row_priority = row.get('epg_source_priority', 0) + best_priority = best_epg.get('epg_source_priority', 0) if best_epg else -1 + + if score > best_score or (score == best_score and row_priority > best_priority): best_score = score best_epg = row @@ -471,9 +475,9 @@ def match_epg_channels(): "norm_chan": normalize_name(channel.name) # Always use channel name for fuzzy matching! }) - # Get all EPG data + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches epg_data = [] - for epg in EPGData.objects.all(): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data.append({ 'id': epg.id, @@ -482,9 +486,13 @@ def match_epg_channels(): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) - logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries") + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + logger.info(f"Processing {len(channels_data)} channels against {len(epg_data)} EPG entries (from active sources only)") # Run EPG matching with progress updates - automatically uses conservative thresholds for bulk operations result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) @@ -618,9 +626,9 @@ def match_selected_channels_epg(channel_ids): "norm_chan": normalize_name(channel.name) }) - # Get all EPG data + # Get all EPG data from active sources, ordered by source priority (highest first) so we prefer higher priority matches epg_data = [] - for epg in EPGData.objects.all(): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True): normalized_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data.append({ 'id': epg.id, @@ -629,9 +637,13 @@ def match_selected_channels_epg(channel_ids): 'name': 
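A worked example of the tie-break added to match_channels_to_epg above: when two candidates score the same, the entry whose EPG source has the higher priority wins.

    candidates = [
        {"name": "ESPN",    "score": 92, "epg_source_priority": 0},
        {"name": "ESPN HD", "score": 92, "epg_source_priority": 5},
    ]
    best = max(candidates, key=lambda row: (row["score"], row["epg_source_priority"]))
    assert best["name"] == "ESPN HD"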
epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) - logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries") + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data.sort(key=lambda x: x['epg_source_priority'], reverse=True) + + logger.info(f"Processing {len(channels_data)} selected channels against {len(epg_data)} EPG entries (from active sources only)") # Run EPG matching with progress updates - automatically uses appropriate thresholds result = match_channels_to_epg(channels_data, epg_data, region_code, use_ml=True, send_progress=True) @@ -749,9 +761,10 @@ def match_single_channel_epg(channel_id): test_normalized = normalize_name(test_name) logger.debug(f"DEBUG normalization example: '{test_name}' → '{test_normalized}' (call sign preserved)") - # Get all EPG data for matching - must include norm_name field + # Get all EPG data for matching from active sources - must include norm_name field + # Ordered by source priority (highest first) so we prefer higher priority matches epg_data_list = [] - for epg in EPGData.objects.filter(name__isnull=False).exclude(name=''): + for epg in EPGData.objects.select_related('epg_source').filter(epg_source__is_active=True, name__isnull=False).exclude(name=''): normalized_epg_tvg_id = epg.tvg_id.strip().lower() if epg.tvg_id else "" epg_data_list.append({ 'id': epg.id, @@ -760,10 +773,14 @@ def match_single_channel_epg(channel_id): 'name': epg.name, 'norm_name': normalize_name(epg.name), 'epg_source_id': epg.epg_source.id if epg.epg_source else None, + 'epg_source_priority': epg.epg_source.priority if epg.epg_source else 0, }) + # Sort EPG data by source priority (highest first) so we prefer higher priority matches + epg_data_list.sort(key=lambda x: x['epg_source_priority'], reverse=True) + if not epg_data_list: - return {"matched": False, "message": "No EPG data available for matching"} + return {"matched": False, "message": "No EPG data available for matching (from active sources)"} logger.info(f"Matching single channel '{channel.name}' against {len(epg_data_list)} EPG entries") diff --git a/apps/epg/migrations/0021_epgsource_priority.py b/apps/epg/migrations/0021_epgsource_priority.py new file mode 100644 index 000000000..f2696d674 --- /dev/null +++ b/apps/epg/migrations/0021_epgsource_priority.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.4 on 2025-12-05 15:24 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('epg', '0020_migrate_time_to_starttime_placeholders'), + ] + + operations = [ + migrations.AddField( + model_name='epgsource', + name='priority', + field=models.PositiveIntegerField(default=0, help_text='Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel.'), + ), + ] diff --git a/apps/epg/models.py b/apps/epg/models.py index e5f3847bd..b3696edc3 100644 --- a/apps/epg/models.py +++ b/apps/epg/models.py @@ -45,6 +45,10 @@ class EPGSource(models.Model): null=True, help_text="Custom properties for dummy EPG configuration (regex patterns, timezone, duration, etc.)" ) + priority = models.PositiveIntegerField( + default=0, + help_text="Priority for EPG matching (higher numbers = higher priority). Used when multiple EPG sources have matching entries for a channel." 
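Once the migration above is applied, a source's priority can be raised so its entries win score ties during matching. A small sketch; the source name here is hypothetical.

    from apps.epg.models import EPGSource

    # Prefer this provider's guide data whenever fuzzy-match scores tie.
    EPGSource.objects.filter(name="Preferred Guide").update(priority=10)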
+ ) status = models.CharField( max_length=20, choices=STATUS_CHOICES, diff --git a/apps/epg/serializers.py b/apps/epg/serializers.py index bfb750fc0..e4d5f4668 100644 --- a/apps/epg/serializers.py +++ b/apps/epg/serializers.py @@ -24,6 +24,7 @@ class Meta: 'is_active', 'file_path', 'refresh_interval', + 'priority', 'status', 'last_message', 'created_at', diff --git a/apps/epg/tasks.py b/apps/epg/tasks.py index 59d658b19..bd78c6a33 100644 --- a/apps/epg/tasks.py +++ b/apps/epg/tasks.py @@ -1393,11 +1393,23 @@ def parse_programs_for_tvg_id(epg_id): def parse_programs_for_source(epg_source, tvg_id=None): + """ + Parse programs for all MAPPED channels from an EPG source in a single pass. + + This is an optimized version that: + 1. Only processes EPG entries that are actually mapped to channels + 2. Parses the XML file ONCE instead of once per channel + 3. Skips programmes for unmapped channels entirely during parsing + + This dramatically improves performance when an EPG source has many channels + but only a fraction are mapped. + """ # Send initial programs parsing notification send_epg_update(epg_source.id, "parsing_programs", 0) should_log_memory = False process = None initial_memory = 0 + source_file = None # Add memory tracking only in trace mode or higher try: @@ -1417,82 +1429,229 @@ def parse_programs_for_source(epg_source, tvg_id=None): should_log_memory = False try: - # Process EPG entries in batches rather than all at once - batch_size = 20 # Process fewer channels at once to reduce memory usage - epg_count = EPGData.objects.filter(epg_source=epg_source).count() + # Only get EPG entries that are actually mapped to channels + mapped_epg_ids = set( + Channel.objects.filter( + epg_data__epg_source=epg_source, + epg_data__isnull=False + ).values_list('epg_data_id', flat=True) + ) - if epg_count == 0: - logger.info(f"No EPG entries found for source: {epg_source.name}") - # Update status - this is not an error, just no entries + if not mapped_epg_ids: + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + logger.info(f"No channels mapped to any EPG entries from source: {epg_source.name} " + f"(source has {total_epg_count} EPG entries, 0 mapped)") + # Update status - this is not an error, just no mapped entries epg_source.status = 'success' - epg_source.save(update_fields=['status']) + epg_source.last_message = f"No channels mapped to this EPG source ({total_epg_count} entries available)" + epg_source.save(update_fields=['status', 'last_message']) send_epg_update(epg_source.id, "parsing_programs", 100, status="success") return True - logger.info(f"Parsing programs for {epg_count} EPG entries from source: {epg_source.name}") + # Get the mapped EPG entries with their tvg_ids + mapped_epgs = EPGData.objects.filter(id__in=mapped_epg_ids).values('id', 'tvg_id') + tvg_id_to_epg_id = {epg['tvg_id']: epg['id'] for epg in mapped_epgs if epg['tvg_id']} + mapped_tvg_ids = set(tvg_id_to_epg_id.keys()) - failed_entries = [] - program_count = 0 - channel_count = 0 - updated_count = 0 - processed = 0 - # Process in batches using cursor-based approach to limit memory usage - last_id = 0 - while True: - # Get a batch of EPG entries - batch_entries = list(EPGData.objects.filter( - epg_source=epg_source, - id__gt=last_id - ).order_by('id')[:batch_size]) + total_epg_count = EPGData.objects.filter(epg_source=epg_source).count() + mapped_count = len(mapped_tvg_ids) - if not batch_entries: - break # No more entries to process + logger.info(f"Parsing programs for {mapped_count} MAPPED 
channels from source: {epg_source.name} " + f"(skipping {total_epg_count - mapped_count} unmapped EPG entries)") - # Update last_id for next iteration - last_id = batch_entries[-1].id + # Get the file path + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + if not file_path: + file_path = epg_source.get_cache_file() - # Process this batch - for epg in batch_entries: - if epg.tvg_id: - try: - result = parse_programs_for_tvg_id(epg.id) - if result == "Task already running": - logger.info(f"Program parse for {epg.id} already in progress, skipping") + # Check if the file exists + if not os.path.exists(file_path): + logger.error(f"EPG file not found at: {file_path}") - processed += 1 - progress = min(95, int((processed / epg_count) * 100)) if epg_count > 0 else 50 - send_epg_update(epg_source.id, "parsing_programs", progress) - except Exception as e: - logger.error(f"Error parsing programs for tvg_id={epg.tvg_id}: {e}", exc_info=True) - failed_entries.append(f"{epg.tvg_id}: {str(e)}") + if epg_source.url: + # Update the file path in the database + new_path = epg_source.get_cache_file() + logger.info(f"Updating file_path from '{file_path}' to '{new_path}'") + epg_source.file_path = new_path + epg_source.save(update_fields=['file_path']) + logger.info(f"Fetching new EPG data from URL: {epg_source.url}") - # Force garbage collection after each batch - batch_entries = None # Remove reference to help garbage collection - gc.collect() + # Fetch new data before continuing + fetch_success = fetch_xmltv(epg_source) + + if not fetch_success: + logger.error(f"Failed to fetch EPG data for source: {epg_source.name}") + epg_source.status = 'error' + epg_source.last_message = f"Failed to download EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="Failed to download EPG file") + return False + + # Update file_path with the new location + file_path = epg_source.extracted_file_path if epg_source.extracted_file_path else epg_source.file_path + else: + logger.error(f"No URL provided for EPG source {epg_source.name}, cannot fetch new data") + epg_source.status = 'error' + epg_source.last_message = f"No URL provided, cannot fetch EPG data" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", error="No URL provided") + return False + + # SINGLE PASS PARSING: Parse the XML file once and collect all programs in memory + # We parse FIRST, then do an atomic delete+insert to avoid race conditions + # where clients might see empty/partial EPG data during the transition + all_programs_to_create = [] + programs_by_channel = {tvg_id: 0 for tvg_id in mapped_tvg_ids} # Track count per channel + total_programs = 0 + skipped_programs = 0 + last_progress_update = 0 + + try: + logger.debug(f"Opening file for single-pass parsing: {file_path}") + source_file = open(file_path, 'rb') + + # Stream parse the file using lxml's iterparse + program_parser = etree.iterparse(source_file, events=('end',), tag='programme', remove_blank_text=True, recover=True) + + for _, elem in program_parser: + channel_id = elem.get('channel') + + # Skip programmes for unmapped channels immediately + if channel_id not in mapped_tvg_ids: + skipped_programs += 1 + # Clear element to free memory + clear_element(elem) + continue + + # This programme is for a mapped channel - process it + try: + start_time = 
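The single-pass strategy above hinges on streaming the XMLTV file once and discarding programmes for unmapped channels as soon as they are seen. A stripped-down sketch of that pattern; the function name and the memory-freeing details are illustrative rather than the exact helpers used here.

    from lxml import etree

    def count_mapped_programmes(xmltv_path, mapped_tvg_ids):
        counts = {tvg_id: 0 for tvg_id in mapped_tvg_ids}
        with open(xmltv_path, "rb") as fh:
            for _, elem in etree.iterparse(fh, events=("end",), tag="programme", recover=True):
                channel_id = elem.get("channel")
                if channel_id in counts:        # skip unmapped channels immediately
                    counts[channel_id] += 1
                elem.clear()                    # release the element's memory
                while elem.getprevious() is not None:
                    del elem.getparent()[0]     # drop already-processed siblings
        return counts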
parse_xmltv_time(elem.get('start')) + end_time = parse_xmltv_time(elem.get('stop')) + title = None + desc = None + sub_title = None + + # Efficiently process child elements + for child in elem: + if child.tag == 'title': + title = child.text or 'No Title' + elif child.tag == 'desc': + desc = child.text or '' + elif child.tag == 'sub-title': + sub_title = child.text or '' + + if not title: + title = 'No Title' + + # Extract custom properties + custom_props = extract_custom_properties(elem) + custom_properties_json = custom_props if custom_props else None + + epg_id = tvg_id_to_epg_id[channel_id] + all_programs_to_create.append(ProgramData( + epg_id=epg_id, + start_time=start_time, + end_time=end_time, + title=title, + description=desc, + sub_title=sub_title, + tvg_id=channel_id, + custom_properties=custom_properties_json + )) + total_programs += 1 + programs_by_channel[channel_id] += 1 + + # Clear the element to free memory + clear_element(elem) + + # Send progress update (estimate based on programs processed) + if total_programs - last_progress_update >= 5000: + last_progress_update = total_programs + # Cap at 70% during parsing phase (save 30% for DB operations) + progress = min(70, 10 + int((total_programs / max(total_programs + 10000, 1)) * 60)) + send_epg_update(epg_source.id, "parsing_programs", progress, + processed=total_programs, channels=mapped_count) + + # Periodic garbage collection during parsing + if total_programs % 5000 == 0: + gc.collect() - # If there were failures, include them in the message but continue - if failed_entries: - epg_source.status = EPGSource.STATUS_SUCCESS # Still mark as success if some processed - error_summary = f"Failed to parse {len(failed_entries)} of {epg_count} entries" - stats_summary = f"Processed {program_count} programs across {channel_count} channels. Updated: {updated_count}." 
- epg_source.last_message = f"{stats_summary} Warning: {error_summary}" - epg_source.updated_at = timezone.now() - epg_source.save(update_fields=['status', 'last_message', 'updated_at']) - - # Send completion notification with mixed status - send_epg_update(epg_source.id, "parsing_programs", 100, - status="success", - message=epg_source.last_message) - - # Explicitly release memory of large lists before returning - del failed_entries + except Exception as e: + logger.error(f"Error processing program for {channel_id}: {e}", exc_info=True) + clear_element(elem) + continue + + except etree.XMLSyntaxError as xml_error: + logger.error(f"XML syntax error parsing program data: {xml_error}") + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"XML parsing error: {str(xml_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(xml_error)) + return False + except Exception as e: + logger.error(f"Error parsing XML for programs: {e}", exc_info=True) + raise + finally: + if source_file: + source_file.close() + source_file = None + + # Now perform atomic delete + bulk insert + # This ensures clients never see empty/partial EPG data + logger.info(f"Parsed {total_programs} programs, performing atomic database update...") + send_epg_update(epg_source.id, "parsing_programs", 75, message="Updating database...") + + batch_size = 1000 + try: + with transaction.atomic(): + # Delete existing programs for mapped EPGs + deleted_count = ProgramData.objects.filter(epg_id__in=mapped_epg_ids).delete()[0] + logger.debug(f"Deleted {deleted_count} existing programs") + + # Clean up orphaned programs for unmapped EPG entries + unmapped_epg_ids = list(EPGData.objects.filter( + epg_source=epg_source + ).exclude(id__in=mapped_epg_ids).values_list('id', flat=True)) + + if unmapped_epg_ids: + orphaned_count = ProgramData.objects.filter(epg_id__in=unmapped_epg_ids).delete()[0] + if orphaned_count > 0: + logger.info(f"Cleaned up {orphaned_count} orphaned programs for {len(unmapped_epg_ids)} unmapped EPG entries") + + # Bulk insert all new programs in batches within the same transaction + for i in range(0, len(all_programs_to_create), batch_size): + batch = all_programs_to_create[i:i + batch_size] + ProgramData.objects.bulk_create(batch) + + # Update progress during insertion + progress = 75 + int((i / len(all_programs_to_create)) * 20) if all_programs_to_create else 95 + if i % (batch_size * 5) == 0: + send_epg_update(epg_source.id, "parsing_programs", min(95, progress), + message=f"Inserting programs... 
{i}/{len(all_programs_to_create)}") + + logger.info(f"Atomic update complete: deleted {deleted_count}, inserted {total_programs} programs") + + except Exception as db_error: + logger.error(f"Database error during atomic update: {db_error}", exc_info=True) + epg_source.status = EPGSource.STATUS_ERROR + epg_source.last_message = f"Database error: {str(db_error)}" + epg_source.save(update_fields=['status', 'last_message']) + send_epg_update(epg_source.id, "parsing_programs", 100, status="error", message=str(db_error)) + return False + finally: + # Clear the large list to free memory + all_programs_to_create = None gc.collect() - return True + # Count channels that actually got programs + channels_with_programs = sum(1 for count in programs_by_channel.values() if count > 0) - # If all successful, set a comprehensive success message + # Success message epg_source.status = EPGSource.STATUS_SUCCESS - epg_source.last_message = f"Successfully processed {program_count} programs across {channel_count} channels. Updated: {updated_count}." + epg_source.last_message = ( + f"Parsed {total_programs:,} programs for {channels_with_programs} channels " + f"(skipped {skipped_programs:,} programs for {total_epg_count - mapped_count} unmapped channels)" + ) epg_source.updated_at = timezone.now() epg_source.save(update_fields=['status', 'last_message', 'updated_at']) @@ -1500,17 +1659,21 @@ def parse_programs_for_source(epg_source, tvg_id=None): log_system_event( event_type='epg_refresh', source_name=epg_source.name, - programs=program_count, - channels=channel_count, - updated=updated_count, + programs=total_programs, + channels=channels_with_programs, + skipped_programs=skipped_programs, + unmapped_channels=total_epg_count - mapped_count, ) # Send completion notification with status send_epg_update(epg_source.id, "parsing_programs", 100, status="success", - message=epg_source.last_message) + message=epg_source.last_message, + updated_at=epg_source.updated_at.isoformat()) - logger.info(f"Completed parsing all programs for source: {epg_source.name}") + logger.info(f"Completed parsing programs for source: {epg_source.name} - " + f"{total_programs:,} programs for {channels_with_programs} channels, " + f"skipped {skipped_programs:,} programs for unmapped channels") return True except Exception as e: @@ -1525,14 +1688,19 @@ def parse_programs_for_source(epg_source, tvg_id=None): return False finally: # Final memory cleanup and tracking - + if source_file: + try: + source_file.close() + except: + pass + source_file = None # Explicitly release any remaining large data structures - failed_entries = None - program_count = None - channel_count = None - updated_count = None - processed = None + programs_to_create = None + programs_by_channel = None + mapped_epg_ids = None + mapped_tvg_ids = None + tvg_id_to_epg_id = None gc.collect() # Add comprehensive memory cleanup at the end diff --git a/apps/output/views.py b/apps/output/views.py index bc2bace5f..1710fa4d0 100644 --- a/apps/output/views.py +++ b/apps/output/views.py @@ -161,18 +161,7 @@ def generate_m3u(request, profile_name=None, user=None): channelprofilemembership__enabled=True ).order_by('channel_number') else: - if profile_name is not None: - try: - channel_profile = ChannelProfile.objects.get(name=profile_name) - except ChannelProfile.DoesNotExist: - logger.warning("Requested channel profile (%s) during m3u generation does not exist", profile_name) - raise Http404(f"Channel profile '{profile_name}' not found") - channels = Channel.objects.filter( - 
channelprofilemembership__channel_profile=channel_profile, - channelprofilemembership__enabled=True, - ).order_by("channel_number") - else: - channels = Channel.objects.order_by("channel_number") + channels = Channel.objects.order_by("channel_number") # Check if the request wants to use direct logo URLs instead of cache use_cached_logos = request.GET.get('cachedlogos', 'true').lower() != 'false' @@ -2314,20 +2303,20 @@ def xc_get_epg(request, user, short=False): program_output = { "id": f"{id}", "epg_id": f"{epg_id}", - "title": base64.b64encode(title.encode()).decode(), + "title": base64.b64encode((title or "").encode()).decode(), "lang": "", - "start": start.strftime("%Y%m%d%H%M%S"), - "end": end.strftime("%Y%m%d%H%M%S"), - "description": base64.b64encode(description.encode()).decode(), - "channel_id": channel_num_int, - "start_timestamp": int(start.timestamp()), - "stop_timestamp": int(end.timestamp()), + "start": start.strftime("%Y-%m-%d %H:%M:%S"), + "end": end.strftime("%Y-%m-%d %H:%M:%S"), + "description": base64.b64encode((description or "").encode()).decode(), + "channel_id": str(channel_num_int), + "start_timestamp": str(int(start.timestamp())), + "stop_timestamp": str(int(end.timestamp())), "stream_id": f"{channel_id}", } if short == False: program_output["now_playing"] = 1 if start <= django_timezone.now() <= end else 0 - program_output["has_archive"] = "0" + program_output["has_archive"] = 0 output['epg_listings'].append(program_output) @@ -2532,34 +2521,45 @@ def xc_get_series_info(request, user, series_id): except Exception as e: logger.error(f"Error refreshing series data for relation {series_relation.id}: {str(e)}") - # Get episodes for this series from the same M3U account - episode_relations = M3UEpisodeRelation.objects.filter( - episode__series=series, - m3u_account=series_relation.m3u_account - ).select_related('episode').order_by('episode__season_number', 'episode__episode_number') + # Get unique episodes for this series that have relations from any active M3U account + # We query episodes directly to avoid duplicates when multiple relations exist + # (e.g., same episode in different languages/qualities) + from apps.vod.models import Episode + episodes = Episode.objects.filter( + series=series, + m3u_relations__m3u_account__is_active=True + ).distinct().order_by('season_number', 'episode_number') # Group episodes by season seasons = {} - for relation in episode_relations: - episode = relation.episode + for episode in episodes: season_num = episode.season_number or 1 if season_num not in seasons: seasons[season_num] = [] - # Try to get the highest priority related M3UEpisodeRelation for this episode (for video/audio/bitrate) + # Get the highest priority relation for this episode (for container_extension, video/audio/bitrate) from apps.vod.models import M3UEpisodeRelation - first_relation = M3UEpisodeRelation.objects.filter( - episode=episode + best_relation = M3UEpisodeRelation.objects.filter( + episode=episode, + m3u_account__is_active=True ).select_related('m3u_account').order_by('-m3u_account__priority', 'id').first() + video = audio = bitrate = None - if first_relation and first_relation.custom_properties: - info = first_relation.custom_properties.get('info') - if info and isinstance(info, dict): - info_info = info.get('info') - if info_info and isinstance(info_info, dict): - video = info_info.get('video', {}) - audio = info_info.get('audio', {}) - bitrate = info_info.get('bitrate', 0) + container_extension = "mp4" + added_timestamp = 
str(int(episode.created_at.timestamp())) + + if best_relation: + container_extension = best_relation.container_extension or "mp4" + added_timestamp = str(int(best_relation.created_at.timestamp())) + if best_relation.custom_properties: + info = best_relation.custom_properties.get('info') + if info and isinstance(info, dict): + info_info = info.get('info') + if info_info and isinstance(info_info, dict): + video = info_info.get('video', {}) + audio = info_info.get('audio', {}) + bitrate = info_info.get('bitrate', 0) + if video is None: video = episode.custom_properties.get('video', {}) if episode.custom_properties else {} if audio is None: @@ -2572,8 +2572,8 @@ def xc_get_series_info(request, user, series_id): "season": season_num, "episode_num": episode.episode_number or 0, "title": episode.name, - "container_extension": relation.container_extension or "mp4", - "added": str(int(relation.created_at.timestamp())), + "container_extension": container_extension, + "added": added_timestamp, "custom_sid": None, "direct_source": "", "info": { @@ -2889,7 +2889,7 @@ def xc_series_stream(request, username, password, stream_id, extension): filters = {"episode_id": stream_id, "m3u_account__is_active": True} try: - episode_relation = M3UEpisodeRelation.objects.select_related('episode').get(**filters) + episode_relation = M3UEpisodeRelation.objects.select_related('episode').filter(**filters).order_by('-m3u_account__priority', 'id').first() except M3UEpisodeRelation.DoesNotExist: return JsonResponse({"error": "Episode not found"}, status=404) diff --git a/apps/proxy/vod_proxy/multi_worker_connection_manager.py b/apps/proxy/vod_proxy/multi_worker_connection_manager.py index fefc87395..251721c54 100644 --- a/apps/proxy/vod_proxy/multi_worker_connection_manager.py +++ b/apps/proxy/vod_proxy/multi_worker_connection_manager.py @@ -24,6 +24,11 @@ logger = logging.getLogger("vod_proxy") +def get_vod_client_stop_key(client_id): + """Get the Redis key for signaling a VOD client to stop""" + return f"vod_proxy:client:{client_id}:stop" + + def infer_content_type_from_url(url: str) -> Optional[str]: """ Infer MIME type from file extension in URL @@ -832,6 +837,7 @@ def stream_content_with_session(self, session_id, content_obj, stream_url, m3u_p # Create streaming generator def stream_generator(): decremented = False + stop_signal_detected = False try: logger.info(f"[{client_id}] Worker {self.worker_id} - Starting Redis-backed stream") @@ -846,14 +852,25 @@ def stream_generator(): bytes_sent = 0 chunk_count = 0 + # Get the stop signal key for this client + stop_key = get_vod_client_stop_key(client_id) + for chunk in upstream_response.iter_content(chunk_size=8192): if chunk: yield chunk bytes_sent += len(chunk) chunk_count += 1 - # Update activity every 100 chunks in consolidated connection state + # Check for stop signal every 100 chunks if chunk_count % 100 == 0: + # Check if stop signal has been set + if self.redis_client and self.redis_client.exists(stop_key): + logger.info(f"[{client_id}] Worker {self.worker_id} - Stop signal detected, terminating stream") + # Delete the stop key + self.redis_client.delete(stop_key) + stop_signal_detected = True + break + # Update the connection state logger.debug(f"Client: [{client_id}] Worker: {self.worker_id} sent {chunk_count} chunks for VOD: {content_name}") if redis_connection._acquire_lock(): @@ -867,7 +884,10 @@ def stream_generator(): finally: redis_connection._release_lock() - logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} 
bytes sent") + if stop_signal_detected: + logger.info(f"[{client_id}] Worker {self.worker_id} - Stream stopped by signal: {bytes_sent} bytes sent") + else: + logger.info(f"[{client_id}] Worker {self.worker_id} - Redis-backed stream completed: {bytes_sent} bytes sent") redis_connection.decrement_active_streams() decremented = True diff --git a/apps/proxy/vod_proxy/urls.py b/apps/proxy/vod_proxy/urls.py index c06426ce0..f48f70e07 100644 --- a/apps/proxy/vod_proxy/urls.py +++ b/apps/proxy/vod_proxy/urls.py @@ -21,4 +21,7 @@ # VOD Stats path('stats/', views.VODStatsView.as_view(), name='vod_stats'), + + # Stop VOD client connection + path('stop_client/', views.stop_vod_client, name='stop_vod_client'), ] diff --git a/apps/proxy/vod_proxy/views.py b/apps/proxy/vod_proxy/views.py index 00ed8a10f..f3aca3fcf 100644 --- a/apps/proxy/vod_proxy/views.py +++ b/apps/proxy/vod_proxy/views.py @@ -15,7 +15,7 @@ from apps.vod.models import Movie, Series, Episode from apps.m3u.models import M3UAccount, M3UAccountProfile from apps.proxy.vod_proxy.connection_manager import VODConnectionManager -from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url +from apps.proxy.vod_proxy.multi_worker_connection_manager import MultiWorkerVODConnectionManager, infer_content_type_from_url, get_vod_client_stop_key from .utils import get_client_info, create_vod_response logger = logging.getLogger(__name__) @@ -1011,3 +1011,59 @@ def get(self, request): except Exception as e: logger.error(f"Error getting VOD stats: {e}") return JsonResponse({'error': str(e)}, status=500) + + +from rest_framework.decorators import api_view, permission_classes +from apps.accounts.permissions import IsAdmin + + +@csrf_exempt +@api_view(["POST"]) +@permission_classes([IsAdmin]) +def stop_vod_client(request): + """Stop a specific VOD client connection using stop signal mechanism""" + try: + # Parse request body + import json + try: + data = json.loads(request.body) + except json.JSONDecodeError: + return JsonResponse({'error': 'Invalid JSON'}, status=400) + + client_id = data.get('client_id') + if not client_id: + return JsonResponse({'error': 'No client_id provided'}, status=400) + + logger.info(f"Request to stop VOD client: {client_id}") + + # Get Redis client + connection_manager = MultiWorkerVODConnectionManager.get_instance() + redis_client = connection_manager.redis_client + + if not redis_client: + return JsonResponse({'error': 'Redis not available'}, status=500) + + # Check if connection exists + connection_key = f"vod_persistent_connection:{client_id}" + connection_data = redis_client.hgetall(connection_key) + if not connection_data: + logger.warning(f"VOD connection not found: {client_id}") + return JsonResponse({'error': 'Connection not found'}, status=404) + + # Set a stop signal key that the worker will check + stop_key = get_vod_client_stop_key(client_id) + redis_client.setex(stop_key, 60, "true") # 60 second TTL + + logger.info(f"Set stop signal for VOD client: {client_id}") + + return JsonResponse({ + 'message': 'VOD client stop signal sent', + 'client_id': client_id, + 'stop_key': stop_key + }) + + except Exception as e: + logger.error(f"Error stopping VOD client: {e}", exc_info=True) + return JsonResponse({'error': str(e)}, status=500) + + diff --git a/apps/vod/tasks.py b/apps/vod/tasks.py index 1170543a9..d42be9465 100644 --- a/apps/vod/tasks.py +++ b/apps/vod/tasks.py @@ -1232,7 +1232,13 @@ def refresh_series_episodes(account, series, external_series_id, 
episodes_data=N def batch_process_episodes(account, series, episodes_data, scan_start_time=None): - """Process episodes in batches for better performance""" + """Process episodes in batches for better performance. + + Note: Multiple streams can represent the same episode (e.g., different languages + or qualities). Each stream has a unique stream_id, but they share the same + season/episode number. We create one Episode record per (series, season, episode) + and multiple M3UEpisodeRelation records pointing to it. + """ if not episodes_data: return @@ -1249,12 +1255,13 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) logger.info(f"Batch processing {len(all_episodes_data)} episodes for series {series.name}") # Extract episode identifiers - episode_keys = [] + # Note: episode_keys may have duplicates when multiple streams represent same episode + episode_keys = set() # Use set to track unique episode keys episode_ids = [] for episode_data in all_episodes_data: season_num = episode_data['_season_number'] episode_num = episode_data.get('episode_num', 0) - episode_keys.append((series.id, season_num, episode_num)) + episode_keys.add((series.id, season_num, episode_num)) episode_ids.append(str(episode_data.get('id'))) # Pre-fetch existing episodes @@ -1277,6 +1284,10 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) relations_to_create = [] relations_to_update = [] + # Track episodes we're creating in this batch to avoid duplicates + # Key: (series_id, season_number, episode_number) -> Episode object + episodes_pending_creation = {} + for episode_data in all_episodes_data: try: episode_id = str(episode_data.get('id')) @@ -1306,10 +1317,15 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) if backdrop: custom_props['backdrop_path'] = [backdrop] - # Find existing episode + # Find existing episode - check DB first, then pending creations episode_key = (series.id, season_number, episode_number) episode = existing_episodes.get(episode_key) + # Check if we already have this episode pending creation (multiple streams for same episode) + if not episode and episode_key in episodes_pending_creation: + episode = episodes_pending_creation[episode_key] + logger.debug(f"Reusing pending episode for S{season_number:02d}E{episode_number:02d} (stream_id: {episode_id})") + if episode: # Update existing episode updated = False @@ -1338,7 +1354,9 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) episode.custom_properties = custom_props if custom_props else None updated = True - if updated: + # Only add to update list if episode has a PK (exists in DB) and isn't already in list + # Episodes pending creation don't have PKs yet and will be created via bulk_create + if updated and episode.pk and episode not in episodes_to_update: episodes_to_update.append(episode) else: # Create new episode @@ -1356,6 +1374,8 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) custom_properties=custom_props if custom_props else None ) episodes_to_create.append(episode) + # Track this episode so subsequent streams with same season/episode can reuse it + episodes_pending_creation[episode_key] = episode # Handle episode relation if episode_id in existing_relations: @@ -1389,9 +1409,28 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) # Execute batch operations with transaction.atomic(): - # Create new episodes + # Create new episodes - use 
ignore_conflicts in case of race conditions if episodes_to_create: - Episode.objects.bulk_create(episodes_to_create) + Episode.objects.bulk_create(episodes_to_create, ignore_conflicts=True) + + # Re-fetch the created episodes to get their PKs + # We need to do this because bulk_create with ignore_conflicts doesn't set PKs + created_episode_keys = [ + (ep.series_id, ep.season_number, ep.episode_number) + for ep in episodes_to_create + ] + db_episodes = Episode.objects.filter(series=series) + episode_pk_map = { + (ep.series_id, ep.season_number, ep.episode_number): ep + for ep in db_episodes + } + + # Update relations to point to the actual DB episodes with PKs + for relation in relations_to_create: + ep = relation.episode + key = (ep.series_id, ep.season_number, ep.episode_number) + if key in episode_pk_map: + relation.episode = episode_pk_map[key] # Update existing episodes if episodes_to_update: @@ -1400,9 +1439,9 @@ def batch_process_episodes(account, series, episodes_data, scan_start_time=None) 'tmdb_id', 'imdb_id', 'custom_properties' ]) - # Create new episode relations + # Create new episode relations - use ignore_conflicts for stream_id duplicates if relations_to_create: - M3UEpisodeRelation.objects.bulk_create(relations_to_create) + M3UEpisodeRelation.objects.bulk_create(relations_to_create, ignore_conflicts=True) # Update existing episode relations if relations_to_update: diff --git a/dispatcharr/settings.py b/dispatcharr/settings.py index d6c29dd9c..556fb39d0 100644 --- a/dispatcharr/settings.py +++ b/dispatcharr/settings.py @@ -4,7 +4,7 @@ BASE_DIR = Path(__file__).resolve().parent.parent -SECRET_KEY = "REPLACE_ME_WITH_A_REAL_SECRET" +SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY") REDIS_HOST = os.environ.get("REDIS_HOST", "localhost") REDIS_DB = os.environ.get("REDIS_DB", "0") @@ -226,6 +226,13 @@ MEDIA_ROOT = BASE_DIR / "media" MEDIA_URL = "/media/" +# Backup settings +BACKUP_ROOT = os.environ.get("BACKUP_ROOT", "/data/backups") +BACKUP_DATA_DIRS = [ + os.environ.get("LOGOS_DIR", "/data/logos"), + os.environ.get("UPLOADS_DIR", "/data/uploads"), + os.environ.get("PLUGINS_DIR", "/data/plugins"), +] SERVER_IP = "127.0.0.1" diff --git a/dispatcharr/utils.py b/dispatcharr/utils.py index 260515fc8..56243b7ac 100644 --- a/dispatcharr/utils.py +++ b/dispatcharr/utils.py @@ -44,7 +44,7 @@ def network_access_allowed(request, settings_key): cidrs = ( network_access[settings_key].split(",") if settings_key in network_access - else ["0.0.0.0/0"] + else ["0.0.0.0/0", "::/0"] ) network_allowed = False diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index fa0eea01f..72eb59282 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -40,6 +40,21 @@ export REDIS_DB=${REDIS_DB:-0} export DISPATCHARR_PORT=${DISPATCHARR_PORT:-9191} export LIBVA_DRIVERS_PATH='/usr/local/lib/x86_64-linux-gnu/dri' export LD_LIBRARY_PATH='/usr/local/lib' +export SECRET_FILE="/data/jwt" +# Ensure Django secret key exists or generate a new one +if [ ! -f "$SECRET_FILE" ]; then + echo "Generating new Django secret key..." 
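+    # Restrict permissions before creating the secret: with umask 077 the temp file
+    # (and the final /data/jwt it is renamed to) is readable only by the owner.
+    # Writing to a mktemp file and then mv-ing it into place keeps the update atomic,
+    # so DJANGO_SECRET_KEY below never picks up a partially written secret.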
+ old_umask=$(umask) + umask 077 + tmpfile="$(mktemp "${SECRET_FILE}.XXXXXX")" || { echo "mktemp failed"; exit 1; } + python3 - <<'PY' >"$tmpfile" || { echo "secret generation failed"; rm -f "$tmpfile"; exit 1; } +import secrets +print(secrets.token_urlsafe(64)) +PY + mv -f "$tmpfile" "$SECRET_FILE" || { echo "move failed"; rm -f "$tmpfile"; exit 1; } + umask $old_umask +fi +export DJANGO_SECRET_KEY="$(cat "$SECRET_FILE")" # Process priority configuration # UWSGI_NICE_LEVEL: Absolute nice value for uWSGI/streaming (default: 0 = normal priority) @@ -90,7 +105,7 @@ if [[ ! -f /etc/profile.d/dispatcharr.sh ]]; then DISPATCHARR_ENV DISPATCHARR_DEBUG DISPATCHARR_LOG_LEVEL REDIS_HOST REDIS_DB POSTGRES_DIR DISPATCHARR_PORT DISPATCHARR_VERSION DISPATCHARR_TIMESTAMP LIBVA_DRIVERS_PATH LIBVA_DRIVER_NAME LD_LIBRARY_PATH - CELERY_NICE_LEVEL UWSGI_NICE_LEVEL + CELERY_NICE_LEVEL UWSGI_NICE_LEVEL DJANGO_SECRET_KEY ) # Process each variable for both profile.d and environment @@ -187,7 +202,7 @@ fi # Users can override via UWSGI_NICE_LEVEL environment variable in docker-compose # Start with nice as root, then use setpriv to drop privileges to dispatch user # This preserves both the nice value and environment variables -nice -n $UWSGI_NICE_LEVEL su -p - "$POSTGRES_USER" -c "cd /app && exec uwsgi $uwsgi_args" & uwsgi_pid=$! +nice -n $UWSGI_NICE_LEVEL su - "$POSTGRES_USER" -c "cd /app && exec /dispatcharrpy/bin/uwsgi $uwsgi_args" & uwsgi_pid=$! echo "✅ uwsgi started with PID $uwsgi_pid (nice $UWSGI_NICE_LEVEL)" pids+=("$uwsgi_pid") diff --git a/docker/init/03-init-dispatcharr.sh b/docker/init/03-init-dispatcharr.sh index 5fbef23d1..da7d44840 100644 --- a/docker/init/03-init-dispatcharr.sh +++ b/docker/init/03-init-dispatcharr.sh @@ -29,9 +29,17 @@ if [ "$(id -u)" = "0" ] && [ -d "/app" ]; then chown $PUID:$PGID /app fi fi - +# Configure nginx port sed -i "s/NGINX_PORT/${DISPATCHARR_PORT}/g" /etc/nginx/sites-enabled/default +# Configure nginx based on IPv6 availability +if ip -6 addr show | grep -q "inet6"; then + echo "✅ IPv6 is available, enabling IPv6 in nginx" +else + echo "⚠️ IPv6 not available, disabling IPv6 in nginx" + sed -i '/listen \[::\]:/d' /etc/nginx/sites-enabled/default +fi + # NOTE: mac doesn't run as root, so only manage permissions # if this script is running as root if [ "$(id -u)" = "0" ]; then diff --git a/docker/nginx.conf b/docker/nginx.conf index 5e754d20c..406d587c5 100644 --- a/docker/nginx.conf +++ b/docker/nginx.conf @@ -3,6 +3,7 @@ proxy_cache_path /app/logo_cache levels=1:2 keys_zone=logo_cache:10m server { listen NGINX_PORT; + listen [::]:NGINX_PORT; proxy_connect_timeout 75; proxy_send_timeout 300; @@ -34,6 +35,13 @@ server { root /data; } + # Internal location for X-Accel-Redirect backup downloads + # Django handles auth, nginx serves the file directly + location /protected-backups/ { + internal; + alias /data/backups/; + } + location /api/logos/(?\d+)/cache/ { proxy_pass http://127.0.0.1:5656; proxy_cache logo_cache; diff --git a/docker/uwsgi.debug.ini b/docker/uwsgi.debug.ini index 3de890a53..69c040f22 100644 --- a/docker/uwsgi.debug.ini +++ b/docker/uwsgi.debug.ini @@ -20,7 +20,6 @@ module = scripts.debug_wrapper:application virtualenv = /dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings - socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/docker/uwsgi.ini b/docker/uwsgi.ini index f8fe8ab7c..d831adfca 100644 --- a/docker/uwsgi.ini +++ b/docker/uwsgi.ini @@ -21,6 +21,7 @@ module = dispatcharr.wsgi:application virtualenv = 
/dispatcharrpy master = true env = DJANGO_SETTINGS_MODULE=dispatcharr.settings +env = USE_NGINX_ACCEL=true socket = /app/uwsgi.sock chmod-socket = 777 vacuum = true diff --git a/frontend/src/WebSocket.jsx b/frontend/src/WebSocket.jsx index f2e28ae93..40035d33b 100644 --- a/frontend/src/WebSocket.jsx +++ b/frontend/src/WebSocket.jsx @@ -574,7 +574,7 @@ export const WebsocketProvider = ({ children }) => { const sourceId = parsedEvent.data.source || parsedEvent.data.account; const epg = epgs[sourceId]; - + // Only update progress if the EPG still exists in the store // This prevents crashes when receiving updates for deleted EPGs if (epg) { @@ -582,7 +582,9 @@ export const WebsocketProvider = ({ children }) => { updateEPGProgress(parsedEvent.data); } else { // EPG was deleted, ignore this update - console.debug(`Ignoring EPG refresh update for deleted EPG ${sourceId}`); + console.debug( + `Ignoring EPG refresh update for deleted EPG ${sourceId}` + ); break; } @@ -621,6 +623,10 @@ export const WebsocketProvider = ({ children }) => { status: parsedEvent.data.status || 'success', last_message: parsedEvent.data.message || epg.last_message, + // Use the timestamp from the backend if provided + ...(parsedEvent.data.updated_at && { + updated_at: parsedEvent.data.updated_at, + }), }); // Only show success notification if we've finished parsing programs and had no errors diff --git a/frontend/src/api.js b/frontend/src/api.js index 7eda6a3f1..64ce4d772 100644 --- a/frontend/src/api.js +++ b/frontend/src/api.js @@ -1349,6 +1349,183 @@ export default class API { } } + // Backup API (async with Celery task polling) + static async listBackups() { + try { + const response = await request(`${host}/api/backups/`); + return response || []; + } catch (e) { + errorNotification('Failed to load backups', e); + throw e; + } + } + + static async getBackupStatus(taskId, token = null) { + try { + let url = `${host}/api/backups/status/${taskId}/`; + if (token) { + url += `?token=${encodeURIComponent(token)}`; + } + const response = await request(url, { auth: !token }); + return response; + } catch (e) { + throw e; + } + } + + static async waitForBackupTask(taskId, onProgress, token = null) { + const pollInterval = 2000; // Poll every 2 seconds + const maxAttempts = 300; // Max 10 minutes (300 * 2s) + + for (let attempt = 0; attempt < maxAttempts; attempt++) { + try { + const status = await API.getBackupStatus(taskId, token); + + if (onProgress) { + onProgress(status); + } + + if (status.state === 'completed') { + return status.result; + } else if (status.state === 'failed') { + throw new Error(status.error || 'Task failed'); + } + } catch (e) { + throw e; + } + + // Wait before next poll + await new Promise((resolve) => setTimeout(resolve, pollInterval)); + } + + throw new Error('Task timed out'); + } + + static async createBackup(onProgress) { + try { + // Start the backup task + const response = await request(`${host}/api/backups/create/`, { + method: 'POST', + }); + + // Wait for the task to complete using token for auth + const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to create backup', e); + throw e; + } + } + + static async uploadBackup(file) { + try { + const formData = new FormData(); + formData.append('file', file); + + const response = await request( + `${host}/api/backups/upload/`, + { + method: 'POST', + body: formData, + } + ); + return response; + } catch (e) { + errorNotification('Failed to upload 
backup', e); + throw e; + } + } + + static async deleteBackup(filename) { + try { + const encodedFilename = encodeURIComponent(filename); + await request(`${host}/api/backups/${encodedFilename}/delete/`, { + method: 'DELETE', + }); + } catch (e) { + errorNotification('Failed to delete backup', e); + throw e; + } + } + + static async getDownloadToken(filename) { + // Get a download token from the server + try { + const response = await request(`${host}/api/backups/${encodeURIComponent(filename)}/download-token/`); + return response.token; + } catch (e) { + throw e; + } + } + + static async downloadBackup(filename) { + try { + // Get a download token first (requires auth) + const token = await API.getDownloadToken(filename); + const encodedFilename = encodeURIComponent(filename); + + // Build the download URL with token + const downloadUrl = `${host}/api/backups/${encodedFilename}/download/?token=${encodeURIComponent(token)}`; + + // Use direct browser navigation instead of fetch to avoid CORS issues + const link = document.createElement('a'); + link.href = downloadUrl; + link.download = filename; + document.body.appendChild(link); + link.click(); + document.body.removeChild(link); + + return { filename }; + } catch (e) { + errorNotification('Failed to download backup', e); + throw e; + } + } + + static async restoreBackup(filename, onProgress) { + try { + // Start the restore task + const encodedFilename = encodeURIComponent(filename); + const response = await request( + `${host}/api/backups/${encodedFilename}/restore/`, + { + method: 'POST', + } + ); + + // Wait for the task to complete using token for auth + // Token-based auth allows status polling even after DB restore invalidates user sessions + const result = await API.waitForBackupTask(response.task_id, onProgress, response.task_token); + return result; + } catch (e) { + errorNotification('Failed to restore backup', e); + throw e; + } + } + + static async getBackupSchedule() { + try { + const response = await request(`${host}/api/backups/schedule/`); + return response; + } catch (e) { + errorNotification('Failed to get backup schedule', e); + throw e; + } + } + + static async updateBackupSchedule(settings) { + try { + const response = await request(`${host}/api/backups/schedule/update/`, { + method: 'PUT', + body: settings, + }); + return response; + } catch (e) { + errorNotification('Failed to update backup schedule', e); + throw e; + } + } + static async getVersion() { try { const response = await request(`${host}/api/core/version/`, { @@ -1514,6 +1691,19 @@ export default class API { } } + static async stopVODClient(clientId) { + try { + const response = await request(`${host}/proxy/vod/stop_client/`, { + method: 'POST', + body: { client_id: clientId }, + }); + + return response; + } catch (e) { + errorNotification('Failed to stop VOD client', e); + } + } + static async stopChannel(id) { try { const response = await request(`${host}/proxy/ts/stop/${id}`, { @@ -2131,7 +2321,8 @@ export default class API { static async deleteSeriesRule(tvgId) { try { - await request(`${host}/api/channels/series-rules/${tvgId}/`, { method: 'DELETE' }); + const encodedTvgId = encodeURIComponent(tvgId); + await request(`${host}/api/channels/series-rules/${encodedTvgId}/`, { method: 'DELETE' }); notifications.show({ title: 'Series rule removed' }); } catch (e) { errorNotification('Failed to remove series rule', e); diff --git a/frontend/src/components/FloatingVideo.jsx b/frontend/src/components/FloatingVideo.jsx index 6aaeecdaf..611d7e2a4 100644 --- 
a/frontend/src/components/FloatingVideo.jsx +++ b/frontend/src/components/FloatingVideo.jsx @@ -1,5 +1,5 @@ // frontend/src/components/FloatingVideo.js -import React, { useEffect, useRef, useState } from 'react'; +import React, { useCallback, useEffect, useRef, useState } from 'react'; import Draggable from 'react-draggable'; import useVideoStore from '../store/useVideoStore'; import mpegts from 'mpegts.js'; @@ -17,7 +17,19 @@ export default function FloatingVideo() { const [isLoading, setIsLoading] = useState(false); const [loadError, setLoadError] = useState(null); const [showOverlay, setShowOverlay] = useState(true); + const [videoSize, setVideoSize] = useState({ width: 320, height: 180 }); + const [isResizing, setIsResizing] = useState(false); + const resizeStateRef = useRef(null); const overlayTimeoutRef = useRef(null); + const aspectRatioRef = useRef(320 / 180); + const [dragPosition, setDragPosition] = useState(null); + const dragPositionRef = useRef(null); + const dragOffsetRef = useRef({ x: 0, y: 0 }); + const initialPositionRef = useRef(null); + + const MIN_WIDTH = 220; + const MIN_HEIGHT = 124; + const VISIBLE_MARGIN = 48; // keep part of the window visible when dragging // Safely destroy the mpegts player to prevent errors const safeDestroyPlayer = () => { @@ -315,24 +327,223 @@ export default function FloatingVideo() { }, 50); }; + const clampToVisible = useCallback( + (x, y) => { + if (typeof window === 'undefined') return { x, y }; + + const minX = -(videoSize.width - VISIBLE_MARGIN); + const minY = -(videoSize.height - VISIBLE_MARGIN); + const maxX = window.innerWidth - VISIBLE_MARGIN; + const maxY = window.innerHeight - VISIBLE_MARGIN; + + return { + x: Math.min(Math.max(x, minX), maxX), + y: Math.min(Math.max(y, minY), maxY), + }; + }, + [VISIBLE_MARGIN, videoSize.height, videoSize.width] + ); + + const handleResizeMove = useCallback( + (event) => { + if (!resizeStateRef.current) return; + + const clientX = + event.touches && event.touches.length ? event.touches[0].clientX : event.clientX; + const clientY = + event.touches && event.touches.length ? event.touches[0].clientY : event.clientY; + + const deltaX = clientX - resizeStateRef.current.startX; + const deltaY = clientY - resizeStateRef.current.startY; + const aspectRatio = resizeStateRef.current.aspectRatio || aspectRatioRef.current; + + // Derive width/height while keeping the original aspect ratio + let nextWidth = resizeStateRef.current.startWidth + deltaX; + let nextHeight = nextWidth / aspectRatio; + + // Allow vertical-driven resize if the user drags mostly vertically + if (Math.abs(deltaY) > Math.abs(deltaX)) { + nextHeight = resizeStateRef.current.startHeight + deltaY; + nextWidth = nextHeight * aspectRatio; + } + + // Respect minimums while keeping the ratio + if (nextWidth < MIN_WIDTH) { + nextWidth = MIN_WIDTH; + nextHeight = nextWidth / aspectRatio; + } + + if (nextHeight < MIN_HEIGHT) { + nextHeight = MIN_HEIGHT; + nextWidth = nextHeight * aspectRatio; + } + + // Keep within viewport with a margin based on current position + const posX = dragPositionRef.current?.x ?? 0; + const posY = dragPositionRef.current?.y ?? 
0; + const margin = VISIBLE_MARGIN; + + const maxWidth = Math.max(MIN_WIDTH, window.innerWidth - posX - margin); + const maxHeight = Math.max(MIN_HEIGHT, window.innerHeight - posY - margin); + + if (nextWidth > maxWidth) { + nextWidth = maxWidth; + nextHeight = nextWidth / aspectRatio; + } + + if (nextHeight > maxHeight) { + nextHeight = maxHeight; + nextWidth = nextHeight * aspectRatio; + } + + // Final pass to honor both bounds while keeping the ratio + if (nextWidth > maxWidth) { + nextWidth = maxWidth; + nextHeight = nextWidth / aspectRatio; + } + + setVideoSize({ + width: Math.round(nextWidth), + height: Math.round(nextHeight), + }); + }, + [MIN_HEIGHT, MIN_WIDTH, VISIBLE_MARGIN] + ); + + const endResize = useCallback(() => { + setIsResizing(false); + resizeStateRef.current = null; + window.removeEventListener('mousemove', handleResizeMove); + window.removeEventListener('mouseup', endResize); + window.removeEventListener('touchmove', handleResizeMove); + window.removeEventListener('touchend', endResize); + }, [handleResizeMove]); + + const startResize = (event) => { + event.stopPropagation(); + event.preventDefault(); + + const clientX = + event.touches && event.touches.length ? event.touches[0].clientX : event.clientX; + const clientY = + event.touches && event.touches.length ? event.touches[0].clientY : event.clientY; + + const aspectRatio = + videoSize.height > 0 ? videoSize.width / videoSize.height : aspectRatioRef.current; + aspectRatioRef.current = aspectRatio; + + resizeStateRef.current = { + startX: clientX, + startY: clientY, + startWidth: videoSize.width, + startHeight: videoSize.height, + aspectRatio, + }; + + setIsResizing(true); + + window.addEventListener('mousemove', handleResizeMove); + window.addEventListener('mouseup', endResize); + window.addEventListener('touchmove', handleResizeMove); + window.addEventListener('touchend', endResize); + }; + + useEffect(() => { + return () => { + endResize(); + }; + }, [endResize]); + + useEffect(() => { + dragPositionRef.current = dragPosition; + }, [dragPosition]); + + // Initialize the floating window near bottom-right once + useEffect(() => { + if (initialPositionRef.current || typeof window === 'undefined') return; + + const initialX = Math.max(10, window.innerWidth - videoSize.width - 20); + const initialY = Math.max(10, window.innerHeight - videoSize.height - 20); + const pos = clampToVisible(initialX, initialY); + + initialPositionRef.current = pos; + setDragPosition(pos); + dragPositionRef.current = pos; + }, [clampToVisible, videoSize.height, videoSize.width]); + + const handleDragStart = useCallback( + (event, data) => { + const clientX = event.touches?.[0]?.clientX ?? event.clientX; + const clientY = event.touches?.[0]?.clientY ?? event.clientY; + const rect = videoContainerRef.current?.getBoundingClientRect(); + + if (clientX != null && clientY != null && rect) { + dragOffsetRef.current = { + x: clientX - rect.left, + y: clientY - rect.top, + }; + } else { + dragOffsetRef.current = { x: 0, y: 0 }; + } + + const clamped = clampToVisible(data?.x ?? 0, data?.y ?? 0); + setDragPosition(clamped); + dragPositionRef.current = clamped; + }, + [clampToVisible] + ); + + const handleDrag = useCallback( + (event) => { + const clientX = event.touches?.[0]?.clientX ?? event.clientX; + const clientY = event.touches?.[0]?.clientY ?? event.clientY; + if (clientX == null || clientY == null) return; + + const nextX = clientX - (dragOffsetRef.current?.x ?? 0); + const nextY = clientY - (dragOffsetRef.current?.y ?? 
0); + const clamped = clampToVisible(nextX, nextY); + setDragPosition(clamped); + dragPositionRef.current = clamped; + }, + [clampToVisible] + ); + + const handleDragStop = useCallback( + (_, data) => { + const clamped = clampToVisible(data?.x ?? 0, data?.y ?? 0); + setDragPosition(clamped); + dragPositionRef.current = clamped; + }, + [clampToVisible] + ); + // If the floating video is hidden or no URL is selected, do not render if (!isVisible || !streamUrl) { return null; } return ( - +
@@ -378,10 +589,12 @@ export default function FloatingVideo() {