diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 123014908bebb..5ace4600a1f26 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,4 +3,4 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - interval: "daily" + interval: "weekly" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 73e27c1ef0930..e41353b5a11c8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,93 +1,50 @@ ---- -# Github Actions build for rclone -# -*- compile-command: "yamllint -f parsable build.yml" -*- +name: Rclone Build -name: build - -# Trigger the workflow on push or pull request on: push: - branches: - - '**' tags: - '**' - pull_request: - workflow_dispatch: - inputs: - manual: - description: Manual run (bypass default conditions) - type: boolean - default: true + +env: + IMAGE: tgdrive/rclone + jobs: build: - if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) timeout-minutes: 60 - defaults: - run: - shell: bash strategy: fail-fast: false matrix: - job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.24'] + job_name: ['linux', 'windows'] include: - job_name: linux os: ubuntu-latest - go: '>=1.25.0-rc.1' + go: '>=1.24.0-rc.1' gotags: cmount - build_flags: '-include "^linux/"' - check: true - quicktest: true - racequicktest: true - librclonetest: true - deploy: true + cgo: '0' + build_flags: '-include "^(linux/amd64|linux/arm64|linux/arm-v7)"' - - job_name: linux_386 - os: ubuntu-latest - go: '>=1.25.0-rc.1' - goarch: 386 + - job_name: windows + os: windows-latest + go: '>=1.24.0-rc.1' gotags: cmount - quicktest: true - + cgo: '0' + build_flags: '-include "^(windows/amd64|windows/arm64)"' + build_args: '-buildmode exe' + - job_name: mac_amd64 os: macos-latest - go: '>=1.25.0-rc.1' + go: '>=1.23.0-rc.1' gotags: 'cmount' build_flags: '-include "^darwin/amd64" -cgo' - quicktest: true - racequicktest: true - deploy: true - job_name: mac_arm64 os: macos-latest - go: '>=1.25.0-rc.1' + go: '>=1.23.0-rc.1' gotags: 'cmount' build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib' - deploy: true - - job_name: windows - os: windows-latest - go: '>=1.25.0-rc.1' - gotags: cmount - cgo: '0' - build_flags: '-include "^windows/"' - build_args: '-buildmode exe' - quicktest: true - deploy: true - - - job_name: other_os - os: ubuntu-latest - go: '>=1.25.0-rc.1' - build_flags: '-exclude "^(windows/|darwin/|linux/)"' - compile_all: true - deploy: true - - - job_name: go1.24 - os: ubuntu-latest - go: '1.24' - quicktest: true - racequicktest: true name: ${{ matrix.job_name }} @@ -95,25 +52,26 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: fetch-depth: 0 - name: Install Go - uses: actions/setup-go@v6 + uses: actions/setup-go@v5 with: go-version: ${{ matrix.go }} check-latest: true - name: Set environment variables + shell: bash run: | echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV - if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi - name: Install Libraries on Linux + 
shell: bash run: | sudo modprobe fuse sudo chmod 666 /dev/fuse @@ -123,6 +81,7 @@ jobs: if: matrix.os == 'ubuntu-latest' - name: Install Libraries on macOS + shell: bash run: | # https://github.com/Homebrew/brew/issues/15621#issuecomment-1619266788 # https://github.com/orgs/Homebrew/discussions/4612#discussioncomment-6319008 @@ -138,7 +97,8 @@ jobs: shell: powershell run: | $ProgressPreference = 'SilentlyContinue' - choco install -y winfsp zip + choco install -y winfsp + choco install zip --source="bin" echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append if ($env:GOARCH -eq "386") { choco install -y mingw --forcex86 --force @@ -151,6 +111,7 @@ jobs: if: matrix.os == 'windows-latest' - name: Print Go version and environment + shell: bash run: | printf "Using go at: $(which go)\n" printf "Go version: $(go version)\n" @@ -162,193 +123,45 @@ jobs: env - name: Build rclone - run: | - make - - - name: Rclone version - run: | - rclone version - - - name: Run tests - run: | - make quicktest - if: matrix.quicktest - - - name: Race test - run: | - make racequicktest - if: matrix.racequicktest - - - name: Run librclone tests - run: | - make -C librclone/ctest test - make -C librclone/ctest clean - librclone/python/test_rclone.py - if: matrix.librclonetest - - - name: Compile all architectures test - run: | - make - make compile_all - if: matrix.compile_all - - - name: Deploy built binaries + shell: bash run: | if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi - make ci_beta - env: - RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }} - # working-directory: '$(modulePath)' - # Deploy binaries if enabled in config && not a PR && not a fork - if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone' - - lint: - if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) - timeout-minutes: 30 - name: "lint" - runs-on: ubuntu-latest - - steps: - - name: Get runner parameters - id: get-runner-parameters - run: | - echo "year-week=$(/bin/date -u "+%Y%V")" >> $GITHUB_OUTPUT - echo "runner-os-version=$ImageOS" >> $GITHUB_OUTPUT - - - name: Checkout - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - - name: Install Go - id: setup-go - uses: actions/setup-go@v6 - with: - go-version: '>=1.24.0-rc.1' - check-latest: true - cache: false + make cross - - name: Cache - uses: actions/cache@v4 + - name: Upload Binary + uses: actions/upload-artifact@v4 with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/.cache/golangci-lint - key: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}-${{ hashFiles('go.sum') }} - restore-keys: golangci-lint-${{ steps.get-runner-parameters.outputs.runner-os-version }}-go${{ steps.setup-go.outputs.go-version }}-${{ steps.get-runner-parameters.outputs.year-week }}- - - - name: Code quality test (Linux) - uses: golangci/golangci-lint-action@v8 - with: - version: latest - skip-cache: true - - - name: Code quality test (Windows) - uses: golangci/golangci-lint-action@v8 - env: - GOOS: "windows" - with: - version: latest - skip-cache: true - - - name: Code quality test (macOS) - uses: golangci/golangci-lint-action@v8 - env: - GOOS: "darwin" - with: - 
version: latest - skip-cache: true - - - name: Code quality test (FreeBSD) - uses: golangci/golangci-lint-action@v8 - env: - GOOS: "freebsd" - with: - version: latest - skip-cache: true - - - name: Code quality test (OpenBSD) - uses: golangci/golangci-lint-action@v8 - env: - GOOS: "openbsd" - with: - version: latest - skip-cache: true - - - name: Install govulncheck - run: go install golang.org/x/vuln/cmd/govulncheck@latest - - - name: Scan for vulnerabilities - run: govulncheck ./... - - - name: Check Markdown format - uses: DavidAnson/markdownlint-cli2-action@v20 - with: - globs: | - CONTRIBUTING.md - MAINTAINERS.md - README.md - RELEASE.md - CODE_OF_CONDUCT.md - docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md - - - name: Scan edits of autogenerated files - run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}' - if: github.event_name == 'pull_request' + name: rclone-${{ matrix.job_name }} + path: ${{ github.workspace }}/build/**/* + retention-days: 1 android: - if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) timeout-minutes: 30 name: "android-all" runs-on: ubuntu-latest - steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: fetch-depth: 0 - - # Upgrade together with NDK version - name: Set up Go - uses: actions/setup-go@v6 + uses: actions/setup-go@v5 with: - go-version: '>=1.25.0-rc.1' - - - name: Set global environment variables - run: | - echo "VERSION=$(make version)" >> $GITHUB_ENV - - - name: build native rclone - run: | - make - - - name: install gomobile - run: | - go install golang.org/x/mobile/cmd/gobind@latest - go install golang.org/x/mobile/cmd/gomobile@latest - env PATH=$PATH:~/go/bin gomobile init - echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV - - - name: arm-v7a gomobile build - run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile - - - name: arm-v7a Set environment variables - run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV - echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV - echo 'GOOS=android' >> $GITHUB_ENV - echo 'GOARCH=arm' >> $GITHUB_ENV - echo 'GOARM=7' >> $GITHUB_ENV - echo 'CGO_ENABLED=1' >> $GITHUB_ENV - echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV + go-version: '>=1.24.0-rc.1' - - name: arm-v7a build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a . 
+ - name: Go module cache + uses: actions/cache@v4 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + - name: arm64-v8a Set environment variables + shell: bash run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV + echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV echo 'GOOS=android' >> $GITHUB_ENV echo 'GOARCH=arm64' >> $GITHUB_ENV @@ -356,36 +169,93 @@ jobs: echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV - name: arm64-v8a build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a . + run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${{ github.ref_name }} -o build/rclone-armv8a . - - name: x86 Set environment variables + - name: Package into Zip run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV - echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV - echo 'GOOS=android' >> $GITHUB_ENV - echo 'GOARCH=386' >> $GITHUB_ENV - echo 'CGO_ENABLED=1' >> $GITHUB_ENV - echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV + cd build + mv rclone-armv8a rclone + zip -r rclone-${{ github.ref_name }}-android-arm64.zip rclone - - name: x86 build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 . + - name: Upload Artifacts + uses: actions/upload-artifact@v4 + with: + name: rclone-android + path: ${{ github.workspace }}/build/rclone-*.zip + retention-days: 1 + + release: + name: Create Release + runs-on: ubuntu-latest + needs: [build,android] + steps: + - name: Checkout + uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - name: Download All Artifacts + uses: actions/download-artifact@v4 + with: + path: /tmp/build + pattern: rclone-* + merge-multiple: true - - name: x64 Set environment variables + - name: Copy Artifacts run: | - echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV - echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV - echo 'GOOS=android' >> $GITHUB_ENV - echo 'GOARCH=amd64' >> $GITHUB_ENV - echo 'CGO_ENABLED=1' >> $GITHUB_ENV - echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV - - - name: x64 build - run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 . 
+ mkdir build + cp -r /tmp/build/* build/ + + - name: Upload Assets + shell: bash + env: + GH_TOKEN: ${{ github.token }} + run: | + make upload_github + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - - name: Upload artifacts + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set Docker metadata + id: docker_meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE }} + labels: | + org.opencontainers.image.version=${{ github.ref_name}} + org.opencontainers.image.revision=${{ github.sha }} + org.opencontainers.image.title=${{ env.IMAGE }} + org.opencontainers.image.description=Rclone Docker Image + + - name: Publish to Docker + uses: docker/build-push-action@v6 + with: + context: . + push: true + platforms: linux/amd64,linux/arm64,linux/arm/v7 + build-args: | + VERSION=${{ github.ref_name}} + labels: ${{ steps.docker_meta.outputs.labels }} + tags: | + ghcr.io/${{ env.IMAGE }}:${{ github.ref_name}} + ghcr.io/${{ env.IMAGE }}:latest + + - name: Publish Docker Plugin run: | - make ci_upload - env: - RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }} - # Upload artifacts if not a PR && not a fork - if: env.RCLONE_CONFIG_PASS != '' && github.head_ref == '' && github.repository == 'rclone/rclone' + VER=${GITHUB_REF_NAME} + PLUGIN_USER=ghcr.io/tgdrive + for PLUGIN_ARCH in amd64 arm64 arm/v7 ;do + export PLUGIN_ARCH + export PLUGIN_USER + make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-} + make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v} + done + make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest + make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v} diff --git a/.github/workflows/build_publish_docker_image.yml b/.github/workflows/build_publish_docker_image.yml deleted file mode 100644 index 6a1285cc56746..0000000000000 --- a/.github/workflows/build_publish_docker_image.yml +++ /dev/null @@ -1,294 +0,0 @@ ---- -# Github Actions release for rclone -# -*- compile-command: "yamllint -f parsable build_publish_docker_image.yml" -*- - -name: Build & Push Docker Images - -# Trigger the workflow on push or pull request -on: - push: - branches: - - '**' - tags: - - '**' - workflow_dispatch: - inputs: - manual: - description: Manual run (bypass default conditions) - type: boolean - default: true - -jobs: - build-image: - if: inputs.manual || (github.repository == 'rclone/rclone' && github.event_name != 'pull_request') - timeout-minutes: 60 - strategy: - fail-fast: false - matrix: - include: - - platform: linux/amd64 - runs-on: ubuntu-24.04 - - platform: linux/386 - runs-on: ubuntu-24.04 - - platform: linux/arm64 - runs-on: ubuntu-24.04-arm - - platform: linux/arm/v7 - runs-on: ubuntu-24.04-arm - - platform: linux/arm/v6 - runs-on: ubuntu-24.04-arm - - name: Build Docker Image for ${{ matrix.platform }} - runs-on: ${{ matrix.runs-on }} - - steps: - - name: Free Space - shell: bash - run: | - df -h . - # Remove android SDK - sudo rm -rf /usr/local/lib/android || true - # Remove .net runtime - sudo rm -rf /usr/share/dotnet || true - df -h . 
- - - name: Checkout Repository - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - - name: Set REPO_NAME Variable - run: | - echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV} - - - name: Set PLATFORM Variable - run: | - platform=${{ matrix.platform }} - echo "PLATFORM=${platform//\//-}" >> $GITHUB_ENV - - - name: Set CACHE_NAME Variable - shell: python - run: | - import os, re - - def slugify(input_string, max_length=63): - slug = input_string.lower() - slug = re.sub(r'[^a-z0-9 -]', ' ', slug) - slug = slug.strip() - slug = re.sub(r'\s+', '-', slug) - slug = re.sub(r'-+', '-', slug) - slug = slug[:max_length] - slug = re.sub(r'[-]+$', '', slug) - return slug - - ref_name_slug = "cache" - - if os.environ.get("GITHUB_REF_NAME") and os.environ['GITHUB_EVENT_NAME'] == "pull_request": - ref_name_slug += "-pr-" + slugify(os.environ['GITHUB_REF_NAME']) - - with open(os.environ['GITHUB_ENV'], 'a') as env: - env.write(f"CACHE_NAME={ref_name_slug}\n") - - - name: Get ImageOS - # There's no way around this, because "ImageOS" is only available to - # processes, but the setup-go action uses it in its key. - id: imageos - uses: actions/github-script@v8 - with: - result-encoding: string - script: | - return process.env.ImageOS - - - name: Extract Metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v5 - env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,manifest-descriptor # Important for digest annotation (used by Github packages) - with: - images: | - ghcr.io/${{ env.REPO_NAME }} - labels: | - org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone - org.opencontainers.image.vendor=${{ github.repository_owner }} - org.opencontainers.image.authors=rclone - org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} - tags: | - type=sha - type=ref,event=pr - type=ref,event=branch - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - type=raw,value=beta,enable={{is_default_branch}} - - - name: Setup QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Load Go Build Cache for Docker - id: go-cache - uses: actions/cache@v4 - with: - key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }} - # Cache only the go builds, the module download is cached via the docker layer caching - path: | - go-build-cache - - - name: Inject Go Build Cache into Docker - uses: reproducible-containers/buildkit-cache-dance@v3 - with: - cache-map: | - { - "go-build-cache": "/root/.cache/go-build" - } - skip-extraction: ${{ steps.go-cache.outputs.cache-hit }} - - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - # This is the user that triggered the Workflow. In this case, it will - # either be the user whom created the Release or manually triggered - # the workflow_dispatch. - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and Publish Image Digest - id: build - uses: docker/build-push-action@v6 - with: - file: Dockerfile - context: . 
- provenance: false - # don't specify 'tags' here (error "get can't push tagged ref by digest") - # tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - annotations: ${{ steps.meta.outputs.annotations }} - platforms: ${{ matrix.platform }} - outputs: | - type=image,name=ghcr.io/${{ env.REPO_NAME }},push-by-digest=true,name-canonical=true,push=true - cache-from: | - type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }} - cache-to: | - type=registry,ref=ghcr.io/${{ env.REPO_NAME }}:build-${{ env.CACHE_NAME }}-${{ env.PLATFORM }},image-manifest=true,mode=max,compression=zstd - - - name: Export Image Digest - run: | - mkdir -p /tmp/digests - digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" - - - name: Upload Image Digest - uses: actions/upload-artifact@v4 - with: - name: digests-${{ env.PLATFORM }} - path: /tmp/digests/* - retention-days: 1 - if-no-files-found: error - - merge-image: - name: Merge & Push Final Docker Image - runs-on: ubuntu-24.04 - needs: - - build-image - - steps: - - name: Download Image Digests - uses: actions/download-artifact@v5 - with: - path: /tmp/digests - pattern: digests-* - merge-multiple: true - - - name: Set REPO_NAME Variable - run: | - echo "REPO_NAME=`echo ${{github.repository}} | tr '[:upper:]' '[:lower:]'`" >> ${GITHUB_ENV} - - - name: Extract Metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v5 - env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: index - with: - images: | - ${{ env.REPO_NAME }} - ghcr.io/${{ env.REPO_NAME }} - labels: | - org.opencontainers.image.url=https://github.com/rclone/rclone/pkgs/container/rclone - org.opencontainers.image.vendor=${{ github.repository_owner }} - org.opencontainers.image.authors=rclone - org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }} - org.opencontainers.image.revision=${{ github.sha }} - tags: | - type=sha - type=ref,event=pr - type=ref,event=branch - type=semver,pattern={{version}} - type=semver,pattern={{major}} - type=semver,pattern={{major}}.{{minor}} - type=raw,value=beta,enable={{is_default_branch}} - - - name: Extract Tags - shell: python - run: | - import json, os - - metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON'] - metadata = json.loads(metadata_json) - - tags = [f"--tag '{tag}'" for tag in metadata["tags"]] - tags_string = " ".join(tags) - - with open(os.environ['GITHUB_ENV'], 'a') as env: - env.write(f"TAGS={tags_string}\n") - - - name: Extract Annotations - shell: python - run: | - import json, os - - metadata_json = os.environ['DOCKER_METADATA_OUTPUT_JSON'] - metadata = json.loads(metadata_json) - - annotations = [f"--annotation '{annotation}'" for annotation in metadata["annotations"]] - annotations_string = " ".join(annotations) - - with open(os.environ['GITHUB_ENV'], 'a') as env: - env.write(f"ANNOTATIONS={annotations_string}\n") - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - # This is the user that triggered the Workflow. In this case, it will - # either be the user whom created the Release or manually triggered - # the workflow_dispatch. 
- username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Create & Push Manifest List - working-directory: /tmp/digests - run: | - docker buildx imagetools create \ - ${{ env.TAGS }} \ - ${{ env.ANNOTATIONS }} \ - $(printf 'ghcr.io/${{ env.REPO_NAME }}@sha256:%s ' *) - - - name: Inspect and Run Multi-Platform Image - run: | - docker buildx imagetools inspect --raw ${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} - docker buildx imagetools inspect --raw ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} - docker run --rm ghcr.io/${{ env.REPO_NAME }}:${{ steps.meta.outputs.version }} version diff --git a/.github/workflows/build_publish_docker_plugin.yml b/.github/workflows/build_publish_docker_plugin.yml deleted file mode 100644 index 5c5a1c665ef9a..0000000000000 --- a/.github/workflows/build_publish_docker_plugin.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -# Github Actions release for rclone -# -*- compile-command: "yamllint -f parsable build_publish_docker_plugin.yml" -*- - -name: Release Build for Docker Plugin - -on: - release: - types: [published] - workflow_dispatch: - inputs: - manual: - description: Manual run (bypass default conditions) - type: boolean - default: true - -jobs: - build_docker_volume_plugin: - if: inputs.manual || github.repository == 'rclone/rclone' - name: Build docker plugin job - runs-on: ubuntu-latest - steps: - - name: Free some space - shell: bash - run: | - df -h . - # Remove android SDK - sudo rm -rf /usr/local/lib/android || true - # Remove .net runtime - sudo rm -rf /usr/share/dotnet || true - df -h . - - name: Checkout master - uses: actions/checkout@v5 - with: - fetch-depth: 0 - - name: Build and publish docker plugin - shell: bash - run: | - VER=${GITHUB_REF#refs/tags/} - PLUGIN_USER=rclone - docker login --username ${{ secrets.DOCKER_HUB_USER }} \ - --password-stdin <<< "${{ secrets.DOCKER_HUB_PASSWORD }}" - for PLUGIN_ARCH in amd64 arm64 arm/v7 arm/v6 ;do - export PLUGIN_USER PLUGIN_ARCH - make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-} - make docker-plugin PLUGIN_TAG=${PLUGIN_ARCH/\//-}-${VER#v} - done - make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=latest - make docker-plugin PLUGIN_ARCH=amd64 PLUGIN_TAG=${VER#v} diff --git a/.github/workflows/notify.yml b/.github/workflows/notify.yml deleted file mode 100644 index 5c5bfb49d6cff..0000000000000 --- a/.github/workflows/notify.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: Notify users based on issue labels - -on: - issues: - types: [labeled] - -jobs: - notify: - runs-on: ubuntu-latest - steps: - - uses: jenschelkopf/issue-label-notification-action@1.3 - with: - token: ${{ secrets.NOTIFY_ACTION_TOKEN }} - recipients: | - Support Contract=@rclone/support diff --git a/.github/workflows/winget.yml b/.github/workflows/winget.yml deleted file mode 100644 index 378e45a455316..0000000000000 --- a/.github/workflows/winget.yml +++ /dev/null @@ -1,14 +0,0 @@ -name: Publish to Winget -on: - release: - types: [released] - -jobs: - publish: - runs-on: ubuntu-latest - steps: - - uses: vedantmgoyal2009/winget-releaser@v2 - with: - identifier: Rclone.Rclone - installers-regex: '-windows-\w+\.zip$' - token: ${{ secrets.WINGET_TOKEN }} diff --git a/.gitignore b/.gitignore index 70efa8580e990..e0e06090276dd 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,4 @@ __pycache__ .DS_Store resource_windows_*.syso .devcontainer +.aider* diff --git a/Dockerfile b/Dockerfile index bb086159492c3..f9a8558c1250b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,49 +1,21 @@ -FROM golang:alpine 
AS builder - -ARG CGO_ENABLED=0 - -WORKDIR /go/src/github.com/rclone/rclone/ - -RUN echo "**** Set Go Environment Variables ****" && \ - go env -w GOCACHE=/root/.cache/go-build - -RUN echo "**** Install Dependencies ****" && \ - apk add --no-cache \ - make \ - bash \ - gawk \ - git - -COPY go.mod . -COPY go.sum . - -RUN echo "**** Download Go Dependencies ****" && \ - go mod download -x +FROM alpine:latest -RUN echo "**** Verify Go Dependencies ****" && \ - go mod verify +RUN apk --no-cache add ca-certificates fuse3 tzdata unzip coreutils && \ + echo "user_allow_other" >> /etc/fuse.conf -COPY . . +ARG TARGETARCH -RUN --mount=type=cache,target=/root/.cache/go-build,sharing=locked \ - echo "**** Build Binary ****" && \ - make +ARG TARGETVARIANT -RUN echo "**** Print Version Binary ****" && \ - ./rclone version +ARG VERSION -# Begin final image -FROM alpine:latest - -RUN echo "**** Install Dependencies ****" && \ - apk add --no-cache \ - ca-certificates \ - fuse3 \ - tzdata && \ - echo "Enable user_allow_other in fuse" && \ - echo "user_allow_other" >> /etc/fuse.conf +COPY build/rclone-${VERSION}-linux-${TARGETARCH}${TARGETVARIANT:+-$TARGETVARIANT}.zip /tmp/rclone.zip -COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/ +RUN unzip /tmp/rclone.zip -d /tmp && \ + mv /tmp/rclone-*-linux-${TARGETARCH}${TARGETVARIANT:+-$TARGETVARIANT}/rclone /usr/local/bin/rclone && \ + chmod +x /usr/local/bin/rclone && \ + rm -rf /tmp/rclone* && \ + apk del unzip RUN addgroup -g 1009 rclone && adduser -u 1009 -Ds /bin/sh -G rclone rclone diff --git a/Makefile b/Makefile index 03242ef8d537d..d6a5149328476 100644 --- a/Makefile +++ b/Makefile @@ -100,7 +100,6 @@ compiletest: check: rclone @echo "-- START CODE QUALITY REPORT -------------------------------" @golangci-lint run $(LINTTAGS) ./... - @bin/markdown-lint @echo "-- END CODE QUALITY REPORT ---------------------------------" # Get the build dependencies @@ -145,11 +144,9 @@ MANUAL.txt: MANUAL.md pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt commanddocs: rclone - go generate ./lib/transform -@rmdir -p '$$HOME/.config/rclone' XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/ @[ ! 
-e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1) - go run bin/make_bisync_docs.go ./docs/content/ backenddocs: rclone bin/make_backend_docs.py -@rmdir -p '$$HOME/.config/rclone' @@ -208,12 +205,12 @@ upload: upload_github: ./bin/upload-github $(TAG) -cross: doc - go run bin/cross-compile.go -release current $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) +cross: + go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) beta: go run bin/cross-compile.go $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) - rclone -v copy build/ pub.rclone.org:/$(TAG) + rclone -v copy build/ memstore:pub-rclone-org/$(TAG) @echo Beta release ready at https://pub.rclone.org/$(TAG)/ log_since_last_release: @@ -226,18 +223,18 @@ ci_upload: sudo chown -R $$USER build find build -type l -delete gzip -r9v build - ./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds + ./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),) - ./rclone --no-check-dest --config bin/ci.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest + ./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest endif @echo Beta release ready at $(BETA_URL)/testbuilds ci_beta: git log $(LAST_TAG).. > /tmp/git-log.txt go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(BUILD_ARGS) $(TAG) - rclone --no-check-dest --config bin/ci.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) + rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD) ifeq ($(or $(BRANCH_PATH),$(RELEASE_TAG)),) - rclone --no-check-dest --config bin/ci.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR) + rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR) endif @echo Beta release ready at $(BETA_URL) @@ -246,7 +243,7 @@ fetch_binaries: rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/ serve: website - cd docs && hugo server --logLevel info -w --disableFastRender --ignoreCache + cd docs && hugo server -v -w --disableFastRender tag: retag doc bin/make_changelog.py $(LAST_TAG) $(VERSION) > docs/content/changelog.md.new diff --git a/backend/all/all.go b/backend/all/all.go index 234b2bf50d086..2a01fefa178e0 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -4,6 +4,7 @@ package all import ( // Active file systems _ "github.com/rclone/rclone/backend/alias" + _ "github.com/rclone/rclone/backend/alldebrid" _ "github.com/rclone/rclone/backend/azureblob" _ "github.com/rclone/rclone/backend/azurefiles" _ "github.com/rclone/rclone/backend/b2" @@ -60,6 +61,7 @@ import ( _ "github.com/rclone/rclone/backend/storj" _ "github.com/rclone/rclone/backend/sugarsync" _ "github.com/rclone/rclone/backend/swift" + _ "github.com/rclone/rclone/backend/teldrive" _ "github.com/rclone/rclone/backend/ulozto" _ "github.com/rclone/rclone/backend/union" _ "github.com/rclone/rclone/backend/uptobox" diff --git a/backend/alldebrid/alldebrid.go b/backend/alldebrid/alldebrid.go new file mode 100644 index 0000000000000..9838e4b661111 --- /dev/null +++ b/backend/alldebrid/alldebrid.go @@ -0,0 +1,1256 @@ +// Package alldebrid provides an interface to the alldebrid.com +// object storage system. 
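+//
+// The backend presents three virtual top-level directories: links (saved
+// links), history (download history) and magnets (added magnets and the
+// files inside them).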
+package alldebrid + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/rclone/rclone/backend/alldebrid/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" +) + +const ( + minSleep = 10 * time.Millisecond + maxSleep = 2 * time.Second + decayConstant = 2 // bigger for slower decay, exponential + rootURL = "https://api.alldebrid.com" +) + +// Globals + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "alldebrid", + Description: "alldebrid.com", + NewFs: NewFs, + Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { + return &fs.ConfigOut{}, nil + }, + Options: []fs.Option{{ + Name: "api_key", + Help: `API Key.\n\nGet yours from https://alldebrid.com/apikeys`, + Required: true, + Sensitive: true, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + // Encode invalid UTF-8 bytes as json doesn't handle them properly. + Default: (encoder.Display | + encoder.EncodeBackSlash | + encoder.EncodeDoubleQuote | + encoder.EncodeInvalidUtf8), + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + APIKey string `config:"api_key"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs represents a remote cloud storage system +type Fs struct { + name string // name of this remote + root string // the path we are working on + opt Options // parsed options + features *fs.Features // optional features + srv *rest.Client // the connection to the server + pacer *fs.Pacer // pacer for API calls + magnetFilesCache map[int]*MagnetFilesCacheEntry // cache for magnet files +} + +// MagnetFilesCacheEntry holds cached magnet files with expiration +type MagnetFilesCacheEntry struct { + files []api.MagnetFile + expires time.Time +} + +// Object describes a file +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + size int64 // size of the object + modTime time.Time // modification time of the object + id string // ID of the object + mimeType string // Mime type of object + url string // URL to download file + dLink string +} + +// ------------------------------------------------------------ + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("alldebrid root '%s'", f.root) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// parsePath parses an alldebrid 'url' +func parsePath(path string) (root string) { + root = strings.Trim(path, "/") + return +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. 
+ 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout + 509, // Bandwidth Limit Exceeded +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +// errorHandler parses a non 2xx error response into an error +func errorHandler(resp *http.Response) error { + body, err := rest.ReadBody(resp) + if err != nil { + body = nil + } + var e api.Response + if body != nil { + _ = json.Unmarshal(body, &e) + if e.Error != nil { + return fmt.Errorf("alldebrid error %s: %s", e.Error.Code, e.Error.Message) + } + } + return fmt.Errorf("HTTP %d: %s", resp.StatusCode, string(body)) +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + root = parsePath(root) + + client := fshttp.NewClient(ctx) + + f := &Fs{ + name: name, + root: root, + opt: *opt, + srv: rest.NewClient(client).SetRoot(rootURL), + pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + magnetFilesCache: make(map[int]*MagnetFilesCacheEntry), + } + f.features = (&fs.Features{ + CaseInsensitive: true, + CanHaveEmptyDirectories: true, + ReadMimeType: false, + }).Fill(ctx, f) + f.srv.SetErrorHandler(errorHandler) + + // Set authorization header + if opt.APIKey != "" { + f.srv.SetHeader("Authorization", "Bearer "+opt.APIKey) + } + + // Validate API key + err = f.validateAPIKey(ctx) + if err != nil { + return nil, fmt.Errorf("failed to configure alldebrid: %w", err) + } + + // For alldebrid, check if root points to a file + if root != "" { + if _, err := f.newObjectWithInfo(ctx, root, nil); err == nil { + // Root points to a file, return parent directory + f.root = path.Dir(root) + return f, fs.ErrorIsFile + } + // Root is a directory path + f.root = root + } + + return f, nil +} + +// validateAPIKey validates the API key by calling the user endpoint +func (f *Fs) validateAPIKey(ctx context.Context) error { + var userInfo api.UserResponse + opts := rest.Opts{ + Method: "GET", + Path: "/v4/user", + } + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &userInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + if err := userInfo.AsErr(); err != nil { + return err + } + return nil +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. 
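+//
+// When info is nil the remote is resolved from its first path element
+// (links, history or magnets) and matched by filename against the
+// corresponding listing.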
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info any) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + var err error + if info != nil { + // Set info based on type + switch v := info.(type) { + case *api.Link: + err = o.setLinkMetaData(v) + case *api.MagnetFile: + err = o.setMagnetFileMetaData(v) + default: + return nil, fmt.Errorf("unsupported info type: %T", info) + } + } else { + // Load all magnets or history and match if it's a file + parts := strings.Split(o.remote, "/") + if len(parts) < 2 { + return nil, fs.ErrorObjectNotFound + } + category := parts[0] + switch category { + case "links": + links, err := f.fetchLinks(ctx) + if err != nil { + return nil, err + } + filename := path.Base(o.remote) + for _, link := range links { + if link.Filename == filename { + err = o.setLinkMetaData(&link) + return o, err + } + } + return nil, fs.ErrorObjectNotFound + case "history": + history, err := f.fetchHistory(ctx) + if err != nil { + return nil, err + } + filename := path.Base(o.remote) + for _, link := range history { + if link.Filename == filename { + err = o.setLinkMetaData(&link) + return o, err + } + } + return nil, fs.ErrorObjectNotFound + case "magnets": + magnetName := parts[1] + magnets, err := f.fetchMagnets(ctx) + if err != nil { + return nil, err + } + for _, magnet := range magnets { + if magnet.Filename == magnetName { + if len(parts) == 2 { + // Single file magnet + o.size = magnet.Size + o.modTime = time.Unix(magnet.UploadDate, 0) + o.id = fmt.Sprintf("%d", magnet.ID) + return o, nil + } else { + // Multi-level path + files, err := f.fetchMagnetFiles(ctx, magnet.ID) + if err != nil { + continue + } + filePath := strings.Join(parts[2:], "/") + targetPath := path.Join(magnet.Filename, filePath) + found := f.findFileInMagnet(files, targetPath) + if found != nil { + err = o.setMagnetFileMetaData(found) + return o, err + } + } + } + } + return nil, fs.ErrorObjectNotFound + default: + return nil, fs.ErrorObjectNotFound + } + } + if err != nil { + return nil, err + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) +} + +// CreateDir makes a directory with pathID as parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + // Alldebrid doesn't support creating directories + return "", fs.ErrorNotImplemented +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. 
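+//
+// The root of this backend lists the virtual links, history and magnets
+// directories; paths below magnets are resolved against the magnet file tree.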
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + // Parse the directory path and handle accordingly + parts := strings.Split(filepath.Join(f.root, strings.Trim(dir, "/")), "/") + + switch len(parts) { + case 1: + if parts[0] == "" { + // Root directory - show virtual directories + now := time.Now() + return fs.DirEntries{ + fs.NewDir("links", now), + fs.NewDir("history", now), + fs.NewDir("magnets", now), + }, nil + } + + // Top level directories + switch parts[0] { + case "links": + return f.listLinksDirectory(ctx, dir) + case "history": + return f.listHistoryDirectory(ctx, dir) + case "magnets": + return f.listMagnetsDirectory(ctx, dir) + default: + return nil, fs.ErrorDirNotFound + } + default: + // Handle deeper paths within virtual directories + if len(parts) >= 2 { + switch parts[0] { + case "magnets": + // parts[1] is the magnet filename, find the corresponding magnet ID + magnets, err := f.fetchMagnets(ctx) + if err != nil { + return nil, err + } + for _, magnet := range magnets { + if magnet.Filename == parts[1] { + subDir := "" + if len(parts) > 2 { + subDir = strings.Join(parts[2:], "/") + } + return f.listMagnetSubDirectory(ctx, &magnet, dir, subDir) + } + } + return nil, fs.ErrorDirNotFound + case "links", "history": + // These are flat directories, no subdirectories supported + return nil, fs.ErrorDirNotFound + default: + return nil, fs.ErrorDirNotFound + } + } + return nil, fs.ErrorDirNotFound + } + +} + +// listLinksDirectory lists the contents of the links directory +func (f *Fs) listLinksDirectory(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + var linksInfo api.LinksResponse + opts := rest.Opts{ + Method: "GET", + Path: "/v4/user/links", + } + + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &linksInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := linksInfo.AsErr(); err != nil { + return nil, err + } + + for i := range linksInfo.Data.Links { + link := &linksInfo.Data.Links[i] + remote := path.Join(dir, link.Filename) + o, err := f.newObjectWithInfo(ctx, remote, link) + if err != nil { + return nil, err + } + entries = append(entries, o) + } + return entries, nil +} + +// listHistoryDirectory lists the contents of the history directory +func (f *Fs) listHistoryDirectory(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + var historyInfo api.HistoryResponse + opts := rest.Opts{ + Method: "GET", + Path: "/v4/user/history", + } + + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &historyInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := historyInfo.AsErr(); err != nil { + return nil, err + } + + for i := range historyInfo.Data.Links { + link := &historyInfo.Data.Links[i] + remote := path.Join(dir, link.Filename) + o, err := f.newObjectWithInfo(ctx, remote, link) + if err != nil { + return nil, err + } + entries = append(entries, o) + } + return entries, nil +} + +// listMagnetsDirectory lists the contents of the magnets directory +func (f *Fs) listMagnetsDirectory(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + magnets, err := f.fetchMagnets(ctx) + if err != nil { + return nil, err + } + + for _, magnet := range magnets { + modTime := time.Unix(magnet.UploadDate, 0) + remote := path.Join(dir, magnet.Filename) + + if magnet.NBLinks == 1 { + // Single file magnet - list as file + obj := 
&Object{ + fs: f, + remote: remote, + size: magnet.Size, + modTime: modTime, + id: fmt.Sprintf("%d", magnet.ID), + } + entries = append(entries, obj) + } else { + // Multi-file magnet - list as directory + d := fs.NewDir(remote, modTime) + d.SetID(fmt.Sprintf("%d", magnet.ID)) + entries = append(entries, d) + } + } + return entries, nil +} + +// listMagnetSubDirectory lists files within a subdirectory of a magnet +func (f *Fs) listMagnetSubDirectory(ctx context.Context, magnet *api.Magnet, dir, subDir string) (entries fs.DirEntries, err error) { + // Use the provided magnet object (already validated) + + // Now fetch files for this specific magnet + var filesInfo api.MagnetFilesResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/magnet/files", + } + + opts.Parameters = url.Values{} + opts.Parameters.Set("id[]", fmt.Sprintf("%d", magnet.ID)) + + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &filesInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := filesInfo.AsErr(); err != nil { + return nil, err + } + + // Process the file tree + for _, magnetFiles := range filesInfo.Data.Magnets { + if magnetFiles.ID == fmt.Sprintf("%d", magnet.ID) { + f.processMagnetFileTree(dir, "", path.Join(magnet.Filename, subDir), magnet.Filename, magnetFiles.Files, &entries) + break + } + } + return entries, nil +} + +// processMagnetFileTree recursively processes the magnet file tree +// +// This function handles the conversion of Alldebrid's magnet file structure +// to rclone's filesystem entries. Files with download links are treated as +// regular files, while entries with subdirectories are processed recursively. +// Files without links but with size > 0 are assumed to be files, others as directories. +// listPath specifies the subdirectory to list (empty string for root). 
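+// For example (illustrative), a file reported by the API as
+// "<magnet filename>/Sub/file.mkv" is listed at the magnet root as
+// "Sub/file.mkv", the magnet filename prefix being stripped with CutPrefix.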
+func (f *Fs) processMagnetFileTree(dir, currentPath, listPath, magnetFilename string, files []api.MagnetFile, entries *fs.DirEntries) { + for _, file := range files { + fullPath := path.Join(currentPath, file.Name) + + // Determine if we should include this entry + shouldInclude := false + var displayName string + + if listPath == "" { + // Root listing: include all files with adjusted path to avoid duplication + if file.Link != "" || (file.Size > 0 && len(file.Entries) == 0) { + shouldInclude = true + displayName, _ = strings.CutPrefix(fullPath, magnetFilename+"/") + + } + } else { + // Subdirectory listing: include immediate children of the target directory + if currentPath == listPath { + shouldInclude = true + displayName = file.Name + } + } + + if shouldInclude { + remote := path.Join(dir, displayName) + + if file.Link != "" { + // This is a file + obj := &Object{ + fs: f, + remote: remote, + size: file.Size, + modTime: time.Now(), // We don't have mod time for magnet files + url: file.Link, + } + *entries = append(*entries, obj) + } else if len(file.Entries) > 0 { + // This is a directory + if listPath == "" { + // For root listing, adjust displayName + dirDisplayName, _ := strings.CutPrefix(fullPath, magnetFilename+"/") + dirRemote := path.Join(dir, dirDisplayName) + d := fs.NewDir(dirRemote, time.Now()) + *entries = append(*entries, d) + } else { + d := fs.NewDir(remote, time.Now()) + *entries = append(*entries, d) + } + } else { + // This might be a file without a link (not yet available) + if file.Size > 0 { + // Assume it's a file + obj := &Object{ + fs: f, + remote: remote, + size: file.Size, + modTime: time.Now(), + } + *entries = append(*entries, obj) + } else { + // Assume it's a directory + if listPath == "" { + // For root listing, adjust displayName + dirDisplayName, _ := strings.CutPrefix(fullPath, magnetFilename+"/") + dirRemote := path.Join(dir, dirDisplayName) + d := fs.NewDir(dirRemote, time.Now()) + *entries = append(*entries, d) + } else { + d := fs.NewDir(remote, time.Now()) + *entries = append(*entries, d) + } + } + } + } + + // Always recurse into subdirectories to find the target path + if len(file.Entries) > 0 { + f.processMagnetFileTree(dir, fullPath, listPath, magnetFilename, file.Entries, entries) + } + } +} + +// fetchLinks fetches the saved links +func (f *Fs) fetchLinks(ctx context.Context) ([]api.Link, error) { + var linksInfo api.LinksResponse + opts := rest.Opts{ + Method: "GET", + Path: "/v4/user/links", + } + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &linksInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := linksInfo.AsErr(); err != nil { + return nil, err + } + + return linksInfo.Data.Links, nil +} + +// fetchHistory fetches the download history +func (f *Fs) fetchHistory(ctx context.Context) ([]api.Link, error) { + var historyInfo api.HistoryResponse + opts := rest.Opts{ + Method: "GET", + Path: "/v4/user/history", + } + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &historyInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := historyInfo.AsErr(); err != nil { + return nil, err + } + + return historyInfo.Data.Links, nil +} + +// fetchMagnetFiles fetches files for a specific magnet with caching +func (f *Fs) fetchMagnetFiles(ctx context.Context, magnetID int) ([]api.MagnetFile, error) { + // Check cache first + if entry, ok := f.magnetFilesCache[magnetID]; ok 
&& time.Now().Before(entry.expires) { + return entry.files, nil + } + + var filesInfo api.MagnetFilesResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/magnet/files", + } + + opts.Parameters = url.Values{} + opts.Parameters.Set("id[]", fmt.Sprintf("%d", magnetID)) + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &filesInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := filesInfo.AsErr(); err != nil { + return nil, err + } + + // Return files from the first (and only) magnet + for _, magnetFiles := range filesInfo.Data.Magnets { + if magnetFiles.ID == fmt.Sprintf("%d", magnetID) { + // Cache the result + f.magnetFilesCache[magnetID] = &MagnetFilesCacheEntry{ + files: magnetFiles.Files, + expires: time.Now().Add(3 * time.Hour), + } + return magnetFiles.Files, nil + } + } + return nil, fmt.Errorf("magnet files not found for ID %d", magnetID) +} + +// findFileInMagnet finds a file in the magnet file tree by path +func (f *Fs) findFileInMagnet(files []api.MagnetFile, targetPath string) *api.MagnetFile { + return f.findFileRecursive(files, "", targetPath) +} + +func (f *Fs) findFileRecursive(files []api.MagnetFile, currentPath, targetPath string) *api.MagnetFile { + for _, file := range files { + fullPath := path.Join(currentPath, file.Name) + if fullPath == targetPath { + return &file + } + // Recurse into subdirectories + if len(file.Entries) > 0 { + if found := f.findFileRecursive(file.Entries, fullPath, targetPath); found != nil { + return found + } + } + } + return nil +} + +// fetchMagnets fetches the current magnet list +func (f *Fs) fetchMagnets(ctx context.Context) ([]api.Magnet, error) { + var magnetsInfo api.MagnetStatusResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4.1/magnet/status", + } + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &magnetsInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := magnetsInfo.AsErr(); err != nil { + return nil, err + } + + // Filter for active magnets only (status codes 0-3: Processing/In Queue, Downloading, Compressing/Moving, Uploading) + var activeMagnets []api.Magnet + for _, magnet := range magnetsInfo.Data.Magnets { + if magnet.StatusCode == 4 { + activeMagnets = append(activeMagnets, magnet) + } + } + + return activeMagnets, nil +} + +// Put the object +// +// Copy the reader in to the new object which is returned. +// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil) + switch err { + case nil: + return existingObj, existingObj.Update(ctx, in, src, options...) + case fs.ErrorObjectNotFound: + // Not found so create it + return f.PutUnchecked(ctx, in, src, options...) + default: + return nil, err + } +} + +// PutUnchecked the object into the container +// +// This will produce an error if the object already exists. +// +// Copy the reader in to the new object which is returned. 
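+//
+// Only remotes under the links/ and magnets/ virtual directories are
+// accepted: links/<url> saves a link and magnets/<magnet or torrent> adds a
+// magnet; any other path returns fs.ErrorNotImplemented.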
+// +// The new object may have been created if an error is returned +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + remote := src.Remote() + + // For alldebrid, we can add links or magnets + if strings.HasPrefix(remote, "links/") { + return f.addLink(ctx, remote, src) + } else if strings.HasPrefix(remote, "magnets/") { + return f.addMagnet(ctx, in, src) + } + + return nil, fs.ErrorNotImplemented +} + +// addLink adds a link to the saved links +func (f *Fs) addLink(ctx context.Context, remote string, src fs.ObjectInfo) (fs.Object, error) { + linkURL := strings.TrimPrefix(src.Remote(), "links/") + + var saveInfo api.LinkSaveResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/user/links/save", + } + + opts.Parameters = url.Values{} + opts.Parameters.Set("links[]", linkURL) + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &saveInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := saveInfo.AsErr(); err != nil { + return nil, err + } + + // Create a virtual object representing the saved link + o := &Object{ + fs: f, + remote: remote, + url: linkURL, + } + return o, nil +} + +// addMagnet adds a magnet or torrent file +func (f *Fs) addMagnet(ctx context.Context, in io.Reader, src fs.ObjectInfo) (fs.Object, error) { + remote := src.Remote() + + // Extract the magnet identifier from the path + parts := strings.Split(strings.TrimPrefix(remote, "magnets/"), "/") + if len(parts) == 0 { + return nil, errors.New("invalid magnet path") + } + + magnetInput := parts[0] + + // Check if it's a magnet URI (starts with magnet:) + if strings.HasPrefix(magnetInput, "magnet:") { + return f.uploadMagnetURI(ctx, magnetInput) + } + + // Check if it's a torrent file (has .torrent extension or contains torrent data) + if strings.HasSuffix(magnetInput, ".torrent") || f.isTorrentData(in) { + return f.uploadTorrentFile(ctx, in, src) + } + + // Default to treating as magnet URI + return f.uploadMagnetURI(ctx, magnetInput) +} + +// uploadMagnetURI uploads a magnet URI to alldebrid +func (f *Fs) uploadMagnetURI(ctx context.Context, magnetURI string) (fs.Object, error) { + var uploadInfo api.MagnetUploadResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/magnet/upload", + } + + opts.Parameters = url.Values{} + opts.Parameters.Set("magnets[]", magnetURI) + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &uploadInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := uploadInfo.AsErr(); err != nil { + return nil, err + } + + // Check if upload was successful + if len(uploadInfo.Data.Magnets) == 0 { + return nil, errors.New("no magnet upload result received") + } + + magnet := uploadInfo.Data.Magnets[0] + if magnet.Error != nil { + return nil, magnet.Error + } + + // Create a virtual object representing the uploaded magnet + o := &Object{ + fs: f, + remote: fmt.Sprintf("magnets/%d", magnet.ID), + size: magnet.Size, + modTime: time.Now(), + id: fmt.Sprintf("%d", magnet.ID), + } + + return o, nil +} + +// uploadTorrentFile uploads a torrent file to alldebrid +func (f *Fs) uploadTorrentFile(ctx context.Context, in io.Reader, src fs.ObjectInfo) (fs.Object, error) { + // Read the torrent file data + torrentData, err := io.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("failed to read torrent file: %w", err) + } + + var uploadInfo 
api.MagnetUploadFileResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/magnet/upload/file", + } + + // Create multipart form data + opts.MultipartParams = url.Values{} + opts.MultipartParams.Set("file", string(torrentData)) + opts.MultipartParams.Set("name", src.Remote()) + + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &uploadInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + if err := uploadInfo.AsErr(); err != nil { + return nil, err + } + + // Check if upload was successful + if len(uploadInfo.Data.Files) == 0 { + return nil, errors.New("no torrent upload result received") + } + + file := uploadInfo.Data.Files[0] + if file.Error != nil { + return nil, file.Error + } + + // Create a virtual object representing the uploaded torrent + o := &Object{ + fs: f, + remote: fmt.Sprintf("magnets/%d", file.ID), + size: file.Size, + modTime: time.Now(), + id: fmt.Sprintf("%d", file.ID), + } + + return o, nil +} + +// isTorrentData checks if the input data looks like a torrent file +func (f *Fs) isTorrentData(in io.Reader) bool { + // Read first few bytes to check for torrent file signature + buf := make([]byte, 11) + n, err := in.Read(buf) + if err != nil || n < 11 { + return false + } + + // Torrent files start with "d8:announce" or similar bencoded data + // For simplicity, we'll check if it starts with 'd' (dictionary in bencode) + return buf[0] == 'd' +} + +// Mkdir creates the container if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + // Alldebrid doesn't support creating directories + return fs.ErrorNotImplemented +} + +// Rmdir deletes the root folder +// +// Returns an error if it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + // Alldebrid doesn't support deleting directories + return fs.ErrorNotImplemented +} + +// Precision return the precision of this Fs +func (f *Fs) Precision() time.Duration { + return fs.ModTimeNotSupported +} + +// Purge deletes all the files in the directory +// +// Optional interface: Only implement this if you have a way of +// deleting all the files quicker than just running Remove() on the +// result of List() +func (f *Fs) Purge(ctx context.Context, dir string) error { + // Alldebrid doesn't support purging directories + return fs.ErrorCantPurge +} + +// Hashes returns the supported hash sets. 
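`isTorrentData` above distinguishes torrent payloads by the leading bencode dictionary marker (a `.torrent` file begins with `d`, as in `d8:announce...`). For reference, the same check can be expressed against a `bufio.Reader` so the sniffed bytes stay available to whatever consumes the stream next; this is only an illustrative sketch with made-up data, not code from the backend.

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// looksLikeTorrent reports whether r appears to start with bencoded data
// ("d" opens the top-level dictionary of a .torrent file). Peek does not
// consume the bytes, so the caller can still read the full stream afterwards.
func looksLikeTorrent(r *bufio.Reader) bool {
	b, err := r.Peek(1)
	if err != nil {
		return false
	}
	return b[0] == 'd'
}

func main() {
	src := bytes.NewReader([]byte("d8:announce35:udp://tracker.example.com:80/announcee"))
	br := bufio.NewReader(src)
	fmt.Println("torrent-like:", looksLikeTorrent(br))

	// The stream is still intact for a subsequent upload.
	rest, _ := io.ReadAll(br)
	fmt.Println("bytes still available:", len(rest))
}
```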
+func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// ------------------------------------------------------------ + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the SHA-1 of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.size +} + +// setLinkMetaData sets the metadata from a link +func (o *Object) setLinkMetaData(info *api.Link) (err error) { + o.size = info.Size + o.modTime = time.Unix(info.Date, 0) + o.url = info.Link + return nil +} + +// setMagnetFileMetaData sets the metadata from a magnet file +func (o *Object) setMagnetFileMetaData(info *api.MagnetFile) (err error) { + o.size = info.Size + o.modTime = time.Now() // We don't have mod time for magnet files + o.url = info.Link + return nil +} + +// unlockLink unlocks a link to get the download URL +func (o *Object) unlockLink(ctx context.Context) error { + var unlockInfo api.LinkUnlockResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/link/unlock", + } + + opts.Parameters = url.Values{} + opts.Parameters.Set("link", o.url) + + err := o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &unlockInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + if err := unlockInfo.AsErr(); err != nil { + return err + } + + o.size = unlockInfo.Data.Filesize + o.dLink = unlockInfo.Data.Link + return nil +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + return o.modTime +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + return fs.ErrorCantSetModTime +} + +// Storable returns a boolean showing whether this object storable +func (o *Object) Storable() bool { + return true +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + if o.url == "" { + // For single-file magnets, fetch the link from /v4/magnet/files + if o.id != "" { + magnetID, err := strconv.Atoi(o.id) + if err == nil { + files, err := o.fs.fetchMagnetFiles(ctx, magnetID) + if err == nil && len(files) == 1 { + o.url = files[0].Link + } + } + } + if o.url == "" { + return nil, errors.New("can't download - no URL") + } + } + if o.dLink == "" { + err = o.unlockLink(ctx) + } + if err != nil { + return nil, err + } + fs.FixRangeOption(options, o.size) + var resp *http.Response + opts := rest.Opts{ + Path: "", + RootURL: o.dLink, + Method: "GET", + Options: options, + } + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = o.fs.srv.Call(ctx, &opts) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return resp.Body, err +} + +// Update the object with the contents of the io.Reader, modTime and size +// +// If existing is set then it updates the object rather than creating a new one. 
+// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + // Alldebrid doesn't support updating existing objects + return fs.ErrorNotImplemented +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) error { + // For links, we can remove from saved links + if strings.HasPrefix(o.remote, "links/") { + return o.removeLink(ctx) + } + // For magnets, we can delete magnets + if strings.HasPrefix(o.remote, "magnets/") { + return o.removeMagnet(ctx) + } + return fs.ErrorNotImplemented +} + +// removeLink removes a link from saved links +func (o *Object) removeLink(ctx context.Context) error { + var deleteInfo api.LinkDeleteResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/user/links/delete", + } + + opts.Parameters = url.Values{} + opts.Parameters.Set("links[]", o.url) + + err := o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &deleteInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + if err := deleteInfo.AsErr(); err != nil { + return err + } + return nil +} + +// removeMagnet removes a magnet +func (o *Object) removeMagnet(ctx context.Context) error { + // Extract magnet filename from remote path + parts := strings.Split(o.remote, "/") + if len(parts) < 2 { + return fs.ErrorObjectNotFound + } + + magnetFilename := parts[1] + + // Find the magnet by filename to get ID + magnets, err := o.fs.fetchMagnets(ctx) + if err != nil { + return err + } + var magnetID int + found := false + for _, magnet := range magnets { + if magnet.Filename == magnetFilename { + magnetID = magnet.ID + found = true + break + } + } + if !found { + return fs.ErrorObjectNotFound + } + + var deleteInfo api.MagnetDeleteResponse + opts := rest.Opts{ + Method: "POST", + Path: "/v4/magnet/delete", + } + + opts.Parameters = url.Values{} + opts.Parameters.Set("id", fmt.Sprintf("%d", magnetID)) + + err = o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &deleteInfo) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + if err := deleteInfo.AsErr(); err != nil { + return err + } + // Clear cache for this magnet + delete(o.fs.magnetFilesCache, magnetID) + return nil +} + +// MimeType of an Object if known, "" otherwise +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + return o.id +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.Object = (*Object)(nil) + _ fs.MimeTyper = (*Object)(nil) + _ fs.IDer = (*Object)(nil) +) diff --git a/backend/alldebrid/alldebrid_test.go b/backend/alldebrid/alldebrid_test.go new file mode 100644 index 0000000000000..3b28ccc03cee6 --- /dev/null +++ b/backend/alldebrid/alldebrid_test.go @@ -0,0 +1,16 @@ +package alldebrid_test + +import ( + "testing" + + "github.com/rclone/rclone/backend/alldebrid" + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestAlldebrid:", + NilObject: (*alldebrid.Object)(nil), + }) +} diff --git a/backend/alldebrid/api/types.go b/backend/alldebrid/api/types.go new file mode 100644 index 0000000000000..b847895d24091 --- /dev/null +++ b/backend/alldebrid/api/types.go @@ 
-0,0 +1,258 @@ +// Package api contains definitions for using the alldebrid API +package api + +import "fmt" + +// Response is returned by all messages and embedded in the +// structures below +type Response struct { + Status string `json:"status"` + Error *Error `json:"error,omitempty"` + Data any `json:"data,omitempty"` +} + +// Error represents an API error response +type Error struct { + Code string `json:"code"` + Message string `json:"message"` +} + +// AsErr checks the status and returns an err if bad or nil if good +func (r *Response) AsErr() error { + if r.Status != "success" && r.Error != nil { + return r.Error + } + return nil +} + +// Error satisfies the error interface +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code, e.Message) +} + +// UserResponse represents the response from /v4/user +type UserResponse struct { + Response + Data struct { + User User `json:"user"` + } `json:"data"` +} + +// User represents user information +type User struct { + Username string `json:"username"` + Email string `json:"email"` + IsPremium bool `json:"isPremium"` + IsSubscribed bool `json:"isSubscribed"` + IsTrial bool `json:"isTrial"` + PremiumUntil int64 `json:"premiumUntil"` + Lang string `json:"lang"` + PreferedDomain string `json:"preferedDomain"` + FidelityPoints int `json:"fidelityPoints"` + LimitedHostersQuotas map[string]int `json:"limitedHostersQuotas"` + Notifications []string `json:"notifications"` + RemainingTrialQuota *int `json:"remainingTrialQuota,omitempty"` +} + +// LinksResponse represents the response from /v4/user/links +type LinksResponse struct { + Response + Data struct { + Links []Link `json:"links"` + } `json:"data"` +} + +// HistoryResponse represents the response from /v4/user/history +type HistoryResponse struct { + Response + Data struct { + Links []Link `json:"links"` + } `json:"data"` +} + +// Link represents a saved or history link +type Link struct { + Link string `json:"link"` + Filename string `json:"filename"` + Size int64 `json:"size"` + Date int64 `json:"date"` + Host string `json:"host"` +} + +// MagnetStatusResponse represents the response from /v4.1/magnet/status +type MagnetStatusResponse struct { + Response + Data struct { + Magnets []Magnet `json:"magnets"` + Counter int `json:"counter,omitempty"` + Fullsync bool `json:"fullsync,omitempty"` + } `json:"data"` +} + +// Magnet represents a magnet download +type Magnet struct { + ID int `json:"id"` + Filename string `json:"filename"` + Size int64 `json:"size"` + Status string `json:"status"` + StatusCode int `json:"statusCode"` + Downloaded int64 `json:"downloaded"` + Uploaded int64 `json:"uploaded"` + Seeders int `json:"seeders"` + DownloadSpeed int64 `json:"downloadSpeed"` + UploadSpeed int64 `json:"uploadSpeed"` + UploadDate int64 `json:"uploadDate"` + CompletionDate int64 `json:"completionDate"` + NBLinks int `json:"nbLinks"` +} + +// MagnetFile represents a file within a magnet +type MagnetFile struct { + Name string `json:"n"` + Size int64 `json:"s,omitempty"` + Link string `json:"l,omitempty"` + Entries []MagnetFile `json:"e,omitempty"` +} + +// MagnetFilesResponse represents the response from /v4/magnet/files +type MagnetFilesResponse struct { + Response + Data struct { + Magnets []MagnetFiles `json:"magnets"` + } `json:"data"` +} + +// MagnetFiles represents files for a specific magnet +type MagnetFiles struct { + ID string `json:"id"` + Files []MagnetFile `json:"files"` + Error *Error `json:"error,omitempty"` +} + +// MagnetUploadResponse represents the response from 
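All alldebrid replies embed the `Response` envelope defined above, so a caller decodes into a concrete response type and then calls `AsErr` to promote a `status: "error"` body into a Go error. A minimal sketch of that decode-then-check pattern follows; the structs are local mirrors of the envelope kept here purely for illustration, and the sample payload is made up.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the envelope types, for illustration only.
type Error struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

func (e *Error) Error() string { return fmt.Sprintf("%s: %s", e.Code, e.Message) }

type Response struct {
	Status string `json:"status"`
	Error  *Error `json:"error,omitempty"`
}

// AsErr returns a non-nil error when the API reported a failure.
func (r *Response) AsErr() error {
	if r.Status != "success" && r.Error != nil {
		return r.Error
	}
	return nil
}

type userResponse struct {
	Response
	Data struct {
		User struct {
			Username string `json:"username"`
		} `json:"user"`
	} `json:"data"`
}

func main() {
	body := []byte(`{"status":"error","error":{"code":"AUTH_BAD_APIKEY","message":"The auth apikey is invalid"}}`)
	var resp userResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	if err := resp.AsErr(); err != nil {
		fmt.Println("API call failed:", err)
		return
	}
	fmt.Println("logged in as", resp.Data.User.Username)
}
```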
/v4/magnet/upload +type MagnetUploadResponse struct { + Response + Data struct { + Magnets []MagnetUpload `json:"magnets"` + } `json:"data"` +} + +// MagnetUpload represents the result of uploading a magnet +type MagnetUpload struct { + Magnet string `json:"magnet"` + Hash string `json:"hash,omitempty"` + Name string `json:"name,omitempty"` + Size int64 `json:"size,omitempty"` + Ready bool `json:"ready,omitempty"` + ID int `json:"id,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// MagnetUploadFileResponse represents the response from /v4/magnet/upload/file +type MagnetUploadFileResponse struct { + Response + Data struct { + Files []MagnetUploadFile `json:"files"` + } `json:"data"` +} + +// MagnetUploadFile represents the result of uploading a torrent file +type MagnetUploadFile struct { + File string `json:"file"` + Name string `json:"name,omitempty"` + Size int64 `json:"size,omitempty"` + Hash string `json:"hash,omitempty"` + Ready bool `json:"ready,omitempty"` + ID int `json:"id,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// MagnetDeleteResponse represents the response from /v4/magnet/delete +type MagnetDeleteResponse struct { + Response + Data struct { + Message string `json:"message"` + } `json:"data"` +} + +// MagnetRestartResponse represents the response from /v4/magnet/restart +type MagnetRestartResponse struct { + Response + Data struct { + Message string `json:"message,omitempty"` + Magnets []MagnetRestart `json:"magnets,omitempty"` + } `json:"data"` +} + +// MagnetRestart represents the result of restarting a magnet +type MagnetRestart struct { + Magnet string `json:"magnet"` + Message string `json:"message,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// LinkUnlockResponse represents the response from /v4/link/unlock +type LinkUnlockResponse struct { + Response + Data LinkUnlock `json:"data"` +} + +// LinkUnlock represents an unlocked link +type LinkUnlock struct { + Link string `json:"link"` + Filename string `json:"filename"` + Host string `json:"host"` + Streams []Stream `json:"streams,omitempty"` + Paws bool `json:"paws"` + Filesize int64 `json:"filesize"` + ID string `json:"id"` + HostDomain string `json:"hostDomain"` + Delayed int `json:"delayed,omitempty"` +} + +// Stream represents a streaming option +type Stream struct { + ID string `json:"id"` + Ext string `json:"ext"` + Quality string `json:"quality"` + Filesize int64 `json:"filesize"` + Proto string `json:"proto"` + Name string `json:"name"` +} + +// LinkDelayedResponse represents the response from /v4/link/delayed +type LinkDelayedResponse struct { + Response + Data LinkDelayed `json:"data"` +} + +// LinkDelayed represents a delayed link status +type LinkDelayed struct { + Status int `json:"status"` + TimeLeft int `json:"time_left"` + Link string `json:"link,omitempty"` +} + +// LinkSaveResponse represents the response from /v4/user/links/save +type LinkSaveResponse struct { + Response + Data struct { + Message string `json:"message"` + } `json:"data"` +} + +// LinkDeleteResponse represents the response from /v4/user/links/delete +type LinkDeleteResponse struct { + Response + Data struct { + Message string `json:"message"` + } `json:"data"` +} + +// HistoryDeleteResponse represents the response from /v4/user/history/delete +type HistoryDeleteResponse struct { + Response + Data struct { + Message string `json:"message"` + } `json:"data"` +} diff --git a/backend/teldrive/api/types.go b/backend/teldrive/api/types.go new file mode 100644 index 0000000000000..cbe2f611a0f66 --- 
/dev/null +++ b/backend/teldrive/api/types.go @@ -0,0 +1,147 @@ +// Package api provides types used by the Teldrive API. +package api + +import "time" + +type Error struct { + Code bool `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e Error) Error() string { + out := "api error" + if e.Message != "" { + out += ": " + e.Message + } + return out +} + +type Part struct { + Id int64 + Size int64 + Name string + Start int64 + End int64 +} + +// FileInfo represents a file when listing folder contents +type FileInfo struct { + Id string `json:"id"` + Name string `json:"name"` + MimeType string `json:"mimeType"` + Size int64 `json:"size"` + ParentId string `json:"parentId"` + Type string `json:"type"` + ModTime time.Time `json:"updatedAt"` +} + +type Meta struct { + Count int `json:"count,omitempty"` + TotalPages int `json:"totalPages,omitempty"` + CurrentPage int `json:"currentPage,omitempty"` +} + +type ReadMetadataResponse struct { + Files []FileInfo `json:"items"` + Meta Meta `json:"meta"` +} + +// MetadataRequestOptions represents all the options when listing folder contents +type MetadataRequestOptions struct { + Page int64 + Limit int64 +} + +type CreateDirRequest struct { + Path string `json:"path"` +} + +type PartFile struct { + Name string `json:"name"` + PartId int `json:"partId"` + PartNo int `json:"partNo"` + TotalParts int `json:"totalParts"` + Size int64 `json:"size"` + ChannelID int64 `json:"channelId"` + Encrypted bool `json:"encrypted"` + Salt string `json:"salt"` +} + +type FilePart struct { + ID int `json:"id"` + Salt string `json:"salt,omitempty"` +} + +type CreateFileRequest struct { + Name string `json:"name"` + Type string `json:"type"` + Path string `json:"path,omitempty"` + MimeType string `json:"mimeType,omitempty"` + Size int64 `json:"size,omitempty"` + ChannelID int64 `json:"channelId,omitempty"` + Encrypted bool `json:"encrypted,omitempty"` + Parts []FilePart `json:"parts,omitempty"` + ParentId string `json:"parentId,omitempty"` + ModTime time.Time `json:"updatedAt,omitempty"` +} + +type MoveFileRequest struct { + Destination string `json:"destinationParent,omitempty"` + DestinationLeaf string `json:"destinationName,omitempty"` + Files []string `json:"ids,omitempty"` +} +type DirMove struct { + Source string `json:"source"` + Destination string `json:"destination"` +} + +type UpdateFileInformation struct { + Name string `json:"name,omitempty"` + ModTime *time.Time `json:"updatedAt,omitempty"` + Parts []FilePart `json:"parts,omitempty"` + Size int64 `json:"size,omitempty"` + UploadId string `json:"uploadId,omitempty"` + ChannelID int64 `json:"channelId,omitempty"` + ParentID string `json:"parentId,omitempty"` + Encrypted bool `json:"encrypted,omitempty"` +} + +type RemoveFileRequest struct { + Source string `json:"source,omitempty"` + Files []string `json:"ids,omitempty"` +} +type CopyFile struct { + Newname string `json:"newName"` + Destination string `json:"destination"` + ModTime time.Time `json:"updatedAt,omitempty"` +} + +type Session struct { + UserName string `json:"userName"` + UserId int64 `json:"userId"` + Hash string `json:"hash"` +} + +type FileShare struct { + ID string `json:"id,omitempty"` + ExpiresAt *time.Time `json:"expiresAt,omitempty"` +} + +type CategorySize struct { + Size int64 `json:"totalSize"` +} + +type EventSource struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + ParentId string `json:"parentId"` + DestParentId string `json:"destParentId"` +} + +type Event struct { + 
ID string `json:"id"` + Type string `json:"type"` + CreatedAt time.Time `json:"createdAt"` + Source EventSource `json:"source"` +} diff --git a/backend/teldrive/teldrive.go b/backend/teldrive/teldrive.go new file mode 100644 index 0000000000000..d27f2101a0660 --- /dev/null +++ b/backend/teldrive/teldrive.go @@ -0,0 +1,1285 @@ +// Package teldrive provides an interface to the teldrive storage system. +package teldrive + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/rclone/rclone/backend/teldrive/api" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" + "golang.org/x/sync/errgroup" +) + +const ( + timeFormat = time.RFC3339 + maxChunkSize = 2000 * fs.Mebi + defaultChunkSize = 500 * fs.Mebi + minChunkSize = 100 * fs.Mebi + authCookieName = "access_token" +) + +var ( + errCanNotUploadFileWithUnknownSize = errors.New("teldrive can't upload files with unknown size") +) + +func init() { + fs.Register(&fs.RegInfo{ + Name: "teldrive", + Description: "Tel Drive", + NewFs: NewFs, + Options: []fs.Option{{ + Help: "Access Token Cookie", + Name: "access_token", + Sensitive: true, + }, { + Help: "Api Host", + Name: "api_host", + Sensitive: true, + }, { + Help: "Chunk Size", + Name: "chunk_size", + Default: defaultChunkSize, + }, { + Help: "Page Size for listing files", + Name: "page_size", + Default: 500, + }, { + Name: "random_chunk_name", + Default: true, + Help: "Random Names For Chunks for Security", + Advanced: true, + }, { + Name: "channel_id", + Help: "Channel ID", + Sensitive: true, + }, { + Name: "upload_concurrency", + Default: 4, + Help: "Upload Concurrency", + Advanced: true, + }, + { + Name: "threaded_streams", + Default: false, + Help: "Thread Streams", + Advanced: true, + }, + { + Help: "Upload Api Host", + Name: "upload_host", + Sensitive: true, + }, + { + Name: "encrypt_files", + Default: false, + Help: "Enable Native Teldrive Encryption", + }, { + + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: encoder.Standard | encoder.EncodeInvalidUtf8, + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + ApiHost string `config:"api_host"` + UploadHost string `config:"upload_host"` + AccessToken string `config:"access_token"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + RootFolderID string `config:"root_folder_id"` + RandomChunkName bool `config:"random_chunk_name"` + UploadConcurrency int `config:"upload_concurrency"` + ChannelID int64 `config:"channel_id"` + EncryptFiles bool `config:"encrypt_files"` + PageSize int64 `config:"page_size"` + ThreadedStreams bool `config:"threaded_streams"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs is the interface a cloud storage system must provide +type Fs struct { + root string + name string + opt Options + features *fs.Features + srv *rest.Client + pacer *fs.Pacer + userId int64 + dirCache *dircache.DirCache + rootFolderID string +} + +// Object represents an teldrive object +type Object struct { + fs *Fs + remote string + id string + size int64 + parentId 
string + name string + modTime time.Time + mimeType string +} + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String returns a description of the FS +func (f *Fs) String() string { + return fmt.Sprintf("teldrive root '%s'", f.root) +} + +// Precision of the ModTimes in this Fs +func (f *Fs) Precision() time.Duration { + return time.Second +} + +// Hashes returns the supported hash types of the filesystem +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 429, // Too Many Requests. + 500, // Internal Server Error + 502, // Bad Gateway + 503, // Service Unavailable + 504, // Gateway Timeout + 509, // Bandwidth Limit Exceeded +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +func checkUploadChunkSize(cs fs.SizeSuffix) error { + if cs < minChunkSize { + return fmt.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize) + } + if cs > maxChunkSize { + return fmt.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize) + } + return nil +} + +func Ptr[T any](t T) *T { + return &t +} + +// NewFs makes a new Fs object from the path +// +// The path is of the form remote:path +// +// Remotes are looked up in the config file. If the remote isn't +// found then NotFoundInConfigFile will be returned. +// +// On Windows avoid single character remote names as they can be mixed +// up with drive letters. 
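`UpdateFileInformation` uses a `*time.Time` for `updatedAt` so an unset modification time is omitted from the JSON body, and the generic `Ptr` helper above exists to take a pointer to a value inline. Below is a small hedged sketch of how that combination behaves when marshalled; the struct only mirrors the two relevant fields and is not the real `api.UpdateFileInformation`.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Ptr mirrors the generic helper from the backend: it lets a value be taken
// as a pointer inline, which is handy for optional JSON fields.
func Ptr[T any](t T) *T { return &t }

// updateFileInformation mirrors part of api.UpdateFileInformation for
// illustration only.
type updateFileInformation struct {
	Name    string     `json:"name,omitempty"`
	ModTime *time.Time `json:"updatedAt,omitempty"`
}

func main() {
	// Only set fields are sent: a nil ModTime is omitted entirely.
	rename := updateFileInformation{Name: "new-name.txt"}
	touch := updateFileInformation{ModTime: Ptr(time.Now().UTC())}

	b1, _ := json.Marshal(rename)
	b2, _ := json.Marshal(touch)
	fmt.Println(string(b1))
	fmt.Println(string(b2))
}
```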
+func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) { + opt := new(Options) + err := configstruct.Set(config, opt) + if err != nil { + return nil, err + } + + err = checkUploadChunkSize(opt.ChunkSize) + if err != nil { + return nil, err + } + + if opt.ChannelID < 0 { + channnelId := strconv.FormatInt(opt.ChannelID, 10) + opt.ChannelID, _ = strconv.ParseInt(strings.TrimPrefix(channnelId, "-100"), 10, 64) + } + + f := &Fs{ + name: name, + root: root, + opt: *opt, + pacer: fs.NewPacer(ctx, pacer.NewDefault()), + } + + f.root = strings.Trim(root, "/") + + f.features = (&fs.Features{ + CanHaveEmptyDirectories: true, + ReadMimeType: true, + ChunkWriterDoesntSeek: true, + }).Fill(ctx, f) + + client := fshttp.NewClient(ctx) + authCookie := http.Cookie{Name: authCookieName, Value: opt.AccessToken} + f.srv = rest.NewClient(client).SetRoot(strings.Trim(opt.ApiHost, "/")).SetCookie(&authCookie) + + opts := rest.Opts{ + Method: "GET", + Path: "/api/auth/session", + } + + var ( + session api.Session + sessionResp *http.Response + ) + + err = f.pacer.Call(func() (bool, error) { + sessionResp, err = f.srv.CallJSON(ctx, &opts, nil, &session) + return shouldRetry(ctx, sessionResp, err) + }) + + if err != nil { + return nil, err + } + if session.UserId == 0 { + return nil, errors.New("invalid session") + } + + for _, cookie := range sessionResp.Cookies() { + if (cookie.Name == authCookieName) && (cookie.Value != "") { + config.Set(authCookieName, cookie.Value) + } + } + + f.userId = session.UserId + + if f.opt.RootFolderID != "" { + f.rootFolderID = f.opt.RootFolderID + } else { + f.rootFolderID, err = f.getRootID(ctx) + if err != nil { + return nil, err + } + config.Set("root_folder_id", f.rootFolderID) + } + f.dirCache = dircache.New(f.root, f.rootFolderID, f) + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF) + tempF.root = newRoot + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + // No root so return old f + return f, nil + } + _, err := tempF.NewObject(ctx, remote) + if err != nil { + if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorIsDir) { + // File doesn't exist so return old f + return f, nil + } + return nil, err + } + f.features.Fill(ctx, &tempF) + // XXX: update the old f here instead of returning tempF, since + // `features` were already filled with functions having *f as a receiver. 
+ // See https://github.com/rclone/rclone/issues/2182 + f.dirCache = tempF.dirCache + f.root = tempF.root + return f, fs.ErrorIsFile + + } + return f, nil +} + +func (f *Fs) readMetaDataForPath(ctx context.Context, path string, options *api.MetadataRequestOptions) (*api.ReadMetadataResponse, error) { + + directoryID, err := f.dirCache.FindDir(ctx, path, false) + + if err != nil { + return nil, err + } + opts := rest.Opts{ + Method: "GET", + Path: "/api/files", + Parameters: url.Values{ + "parentId": []string{directoryID}, + "limit": []string{strconv.FormatInt(options.Limit, 10)}, + "sort": []string{"id"}, + "operation": []string{"list"}, + "page": []string{strconv.FormatInt(options.Page, 10)}, + }, + } + var info api.ReadMetadataResponse + var resp *http.Response + + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return &info, nil +} + +func (f *Fs) getRootID(ctx context.Context) (string, error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files", + Parameters: url.Values{ + "parentId": []string{"nil"}, + "operation": []string{"find"}, + "name": []string{"root"}, + "type": []string{"folder"}, + }, + } + var err error + var info api.ReadMetadataResponse + var resp *http.Response + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &info) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return "", err + } + if len(info.Files) == 0 { + return "", fmt.Errorf("couldn't find root directory ID: %w", err) + } + return info.Files[0].Id, nil +} + +func (f *Fs) getFileShare(ctx context.Context, id string) (*api.FileShare, error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files/" + id + "/share", + } + res := api.FileShare{} + var ( + resp *http.Response + err error + ) + err = f.pacer.Call(func() (bool, error) { + resp, err = f.srv.CallJSON(ctx, &opts, nil, &res) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + if resp.StatusCode == http.StatusNotFound { + return nil, fs.ErrorObjectNotFound + } + return nil, err + } + if res.ExpiresAt != nil && res.ExpiresAt.UTC().Before(time.Now().UTC()) { + return nil, fs.ErrorObjectNotFound + } + return &res, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + + opts := &api.MetadataRequestOptions{ + Limit: f.opt.PageSize, + Page: 1, + } + + files := []api.FileInfo{} + + info, err := f.readMetaDataForPath(ctx, dir, opts) + + if err != nil { + return nil, err + } + + files = append(files, info.Files...) + mu := sync.Mutex{} + if info.Meta.TotalPages > 1 { + g, _ := errgroup.WithContext(ctx) + + g.SetLimit(8) + + for i := 2; i <= info.Meta.TotalPages; i++ { + page := i + g.Go(func() error { + opts := &api.MetadataRequestOptions{ + Limit: f.opt.PageSize, + Page: int64(page), + } + info, err := f.readMetaDataForPath(ctx, dir, opts) + if err != nil { + return err + } + mu.Lock() + files = append(files, info.Files...) 
+ mu.Unlock() + return nil + }) + } + if err := g.Wait(); err != nil { + return nil, err + } + } + + for _, item := range files { + remote := path.Join(dir, f.opt.Enc.ToStandardName(item.Name)) + if item.Type == "folder" { + f.dirCache.Put(remote, item.Id) + d := fs.NewDir(remote, item.ModTime).SetID(item.Id).SetParentID(item.ParentId). + SetSize(item.Size) + entries = append(entries, d) + } + if item.Type == "file" { + o, err := f.newObjectWithInfo(ctx, remote, &item) + if err != nil { + continue + } + entries = append(entries, o) + } + + } + return entries, nil +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(_ context.Context, remote string, info *api.FileInfo) (fs.Object, error) { + if info == nil { + return nil, fs.ErrorObjectNotFound + } + o := &Object{ + fs: f, + remote: remote, + id: info.Id, + size: info.Size, + parentId: info.ParentId, + name: info.Name, + modTime: info.ModTime, + mimeType: info.MimeType, + } + if info.Type == "folder" { + return o, fs.ErrorIsDir + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found it +// returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false) + if err != nil { + if err == fs.ErrorDirNotFound { + return nil, fs.ErrorObjectNotFound + } + } + + res, err := f.findObject(ctx, directoryID, leaf) + if err != nil || len(res) == 0 { + return nil, fs.ErrorObjectNotFound + } + if res[0].Type == "folder" { + return nil, fs.ErrorIsDir + } + + return f.newObjectWithInfo(ctx, remote, &res[0]) +} + +func (f *Fs) findObject(ctx context.Context, pathID, leaf string) ([]api.FileInfo, error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files", + Parameters: url.Values{ + "parentId": []string{pathID}, + "operation": []string{"find"}, + "name": []string{leaf}, + "sort": []string{"id"}, + "order": []string{"desc"}, + "limit": []string{"1"}, + }, + } + var info api.ReadMetadataResponse + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return info.Files, nil +} + +func (f *Fs) moveTo(ctx context.Context, id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID string) error { + + if srcDirectoryID != dstDirectoryID { + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/move", + NoResponse: true, + } + mv := api.MoveFileRequest{ + Destination: dstDirectoryID, + Files: []string{id}, + DestinationLeaf: dstLeaf, + } + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &mv, nil) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return fmt.Errorf("couldn't move file: %w", err) + } + } else { + if srcLeaf != dstLeaf { + err := f.updateFileInformation(ctx, &api.UpdateFileInformation{Name: dstLeaf}, id) + if err != nil { + return fmt.Errorf("move: failed rename: %w", err) + } + } + } + + return nil +} + +// updateFileInformation set's various file attributes most importantly it's name +func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileInformation, fileId string) (err error) { + opts := rest.Opts{ + Method: "PATCH", + Path: "/api/files/" + fileId, + NoResponse: true, + } + + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, update, nil) + return shouldRetry(ctx, resp, err) 
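`List` above fetches page 1 to learn `TotalPages`, then pulls the remaining pages concurrently with an `errgroup` capped at 8 goroutines, appending results under a mutex. The following standalone sketch shows the same fan-out pattern with a stubbed `fetchPage` in place of `readMetaDataForPath`; the names and page contents are illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// fetchPage stands in for readMetaDataForPath: it returns the items of one page.
func fetchPage(ctx context.Context, page int) ([]string, error) {
	return []string{fmt.Sprintf("item-%d-a", page), fmt.Sprintf("item-%d-b", page)}, nil
}

func main() {
	ctx := context.Background()
	totalPages := 5

	// Page 1 is fetched first (it carries the page count); the rest run in
	// parallel, capped at 8 in flight, with a mutex guarding the shared slice.
	items, _ := fetchPage(ctx, 1)
	var mu sync.Mutex
	g, gctx := errgroup.WithContext(ctx)
	g.SetLimit(8)
	for page := 2; page <= totalPages; page++ {
		page := page
		g.Go(func() error {
			pageItems, err := fetchPage(gctx, page)
			if err != nil {
				return err
			}
			mu.Lock()
			items = append(items, pageItems...)
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("listing failed:", err)
		return
	}
	fmt.Println(len(items), "items listed")
}
```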
+ }) + if err != nil { + return fmt.Errorf("couldn't update file info: %w", err) + } + return err +} + +func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, _ ...fs.OpenOption) error { + + o := &Object{ + fs: f, + } + uploadInfo, err := o.uploadMultipart(ctx, bufio.NewReader(in), src) + + if err != nil { + return err + } + + return o.createFile(ctx, src, uploadInfo) +} + +// FindLeaf finds a directory of name leaf in the folder with ID pathID +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { + files, err := f.findObject(ctx, pathID, leaf) + if err != nil { + return "", false, err + } + if len(files) == 0 { + return "", false, nil + } + if files[0].Type == "file" { + return "", false, fs.ErrorIsFile + } + return files[0].Id, true, nil +} + +// Put in to the remote path with the modTime given of the given size +// +// When called from outside an Fs by rclone, src.Size() will always be >= 0. +// But for unknown-sized objects (indicated by src.Size() == -1), Put should either +// return an error or upload it properly (rather than e.g. calling panic). +// +// May create the object even if it returns an error - if so +// will return the object and the error, otherwise will return +// nil and the error +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + if src.Size() < 0 { + return nil, errCanNotUploadFileWithUnknownSize + } + existingObj, err := f.NewObject(ctx, src.Remote()) + switch err { + case nil: + return existingObj, existingObj.Update(ctx, in, src, options...) + case fs.ErrorObjectNotFound: + // Not found so create it + return f.PutUnchecked(ctx, in, src, options...) + default: + return nil, err + } +} + +// PutUnchecked uploads the object +// +// This will create a duplicate if we upload a new file without +// checking to see if there is one already - use Put() for that. +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + err := f.putUnchecked(ctx, in, src, options...) + if err != nil { + return nil, err + } + return f.NewObject(ctx, src.Remote()) +} + +// Update the already existing object +// +// Copy the reader into the object updating modTime and size. 
+// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + + if src.Size() < 0 { + return errCanNotUploadFileWithUnknownSize + } + + remote := o.Remote() + + modTime := src.ModTime(ctx) + + leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) + if err != nil { + return err + } + + var uploadInfo *uploadInfo + + if src.Size() > 0 { + uploadInfo, err = o.uploadMultipart(ctx, bufio.NewReader(in), src) + if err != nil { + return err + } + } + + payload := &api.UpdateFileInformation{ + ModTime: Ptr(modTime.UTC()), + Size: src.Size(), + ParentID: directoryID, + Name: leaf, + } + + if uploadInfo != nil { + payload.Parts = uploadInfo.fileChunks + payload.UploadId = uploadInfo.uploadID + payload.ChannelID = o.fs.opt.ChannelID + payload.Encrypted = uploadInfo.encryptFile + } + + opts := rest.Opts{ + Method: "PUT", + Path: "/api/files/" + o.id + "/parts", + NoResponse: true, + } + + err = o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, payload, nil) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return fmt.Errorf("failed to update file information: %w", err) + } + + o.modTime = modTime + + o.size = src.Size() + + return nil +} + +// ChangeNotify calls the passed function with a path that has had changes. +// If the implementation uses polling, it should adhere to the given interval. +// +// Automatically restarts itself in case of unexpected behavior of the remote. +// +// Close the returned channel to stop being notified. +func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { + go func() { + processedEventIDs := make(map[string]time.Time) + var ticker *time.Ticker + var tickerC <-chan time.Time + for { + select { + case pollInterval, ok := <-pollIntervalChan: + if !ok { + if ticker != nil { + ticker.Stop() + } + return + } + if ticker != nil { + ticker.Stop() + ticker, tickerC = nil, nil + } + if pollInterval != 0 { + ticker = time.NewTicker(pollInterval) + tickerC = ticker.C + } + case <-tickerC: + fs.Debugf(f, "Checking for changes on remote") + for eventID, timestamp := range processedEventIDs { + if time.Since(timestamp) > 5*time.Minute { + delete(processedEventIDs, eventID) + } + } + err := f.changeNotifyRunner(ctx, notifyFunc, processedEventIDs) + if err != nil { + fs.Infof(f, "Change notify listener failure: %s", err) + } + } + } + }() +} + +func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), processedEventIDs map[string]time.Time) error { + + var changes []api.Event + + opts := rest.Opts{ + Method: "GET", + Path: "/api/events", + } + + err := f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &changes) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return err + } + + var pathsToClear []string + for _, change := range changes { + if _, ok := processedEventIDs[change.ID]; ok { + continue + } + addPathToClear := func(parentID string) { + if path, ok := f.dirCache.GetInv(parentID); ok { + pathsToClear = append(pathsToClear, path) + } + } + + // Check original parent location + addPathToClear(change.Source.ParentId) + + // Check destination parent location if file was moved + if change.Source.DestParentId != "" { + addPathToClear(change.Source.DestParentId) + } + processedEventIDs[change.ID] = time.Now() + } + notifiedPaths := make(map[string]bool) 
+ for _, path := range pathsToClear { + if _, ok := notifiedPaths[path]; ok { + continue + } + notifiedPaths[path] = true + notifyFunc(path, fs.EntryDirectory) + } + return nil +} + +// OpenChunkWriter returns the chunk size and a ChunkWriter +// +// Pass in the remote and the src object +// You can also use options to hint at the desired chunk size +func (f *Fs) OpenChunkWriter( + ctx context.Context, + remote string, + src fs.ObjectInfo, + options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { + + if src.Size() <= 0 { + return info, nil, errCanNotUploadFileWithUnknownSize + } + + o := &Object{ + fs: f, + remote: remote, + } + + uploadInfo, err := o.prepareUpload(ctx, src) + + if err != nil { + return info, nil, fmt.Errorf("failed to prepare upload: %w", err) + } + + chunkWriter := &objectChunkWriter{ + size: src.Size(), + f: f, + src: src, + o: o, + uploadInfo: uploadInfo, + } + info = fs.ChunkWriterInfo{ + ChunkSize: uploadInfo.chunkSize, + Concurrency: o.fs.opt.UploadConcurrency, + LeavePartsOnError: true, + } + fs.Debugf(o, "open chunk writer: started upload: %v", uploadInfo.uploadID) + return info, chunkWriter, err +} + +// CreateDir makes a directory with pathID as parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + opts := rest.Opts{ + Method: "POST", + Path: "/api/files", + } + mkdir := api.CreateFileRequest{ + Name: leaf, + Type: "folder", + ParentId: pathID, + } + info := api.FileInfo{} + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &mkdir, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return "", err + } + return info.Id, nil +} + +// Mkdir makes the directory (container, bucket) +// +// Shouldn't return an error if it already exists +func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { + _, err = f.dirCache.FindDir(ctx, dir, true) + return err +} + +func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error { + root := path.Join(f.root, dir) + if root == "" { + return errors.New("can't purge root directory") + } + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return err + } + + if check { + info, err := f.readMetaDataForPath(ctx, dir, &api.MetadataRequestOptions{ + Limit: 1, + Page: 1, + }) + if err != nil { + return err + } + if len(info.Files) > 0 { + return fs.ErrorDirectoryNotEmpty + } + } + + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/delete", + NoResponse: true, + } + rm := api.RemoveFileRequest{ + Files: []string{directoryID}, + } + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &rm, nil) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + f.dirCache.FlushDir(dir) + return nil +} + +// Rmdir removes the directory (container, bucket) if empty +// +// Return an error if it doesn't exist or isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { + return f.purgeCheck(ctx, dir, true) +} + +// Purge all files in the directory specified +// +// Implement this if you have a way of deleting all the files +// quicker than just running Remove() on the result of List() +// +// Return an error if it doesn't exist +func (f *Fs) Purge(ctx context.Context, dir string) error { + return f.purgeCheck(ctx, dir, false) +} + +// Move src to this remote using server-side move operations. +// +// This is stored with the remote path given. 
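`ChangeNotify` above runs a goroutine that rebuilds its ticker whenever a new poll interval arrives and stops when the interval channel is closed. Here is a reduced, runnable sketch of that ticker pattern with a stub `check` standing in for `changeNotifyRunner`; it illustrates the control flow only and is not the backend code itself.

```go
package main

import (
	"fmt"
	"time"
)

// pollLoop shows the ticker pattern used by ChangeNotify: the interval can be
// changed (or polling disabled with 0) at any time via pollIntervalChan, and
// closing the channel stops the loop.
func pollLoop(pollIntervalChan <-chan time.Duration, check func()) {
	var ticker *time.Ticker
	var tickerC <-chan time.Time
	for {
		select {
		case interval, ok := <-pollIntervalChan:
			if !ok {
				if ticker != nil {
					ticker.Stop()
				}
				return
			}
			if ticker != nil {
				ticker.Stop()
				ticker, tickerC = nil, nil
			}
			if interval != 0 {
				ticker = time.NewTicker(interval)
				tickerC = ticker.C
			}
		case <-tickerC:
			check()
		}
	}
}

func main() {
	intervals := make(chan time.Duration)
	done := make(chan struct{})
	go func() {
		pollLoop(intervals, func() { fmt.Println("checking for changes") })
		close(done)
	}()
	intervals <- 100 * time.Millisecond // start polling
	time.Sleep(350 * time.Millisecond)
	close(intervals) // stop the loop
	<-done
}
```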
+// +// It returns the destination Object and a possible error. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + + srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false) + if err != nil { + return nil, err + } + + dstLeaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) + if err != nil { + return nil, err + } + + err = f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcParentID, directoryID) + if err != nil { + return nil, err + } + f.dirCache.FlushDir(src.Remote()) + newObj := *srcObj + newObj.remote = remote + newObj.fs = f + return &newObj, nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server-side move operations. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove + +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote) + if err != nil { + return err + } + err = f.moveTo(ctx, srcID, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) + if err != nil { + return fmt.Errorf("dirmove: failed to move: %w", err) + } + srcFs.dirCache.FlushDir(srcRemote) + return nil +} + +func (o *Object) Remove(ctx context.Context) error { + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/delete", + NoResponse: true, + } + delete := api.RemoveFileRequest{ + Files: []string{o.id}, + } + err := o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, &delete, nil) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return err + } + return nil +} + +// PublicLink adds a "readable by anyone with link" permission on the given file or folder. 
+func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) { + id, err := f.dirCache.FindDir(ctx, remote, false) + if err == nil { + fs.Debugf(f, "attempting to share directory '%s'", remote) + } else { + fs.Debugf(f, "attempting to share single file '%s'", remote) + o, err := f.NewObject(ctx, remote) + if err != nil { + return "", err + } + id = o.(fs.IDer).ID() + } + if unlink { + opts := rest.Opts{ + Method: "DELETE", + Path: "/api/files/" + id + "/share", + NoResponse: true, + } + f.pacer.Call(func() (bool, error) { + resp, err := f.srv.Call(ctx, &opts) + return shouldRetry(ctx, resp, err) + }) + return "", nil + } + + share, err := f.getFileShare(ctx, id) + if err != nil { + if !errors.Is(err, fs.ErrorObjectNotFound) { + return "", err + } + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/" + id + "/share", + NoResponse: true, + } + payload := api.FileShare{} + if expire < fs.DurationOff { + dur := time.Now().Add(time.Duration(expire)).UTC() + payload.ExpiresAt = &dur + } + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, &payload, nil) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return "", err + } + share, err = f.getFileShare(ctx, id) + if err != nil { + return "", err + } + } + return fmt.Sprintf("%s/share/%s", f.opt.ApiHost, share.ID), nil +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + var resp *http.Response + + http := o.fs.srv + + fs.FixRangeOption(options, o.size) + + opts := rest.Opts{ + Method: "GET", + Path: fmt.Sprintf("/api/files/%s/%s", o.id, url.QueryEscape(o.name)), + Options: options, + } + if !o.fs.opt.ThreadedStreams { + opts.Parameters = url.Values{ + "download": []string{"1"}, + } + } + + err = o.fs.pacer.Call(func() (bool, error) { + resp, err = http.Call(ctx, &opts) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return nil, err + } + return resp.Body, err +} + +// Copy src to this remote using server-side copy operations. +// +// This is stored with the remote path given. +// +// It returns the destination Object and a possible error. 
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + srcLeaf, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false) + if err != nil { + return nil, err + } + dstLeaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) + if err != nil { + return nil, err + } + + if srcParentID == directoryID && dstLeaf == srcLeaf { + fs.Debugf(src, "Can't copy - change file name") + return nil, fs.ErrorCantCopy + } + + opts := rest.Opts{ + Method: "POST", + Path: "/api/files/" + srcObj.id + "/copy", + } + copy := api.CopyFile{ + Newname: dstLeaf, + Destination: directoryID, + ModTime: srcObj.ModTime(ctx).UTC(), + } + var info api.FileInfo + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, ©, &info) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, err + } + return f.newObjectWithInfo(ctx, remote, &info) +} + +// About gets quota information +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { + opts := rest.Opts{ + Method: "GET", + Path: "/api/files/categories", + } + var stats []api.CategorySize + err = f.pacer.Call(func() (bool, error) { + resp, err := f.srv.CallJSON(ctx, &opts, nil, &stats) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("failed to read user info: %w", err) + } + + total := int64(0) + for category := range stats { + total += stats[category].Size + } + return &fs.Usage{Used: fs.NewUsageValue(total)}, nil +} + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + return o.modTime +} + +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.size +} + +// Hash returns the Md5sum of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + return o.id +} + +// ParentID implements fs.ParentIDer. 
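`PublicLink` above only attaches an expiry when the caller passed a finite `--expire`, comparing against `fs.DurationOff`, and `getFileShare` treats an `expiresAt` in the past as not found. A small sketch of that expiry handling, assuming only the rclone `fs` package; the helper name is invented for illustration.

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
)

// shareExpiry converts the rclone --expire value into an absolute UTC
// timestamp, or nil when the link should never expire (fs.DurationOff).
func shareExpiry(expire fs.Duration) *time.Time {
	if expire < fs.DurationOff {
		t := time.Now().Add(time.Duration(expire)).UTC()
		return &t
	}
	return nil
}

func main() {
	if t := shareExpiry(fs.Duration(24 * time.Hour)); t != nil {
		fmt.Println("expires at:", t.Format(time.RFC3339))
	}
	if shareExpiry(fs.DurationOff) == nil {
		fmt.Println("no expiry requested")
	}
}
```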
+func (o *Object) ParentID() string { + return o.parentId +} + +// Storable returns whether this object is storable +func (o *Object) Storable() bool { + return true +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + updateInfo := &api.UpdateFileInformation{ + ModTime: Ptr(modTime.UTC()), + } + err := o.fs.updateFileInformation(ctx, updateInfo, o.id) + if err != nil { + return fmt.Errorf("couldn't update mod time: %w", err) + } + o.modTime = modTime + return nil +} + +// DirCacheFlush an optional interface to flush internal directory cache +// DirCacheFlush resets the directory cache - used in testing +// as an optional interface +func (f *Fs) DirCacheFlush() { + f.dirCache.ResetRoot() +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.Object = (*Object)(nil) + _ fs.MimeTyper = &Object{} + _ fs.OpenChunkWriter = (*Fs)(nil) + _ fs.IDer = (*Object)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.PublicLinker = (*Fs)(nil) + _ fs.ParentIDer = (*Object)(nil) + _ fs.Abouter = (*Fs)(nil) +) diff --git a/backend/teldrive/teldrive_test.go b/backend/teldrive/teldrive_test.go new file mode 100644 index 0000000000000..98576b13d9799 --- /dev/null +++ b/backend/teldrive/teldrive_test.go @@ -0,0 +1,20 @@ +package teldrive + +import ( + "testing" + + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "teldrive:", + NilObject: (*Object)(nil), + ChunkedUpload: fstests.ChunkedUploadConfig{ + MinChunkSize: minChunkSize, + CeilChunkSize: fstests.NextPowerOfTwo, + }, + SkipInvalidUTF8: true, + }) +} diff --git a/backend/teldrive/upload.go b/backend/teldrive/upload.go new file mode 100644 index 0000000000000..c5ab79846cf17 --- /dev/null +++ b/backend/teldrive/upload.go @@ -0,0 +1,386 @@ +package teldrive + +import ( + "context" + "crypto/md5" + "encoding/hex" + "errors" + "fmt" + "io" + "net/url" + "sort" + "strconv" + "sync" + + "github.com/google/uuid" + "github.com/rclone/rclone/backend/teldrive/api" + "github.com/rclone/rclone/fs/operations" + "github.com/rclone/rclone/lib/pool" + "github.com/rclone/rclone/lib/rest" + + "github.com/rclone/rclone/fs" +) + +type uploadInfo struct { + existingChunks map[int]api.PartFile + uploadID string + channelID int64 + encryptFile bool + chunkSize int64 + totalChunks int64 + fileChunks []api.FilePart + fileName string + dir string +} + +type objectChunkWriter struct { + size int64 + f *Fs + src fs.ObjectInfo + partsToCommitMu sync.Mutex + partsToCommit []api.PartFile + o *Object + uploadInfo *uploadInfo +} + +func getMD5Hash(text string) string { + hash := md5.Sum([]byte(text)) + return hex.EncodeToString(hash[:]) +} + +// WriteChunk will write chunk number with reader bytes, where chunk number >= 0 +func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) { + if chunkNumber < 0 { + err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber) + return -1, err + } + + chunkNumber += 1 + + if existing, ok := w.uploadInfo.existingChunks[chunkNumber]; ok { + switch r := reader.(type) { + case *operations.ReOpen: + r.Account(int(existing.Size)) + case *pool.RW: + r.Account(int(existing.Size)) + default: + } + w.addCompletedPart(existing) + return existing.Size, 
nil + } + + var ( + response api.PartFile + partName string + ) + + err = w.f.pacer.Call(func() (bool, error) { + + size, err = reader.Seek(0, io.SeekEnd) + if err != nil { + + return false, err + } + + _, err = reader.Seek(0, io.SeekStart) + if err != nil { + return false, err + } + + fs.Debugf(w.o, "Sending chunk %d length %d", chunkNumber, size) + if w.f.opt.RandomChunkName { + partName = getMD5Hash(uuid.New().String()) + } else { + partName = w.uploadInfo.fileName + if w.uploadInfo.totalChunks > 1 { + partName = fmt.Sprintf("%s.part.%03d", w.uploadInfo.fileName, chunkNumber) + } + } + + opts := rest.Opts{ + Method: "POST", + Body: reader, + ContentLength: &size, + ContentType: "application/octet-stream", + Parameters: url.Values{ + "partName": []string{partName}, + "fileName": []string{w.uploadInfo.fileName}, + "partNo": []string{strconv.Itoa(chunkNumber)}, + "channelId": []string{strconv.FormatInt(w.uploadInfo.channelID, 10)}, + "encrypted": []string{strconv.FormatBool(w.uploadInfo.encryptFile)}, + }, + } + + if w.f.opt.UploadHost != "" { + opts.RootURL = w.f.opt.UploadHost + "/api/uploads/" + w.uploadInfo.uploadID + + } else { + opts.Path = "/api/uploads/" + w.uploadInfo.uploadID + } + + resp, err := w.f.srv.CallJSON(ctx, &opts, nil, &response) + + retry, err := shouldRetry(ctx, resp, err) + + if err != nil { + fs.Debugf(w.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err) + } + if response.PartId == 0 { + return true, fmt.Errorf("error sending chunk %d", chunkNumber) + } + + return retry, err + + }) + + if err != nil { + return 0, fmt.Errorf("error sending chunk %d: %v", chunkNumber, err) + } + + w.addCompletedPart(response) + fs.Debugf(w.o, "Done sending chunk %d", chunkNumber) + + return size, err + +} + +// add a part number and etag to the completed parts +func (w *objectChunkWriter) addCompletedPart(part api.PartFile) { + w.partsToCommitMu.Lock() + defer w.partsToCommitMu.Unlock() + w.partsToCommit = append(w.partsToCommit, part) +} + +func (w *objectChunkWriter) Close(ctx context.Context) error { + + if w.uploadInfo.totalChunks != int64(len(w.partsToCommit)) { + return fmt.Errorf("uploaded failed") + } + + sort.Slice(w.partsToCommit, func(i, j int) bool { + return w.partsToCommit[i].PartNo < w.partsToCommit[j].PartNo + }) + + fileChunks := []api.FilePart{} + + for _, part := range w.partsToCommit { + fileChunks = append(fileChunks, api.FilePart{ID: part.PartId, Salt: part.Salt}) + } + + w.uploadInfo.fileChunks = fileChunks + + return w.o.createFile(ctx, w.src, w.uploadInfo) +} + +func (*objectChunkWriter) Abort(ctx context.Context) error { + return nil +} + +func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo) (*uploadInfo, error) { + + leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, src.Remote(), true) + + if err != nil { + return nil, err + } + + uploadID := getMD5Hash(fmt.Sprintf("%s:%s:%d:%d", directoryID, leaf, src.Size(), o.fs.userId)) + + var ( + uploadParts []api.PartFile + existingChunks map[int]api.PartFile + ) + + opts := rest.Opts{ + Method: "GET", + Path: "/api/uploads/" + uploadID, + } + + chunkSize := int64(o.fs.opt.ChunkSize) + + if chunkSize < src.Size() { + err := o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &uploadParts) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return nil, err + } + existingChunks = make(map[int]api.PartFile, len(uploadParts)) + for _, part := range uploadParts { + existingChunks[part.PartNo] = part + } + + } + + totalChunks 
+	totalChunks := src.Size() / chunkSize
+	if src.Size()%chunkSize != 0 {
+		totalChunks++
+	}
+
+	channelID := o.fs.opt.ChannelID
+	encryptFile := o.fs.opt.EncryptFiles
+
+	// Reuse the channel and encryption settings of any parts uploaded in a previous attempt
+	if len(uploadParts) > 0 {
+		channelID = uploadParts[0].ChannelID
+		encryptFile = uploadParts[0].Encrypted
+	}
+
+	return &uploadInfo{
+		existingChunks: existingChunks,
+		uploadID:       uploadID,
+		channelID:      channelID,
+		encryptFile:    encryptFile,
+		chunkSize:      chunkSize,
+		totalChunks:    totalChunks,
+		fileName:       leaf,
+		dir:            directoryID,
+	}, nil
+}
+
+// uploadMultipart uploads the file in chunks, skipping any chunks which
+// were already uploaded in a previous attempt
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo) (*uploadInfo, error) {
+	size := src.Size()
+	if size < 0 {
+		return nil, errors.New("unknown-sized upload not supported")
+	}
+
+	uploadInfo, err := o.prepareUpload(ctx, src)
+	if err != nil {
+		return nil, err
+	}
+
+	if size > 0 {
+		var (
+			partsToCommit []api.PartFile
+			uploadedSize  int64
+		)
+
+		totalChunks := int(uploadInfo.totalChunks)
+
+		for chunkNo := 1; chunkNo <= totalChunks; chunkNo++ {
+			if existing, ok := uploadInfo.existingChunks[chunkNo]; ok {
+				// Chunk already uploaded - skip over it in the input
+				if _, err := io.CopyN(io.Discard, in, existing.Size); err != nil {
+					return nil, err
+				}
+				partsToCommit = append(partsToCommit, existing)
+				uploadedSize += existing.Size
+				continue
+			}
+
+			n := uploadInfo.chunkSize
+			if chunkNo == totalChunks {
+				n = src.Size() - uploadedSize
+			}
+
+			chunkName := uploadInfo.fileName
+			if o.fs.opt.RandomChunkName {
+				chunkName = getMD5Hash(uuid.New().String())
+			} else if totalChunks > 1 {
+				chunkName = fmt.Sprintf("%s.part.%03d", chunkName, chunkNo)
+			}
+
+			partReader := io.LimitReader(in, n)
+
+			opts := rest.Opts{
+				Method:        "POST",
+				Body:          partReader,
+				ContentLength: &n,
+				ContentType:   "application/octet-stream",
+				Parameters: url.Values{
+					"partName":  []string{chunkName},
+					"fileName":  []string{uploadInfo.fileName},
+					"partNo":    []string{strconv.Itoa(chunkNo)},
+					"channelId": []string{strconv.FormatInt(uploadInfo.channelID, 10)},
+					"encrypted": []string{strconv.FormatBool(uploadInfo.encryptFile)},
+				},
+			}
+
+			if o.fs.opt.UploadHost != "" {
+				opts.RootURL = o.fs.opt.UploadHost + "/api/uploads/" + uploadInfo.uploadID
+			} else {
+				opts.Path = "/api/uploads/" + uploadInfo.uploadID
+			}
+
+			var partInfo api.PartFile
+			_, err := o.fs.srv.CallJSON(ctx, &opts, nil, &partInfo)
+			if err != nil {
+				return nil, err
+			}
+
+			uploadedSize += n
+			partsToCommit = append(partsToCommit, partInfo)
+		}
+
+		sort.Slice(partsToCommit, func(i, j int) bool {
+			return partsToCommit[i].PartNo < partsToCommit[j].PartNo
+		})
+
+		fileChunks := []api.FilePart{}
+		for _, part := range partsToCommit {
+			fileChunks = append(fileChunks, api.FilePart{ID: part.PartId, Salt: part.Salt})
+		}
+
+		uploadInfo.fileChunks = fileChunks
+	}
+
+	return uploadInfo, nil
+}
+
+// createFile creates the file entry on the server from the uploaded parts
+func (o *Object) createFile(ctx context.Context, src fs.ObjectInfo, uploadInfo *uploadInfo) error {
+	opts := rest.Opts{
+		Method:     "POST",
+		Path:       "/api/files",
+		NoResponse: true,
+	}
+
+	payload := api.CreateFileRequest{
+		Name:      uploadInfo.fileName,
+		Type:      "file",
+		ParentId:  uploadInfo.dir,
+		MimeType:  fs.MimeType(ctx, src),
+		Size:      src.Size(),
+		Parts:     uploadInfo.fileChunks,
+		ChannelID: uploadInfo.channelID,
+		Encrypted: uploadInfo.encryptFile,
+		ModTime:   src.ModTime(ctx).UTC(),
+	}
+
+	err := o.fs.pacer.Call(func() (bool, error) {
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, &payload, nil)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return err
+	}
+	if src.Size() > 0 {
+		opts = rest.Opts{
+			Method:     "DELETE",
+			Path:       "/api/uploads/" + uploadInfo.uploadID,
+			NoResponse: true,
+		}
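+
+		// Delete the upload session now that its parts have been committed to the new file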
+		err = o.fs.pacer.Call(func() (bool, error) {
+			resp, err := o.fs.srv.Call(ctx, &opts)
+			return shouldRetry(ctx, resp, err)
+		})
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/bin/upload-github b/bin/upload-github
index 0ebdc708dab20..a1f01a7f3c3bd 100755
--- a/bin/upload-github
+++ b/bin/upload-github
@@ -6,7 +6,7 @@ set -e
 
-REPO="rclone/rclone"
+REPO="tgdrive/rclone"
 
 if [ "$1" == "" ]; then
     echo "Syntax: $0 Version"
     exit 1
@@ -29,7 +29,7 @@ gh release create "${VERSION}" \
     --notes-file "/tmp/${VERSION}-release-notes" \
     --draft=true
 
-for build in build/*; do
+for build in /tmp/build/*; do
     case $build in
     *current*) continue ;;
     *testbuilds*) continue ;;
diff --git a/bin/zip.3.0.nupkg b/bin/zip.3.0.nupkg
new file mode 100644
index 0000000000000..dfa2739836fc4
Binary files /dev/null and b/bin/zip.3.0.nupkg differ
diff --git a/fs/operations/reopen.go b/fs/operations/reopen.go
index 9b86c6da3b5f6..ed56941fa6d2f 100644
--- a/fs/operations/reopen.go
+++ b/fs/operations/reopen.go
@@ -302,6 +302,14 @@ func (h *ReOpen) SetAccounting(account AccountFn) *ReOpen {
 	return h
 }
 
+// Account accounts for n bytes being read, calling the accounting function if set
+func (h *ReOpen) Account(n int) *ReOpen {
+	if h.account != nil {
+		h.account(n)
+	}
+	return h
+}
+
 // DelayAccounting makes sure the accounting function only gets called
 // on the i-th or later read of the data from this point (counting
 // from 1).
diff --git a/fs/rc/rcserver/rcserver.go b/fs/rc/rcserver/rcserver.go
index 7b6622c43aad7..f94724d42d31a 100644
--- a/fs/rc/rcserver/rcserver.go
+++ b/fs/rc/rcserver/rcserver.go
@@ -363,7 +363,7 @@ func (s *Server) serveRemote(w http.ResponseWriter, r *http.Request, path string
 }
 
 // Match URLS of the form [fs]/remote
-var fsMatch = regexp.MustCompile(`^\[(.*?)\](.*)$`)
+var fsMatch = regexp.MustCompile(`^\[([^}]*)\](.*)$`)
 
 func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string) {
 	// Look to see if this has an fs in the path
diff --git a/lib/pool/reader_writer.go b/lib/pool/reader_writer.go
index 3a14af2c03185..8ea398ddb6b96 100644
--- a/lib/pool/reader_writer.go
+++ b/lib/pool/reader_writer.go
@@ -87,6 +87,14 @@ func (rw *RW) SetAccounting(account RWAccount) *RW {
 	return rw
 }
 
+// Account accounts for n bytes being read, calling the accounting function if set
+func (rw *RW) Account(n int) *RW {
+	if rw.account != nil {
+		rw.account(n)
+	}
+	return rw
+}
+
 // DelayAccountinger enables an accounting delay
 type DelayAccountinger interface {
 	// DelayAccounting makes sure the accounting function only