From 1339a4f40e271e056f71eb0bc908c0ab2f8b8d4d Mon Sep 17 00:00:00 2001 From: Benji Date: Wed, 4 Feb 2026 16:14:16 +0100 Subject: [PATCH 1/9] merge: sync upstream rclone/rclone changes with merged backends - 119 files from upstream/master (commits after 9ae08a532) - Includes merged backend/all/all.go with backends from: - upstream/rclone (standard backends) - tgdrive/rclone (archive, drime, filen, internxt, shade, teldrive) - origin/main (alist, alldebrid, terabox, uptobox) - Excludes: CI/build files (.github, Dockerfile, Makefile, bin/) --- CONTRIBUTING.md | 2 +- README.md | 219 ++-- backend/all/all.go | Bin 2948 -> 6122 bytes backend/azureblob/azureblob.go | 7 +- backend/azurefiles/azurefiles.go | 21 +- backend/b2/b2.go | 10 +- backend/cache/cache.go | 2 +- backend/combine/combine.go | 3 - backend/doi/doi.go | 4 +- backend/doi/link_header.go | 4 +- backend/drive/drive.go | 4 +- backend/drive/metadata.go | 2 - backend/filelu/api/types.go | 2 +- backend/jottacloud/jottacloud.go | 243 ++-- backend/mailru/mailru.go | 4 +- backend/onedrive/metadata.go | 8 +- .../quickxorhash/quickxorhash_test.go | 4 +- backend/pikpak/api/types.go | 20 +- backend/pikpak/api/types_test.go | 99 ++ backend/protondrive/protondrive.go | 30 + backend/quatrix/upload_memory.go | 6 +- backend/s3/s3.go | 1064 +++++++++-------- backend/smb/filepool_test.go | 6 +- backend/smb/smb.go | 13 + backend/smb/smb_internal_test.go | 41 + backend/swift/swift.go | 23 + backend/swift/swift_test.go | 47 + backend/union/policy/epff.go | 1 - backend/webdav/api/types.go | 2 +- cmd/authorize/authorize.go | 8 +- cmd/authorize/authorize_test.go | 4 +- cmd/bisync/bilib/output.go | 6 + cmd/bisync/bisync_test.go | 6 +- cmd/bisync/compare.go | 4 +- cmd/bisync/listing.go | 3 +- cmd/bisync/queue.go | 6 +- cmd/config/config.go | 53 + cmd/convmv/convmv_test.go | 2 +- cmd/gitannex/e2e_test.go | 3 - cmd/help.go | 2 +- cmd/serve/http/http.go | 29 +- cmd/serve/http/http_test.go | 23 + cmd/serve/http/testdata/golden/root.zip | 
Bin 0 -> 695 bytes cmd/serve/http/testdata/golden/three.zip | Bin 0 -> 287 bytes cmd/serve/nfs/cache_test.go | 2 - cmd/serve/proxy/proxy.go | 2 +- cmd/serve/s3/logger.go | 23 +- cmd/test/info/base32768.go | 2 +- cmd/test/info/info.go | 2 +- cmd/test/info/internal/internal.go | 2 +- cmdtest/environment_test.go | 2 +- docs/content/_index.md | 7 +- docs/content/authors.md | 11 +- docs/content/box.md | 2 +- docs/content/changelog.md | 35 +- docs/content/docs.md | 4 +- docs/content/drive.md | 2 +- docs/content/dropbox.md | 2 +- docs/content/googlecloudstorage.md | 2 +- docs/content/googlephotos.md | 2 +- docs/content/hidrive.md | 2 +- docs/content/jottacloud.md | 305 +++-- docs/content/onedrive.md | 2 +- docs/content/pcloud.md | 2 +- docs/content/premiumizeme.md | 2 +- docs/content/putio.md | 2 +- docs/content/remote_setup.md | 82 +- docs/content/s3.md | 385 ++++++ docs/content/sharefile.md | 2 +- docs/content/smb.md | 2 +- docs/content/yandex.md | 2 +- docs/content/zoho.md | 2 +- docs/layouts/chrome/navbar.html | 2 +- fs/accounting/stats.go | 122 +- fs/accounting/stats_test.go | 24 + fs/bwtimetable.go | 2 +- fs/config/authorize.go | 6 +- fs/config/configmap/configmap.go | 39 +- .../configmap/configmap_external_test.go | 121 ++ fs/config/configmap/configmap_test.go | 24 - fs/config/configstruct/configstruct.go | 2 +- fs/config/flags/flags.go | 4 +- fs/config/rc.go | 1 - fs/dirtree/dirtree_test.go | 2 +- fs/list/helpers_test.go | 4 +- fs/list/sorter.go | 1 - fs/list/sorter_test.go | 2 +- fs/march/march.go | 4 +- fs/mimetype.go | 2 +- fs/operations/rc.go | 2 - fs/rc/config.go | 2 +- fs/rc/jobs/job_test.go | 5 +- fs/sync/rc.go | 1 - fs/sync/sync.go | 2 +- fs/sync/sync_transform_test.go | 2 +- fstest/runs/report.go | 43 +- fstest/runs/run.go | 2 +- fstest/test_all/config.yaml | 2 + fstest/testserver/init.d/TestSwiftAIO | 6 +- .../init.d/TestSwiftAIO.d/remakerings | 46 + fstest/testserver/init.d/TestSwiftAIOsegments | 4 +- fstest/testserver/testserver.go | 17 +- go.mod | 148 
+-- go.sum | 340 +++--- lib/encoder/encoder.go | 4 +- lib/http/auth.go | 2 +- lib/http/serve/dir.go | 8 + lib/http/serve/dir_test.go | 14 +- lib/http/server.go | 4 +- lib/http/templates/index.html | 34 +- lib/mmap/mmap_test.go | 6 +- lib/oauthutil/oauthutil.go | 9 +- lib/oauthutil/renew.go | 10 +- lib/pacer/pacer.go | 45 +- lib/pacer/pacer_test.go | 43 +- lib/pool/pool_test.go | 34 +- vfs/vfstest/fs.go | 2 +- vfs/zip.go | 73 ++ vfs/zip_test.go | 160 +++ 119 files changed, 2965 insertions(+), 1399 deletions(-) create mode 100644 backend/pikpak/api/types_test.go create mode 100644 backend/smb/smb_internal_test.go create mode 100644 cmd/serve/http/testdata/golden/root.zip create mode 100644 cmd/serve/http/testdata/golden/three.zip create mode 100644 fs/config/configmap/configmap_external_test.go create mode 100755 fstest/testserver/init.d/TestSwiftAIO.d/remakerings create mode 100644 vfs/zip.go create mode 100644 vfs/zip_test.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4978d776d..9e3a565c5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -628,7 +628,7 @@ You'll need to modify the following files - `backend/s3/s3.go` - Add the provider to `providerOption` at the top of the file - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`. - - Exclude your provider from generic config questions (eg `region` and `endpoint). + - Exclude your provider from generic config questions (eg `region` and `endpoint`). - Add the provider to the `setQuirks` function - see the documentation there. - `docs/content/s3.md` - Add the provider at the top of the page. diff --git a/README.md b/README.md index a2220f140..7af251973 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,4 @@ -> ⚠️ **Unofficial Fork Disclaimer** -> This is an **unofficial fork** of [rclone](https://rclone.org), with additional enhancements such as **Alist**, **Alldebrid**, **iCloud Photos**, **Teldrive** and **Terabox** support. 
-> I am **not affiliated with the upstream maintainers**, and this fork **does not intend to be malicious or harmful** in any way. -> Please **read the source code** if you're unsure or want to verify that it behaves as described. -> Contributions, feedback, and scrutiny are welcome. - ---- - -> ### 🚨 **BEWARE OF MALICIOUS FORKS** -> -> **This project is actively maintained.** -> -> There are forks (such as `jabiralam7/bclone-continued`) that were created **in advance** to falsely imply this project has been discontinued. **This is not true.** These forks are a known setup to inject malware at a later date. -> -> **Only download releases from this official repository.** Do not trust any fork claiming to be a "continuation" of bclone. - ---- - + [rclone logo](https://rclone.org/#gh-light-mode-only) [rclone logo](https://rclone.org/#gh-dark-mode-only) @@ -28,7 +11,10 @@ [Installation](https://rclone.org/install/) | [Forum](https://forum.rclone.org/) -[![Build Status](https://github.com/benjithatfoxguy/bclone/workflows/build/badge.svg)](https://github.com/benjithatfoxguy/bclone/actions?query=workflow%3Abuild) +[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild) +[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone) +[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone) +[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone) # Rclone @@ -37,101 +23,104 @@ directories to and from different cloud storage providers. 
## Storage providers - * 1Fichier [:page_facing_up:](https://rclone.org/fichier/) - * Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/) - * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss) - * Alist [:page_facing_up:](https://github.com/AlistGo/alist) - * Alldebrid [:page_facing_up:](https://alldebrid.com/) - * Amazon S3 [:page_facing_up:](https://rclone.org/s3/) - * ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos) - * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/) - * Box [:page_facing_up:](https://rclone.org/box/) - * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph) - * China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos) - * Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2) - * Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/) - * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces) - * Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage) - * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost) - * Dropbox [:page_facing_up:](https://rclone.org/dropbox/) - * Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/) - * Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files) - * Files.com [:page_facing_up:](https://rclone.org/filescom/) - * FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade) - * FTP [:page_facing_up:](https://rclone.org/ftp/) - * GoFile [:page_facing_up:](https://rclone.org/gofile/) - * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/) - * Google Drive [:page_facing_up:](https://rclone.org/drive/) - * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/) - * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/) - * 
Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box) - * HiDrive [:page_facing_up:](https://rclone.org/hidrive/) - * HTTP [:page_facing_up:](https://rclone.org/http/) - * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs) - * iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/) - * iCloud Photos [:page_facing_up:](https://github.com/rclone/rclone/issues/7982) - * ImageKit [:page_facing_up:](https://rclone.org/imagekit/) - * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/) - * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) - * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) - * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos) - * Koofr [:page_facing_up:](https://rclone.org/koofr/) - * Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia) - * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage) - * Linkbox [:page_facing_up:](https://rclone.org/linkbox) - * Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode) - * Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu) - * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/) - * Memset Memstore [:page_facing_up:](https://rclone.org/swift/) - * MEGA [:page_facing_up:](https://rclone.org/mega/) - * MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega) - * Memory [:page_facing_up:](https://rclone.org/memory/) - * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/) - * Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/) - * Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/) - * Minio [:page_facing_up:](https://rclone.org/s3/#minio) - * Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud) - * OVH [:page_facing_up:](https://rclone.org/swift/) - * Blomp Cloud Storage 
[:page_facing_up:](https://rclone.org/swift/) - * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/) - * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/) - * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/) - * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/) - * Outscale [:page_facing_up:](https://rclone.org/s3/#outscale) - * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) - * pCloud [:page_facing_up:](https://rclone.org/pcloud/) - * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox) - * PikPak [:page_facing_up:](https://rclone.org/pikpak/) - * Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/) - * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) - * put.io [:page_facing_up:](https://rclone.org/putio/) - * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/) - * QingStor [:page_facing_up:](https://rclone.org/qingstor/) - * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu) - * Quatrix [:page_facing_up:](https://rclone.org/quatrix/) - * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) - * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp) - * rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net) - * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) - * Seafile [:page_facing_up:](https://rclone.org/seafile/) - * Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve) - * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs) - * Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel) - * SFTP [:page_facing_up:](https://rclone.org/sftp/) - * SMB / CIFS [:page_facing_up:](https://rclone.org/smb/) - * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath) - * Storj [:page_facing_up:](https://rclone.org/storj/) - * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/) - * Synology C2 
Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2) - * Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos) - * Teldrive [📄](https://github.com/tgdrive/teldrive) - * Terabox [📄](https://terabox.com) - * Uloz.to [:page_facing_up:](https://rclone.org/ulozto/) - * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi) - * WebDAV [:page_facing_up:](https://rclone.org/webdav/) - * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/) - * Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/) - * The local filesystem [:page_facing_up:](https://rclone.org/local/) +- 1Fichier [:page_facing_up:](https://rclone.org/fichier/) +- Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/) +- Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss) +- Amazon S3 [:page_facing_up:](https://rclone.org/s3/) +- ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos) +- Backblaze B2 [:page_facing_up:](https://rclone.org/b2/) +- Box [:page_facing_up:](https://rclone.org/box/) +- Ceph [:page_facing_up:](https://rclone.org/s3/#ceph) +- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos) +- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2) +- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/) +- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces) +- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage) +- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost) +- Dropbox [:page_facing_up:](https://rclone.org/dropbox/) +- Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/) +- Exaba [:page_facing_up:](https://rclone.org/s3/#exaba) +- Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files) +- FileLu 
[:page_facing_up:](https://rclone.org/filelu/) +- Files.com [:page_facing_up:](https://rclone.org/filescom/) +- FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade) +- FTP [:page_facing_up:](https://rclone.org/ftp/) +- GoFile [:page_facing_up:](https://rclone.org/gofile/) +- Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/) +- Google Drive [:page_facing_up:](https://rclone.org/drive/) +- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/) +- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/) +- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner) +- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box) +- HiDrive [:page_facing_up:](https://rclone.org/hidrive/) +- HTTP [:page_facing_up:](https://rclone.org/http/) +- Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs) +- iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/) +- ImageKit [:page_facing_up:](https://rclone.org/imagekit/) +- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/) +- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) +- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) +- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo) +- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos) +- Koofr [:page_facing_up:](https://rclone.org/koofr/) +- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia) +- Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage) +- Linkbox [:page_facing_up:](https://rclone.org/linkbox) +- Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode) +- Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu) +- Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/) +- Memset Memstore 
[:page_facing_up:](https://rclone.org/swift/) +- MEGA [:page_facing_up:](https://rclone.org/mega/) +- MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega) +- Memory [:page_facing_up:](https://rclone.org/memory/) +- Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/) +- Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/) +- Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/) +- Minio [:page_facing_up:](https://rclone.org/s3/#minio) +- Nextcloud [:page_facing_up:](https://rclone.org/webdav/#nextcloud) +- Blomp Cloud Storage [:page_facing_up:](https://rclone.org/swift/) +- OpenDrive [:page_facing_up:](https://rclone.org/opendrive/) +- OpenStack Swift [:page_facing_up:](https://rclone.org/swift/) +- Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/) +- Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/) +- Outscale [:page_facing_up:](https://rclone.org/s3/#outscale) +- OVHcloud Object Storage (Swift) [:page_facing_up:](https://rclone.org/swift/) +- OVHcloud Object Storage (S3-compatible) [:page_facing_up:](https://rclone.org/s3/#ovhcloud) +- ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) +- pCloud [:page_facing_up:](https://rclone.org/pcloud/) +- Petabox [:page_facing_up:](https://rclone.org/s3/#petabox) +- PikPak [:page_facing_up:](https://rclone.org/pikpak/) +- Pixeldrain [:page_facing_up:](https://rclone.org/pixeldrain/) +- premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) +- put.io [:page_facing_up:](https://rclone.org/putio/) +- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/) +- QingStor [:page_facing_up:](https://rclone.org/qingstor/) +- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu) +- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata) +- Quatrix [:page_facing_up:](https://rclone.org/quatrix/) +- Rackspace Cloud Files 
[:page_facing_up:](https://rclone.org/swift/) +- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp) +- rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net) +- Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) +- Seafile [:page_facing_up:](https://rclone.org/seafile/) +- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve) +- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs) +- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel) +- SFTP [:page_facing_up:](https://rclone.org/sftp/) +- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/) +- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic) +- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath) +- Storj [:page_facing_up:](https://rclone.org/storj/) +- SugarSync [:page_facing_up:](https://rclone.org/sugarsync/) +- Synology C2 Object Storage [:page_facing_up:](https://rclone.org/s3/#synology-c2) +- Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos) +- Uloz.to [:page_facing_up:](https://rclone.org/ulozto/) +- Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi) +- WebDAV [:page_facing_up:](https://rclone.org/webdav/) +- Yandex Disk [:page_facing_up:](https://rclone.org/yandex/) +- Zoho WorkDrive [:page_facing_up:](https://rclone.org/zoho/) +- Zata.ai [:page_facing_up:](https://rclone.org/s3/#Zata) +- The local filesystem [:page_facing_up:](https://rclone.org/local/) Please see [the full list of all storage providers and their features](https://rclone.org/overview/) @@ -184,7 +173,7 @@ Please see the [rclone website](https://rclone.org/) for: ## Downloads - * https://github.com/BenjiThatFoxGuy/bclone/releases/latest/ +- ## License diff --git a/backend/all/all.go b/backend/all/all.go index 2f6cb125fc5ffb6bff56aaddf13e2927f38af1c9..4cdcdabdc257316f06c10a017aa63082f98d99b4 100644 GIT binary patch literal 6122 
zcmcJTU2dCD5QXP8mAVJ3Pwh)BwJYcjx`PTZHrUt@E;O+hpY%Hy)23=4XB341xttl! zocX!DzHDu48~bjBRd(Wdu$t?_I(|0R+Svx{ZM4bKoBNYBd0lybuC|NqSn=N8vHpKp z`j39!B5v$szW0C+^5PplOsx2svG<&H`7GHL?1}U9-Nu9cVEqTYZMomrY^8nX>7Er^ zysdb$$EW(AtG_+o<~MCQCeY-cC-M!yMtmr47xJSfp8n|4>P-H$D7L`DHNb{4qs$f}_!0V$wVnbpsmh!TWV$!^JD6_+P)ATz z#r*vp@pvGT5gDW^BCWR}I$vwl9*vhoRJOeNS#5OWQxtZ(_ZffuiO71R9#G24N(6RK z4_236IqTa0ub<9)ege^^>ibzB(j#>Az@`pR`OgmviwZnhn>0m|ER7@9> zmNW}??zL13(w<}t=q_ipa{%8vio!j^NW@gB{gw8Gk@bGr#l_VHO;gM6HEL#CdZF^P zCJvWXBg^oCsV#%0&Hkkeg=Ea$6LPgpVqY_TILgd z9f{J{z+336N$<|nV`l>dCf$e#G+v-^Gmo_kd(y-5E(Jr3jm+6UY%0mgZQ^BXywX z`iADMk3@F*e>Gd{?lHjMIrs39jJZ;`{P(xXp2!uc#GC7xird=_WdAxc!-eXTe literal 2948 zcmb7GOOn+v3|vo6k(w23NEMub8*l?kwv|{fwq^X8nY`2E_kd#CUL{rOD6Lkjoo!ox zkUi<(l|oo0W-YBg@0;hviw1JZ%nZsmSR9E%k5f>)$Wz7{E|MCT#Er_Z z18lXMwnooyfwMYQcXV5Wq!agXP)h-15lKSnD3}j|p;6&%8Dh`*d>k_~6V*Hpw~5#o zD`x|%xv`xjmS-H0<;-+~f{r+i(xcotqG^!J$bqTGb8n&`p [base64_json_blob | client_id client_secret]", + Use: "authorize [base64_json_blob | client_id client_secret]", Short: `Remote authorization.`, Long: `Remote authorization. Used to authorize a remote or headless -rclone from a machine with a browser - use as instructed by -rclone config. +rclone from a machine with a browser. Use as instructed by rclone config. +See also the [remote setup documentation](/remote_setup). The command requires 1-3 arguments: -- fs name (e.g., "drive", "s3", etc.) +- Name of a backend (e.g. 
"drive", "s3") - Either a base64 encoded JSON blob obtained from a previous rclone config session - Or a client_id and client_secret pair obtained from the remote service diff --git a/cmd/authorize/authorize_test.go b/cmd/authorize/authorize_test.go index 364da5ed1..eace8b6e3 100644 --- a/cmd/authorize/authorize_test.go +++ b/cmd/authorize/authorize_test.go @@ -10,7 +10,7 @@ import ( func TestAuthorizeCommand(t *testing.T) { // Test that the Use string is correctly formatted - if commandDefinition.Use != "authorize [base64_json_blob | client_id client_secret]" { + if commandDefinition.Use != "authorize [base64_json_blob | client_id client_secret]" { t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use) } @@ -26,7 +26,7 @@ func TestAuthorizeCommand(t *testing.T) { } helpOutput := buf.String() - if !strings.Contains(helpOutput, "authorize ") { + if !strings.Contains(helpOutput, "authorize ") { t.Errorf("Help output doesn't contain correct usage information") } } diff --git a/cmd/bisync/bilib/output.go b/cmd/bisync/bilib/output.go index 00ac8d78e..f83b6bd07 100644 --- a/cmd/bisync/bilib/output.go +++ b/cmd/bisync/bilib/output.go @@ -4,15 +4,19 @@ package bilib import ( "bytes" "log/slog" + "sync" "github.com/rclone/rclone/fs/log" ) // CaptureOutput runs a function capturing its output at log level INFO. 
func CaptureOutput(fun func()) []byte { + var mu sync.Mutex buf := &bytes.Buffer{} oldLevel := log.Handler.SetLevel(slog.LevelInfo) log.Handler.SetOutput(func(level slog.Level, text string) { + mu.Lock() + defer mu.Unlock() buf.WriteString(text) }) defer func() { @@ -20,5 +24,7 @@ func CaptureOutput(fun func()) []byte { log.Handler.SetLevel(oldLevel) }() fun() + mu.Lock() + defer mu.Unlock() return buf.Bytes() } diff --git a/cmd/bisync/bisync_test.go b/cmd/bisync/bisync_test.go index 0fc680e82..f7c05d5e8 100644 --- a/cmd/bisync/bisync_test.go +++ b/cmd/bisync/bisync_test.go @@ -522,7 +522,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str require.NoError(b.t, err) b.step = 0 b.stopped = false - for _, line := range strings.Split(string(scenBuf), "\n") { + for line := range strings.SplitSeq(string(scenBuf), "\n") { comment := strings.Index(line, "#") if comment != -1 { line = line[:comment] @@ -936,7 +936,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) { // splitLine splits scenario line into tokens and performs // substitutions that involve whitespace or control chars. 
func splitLine(line string) (args []string) { - for _, s := range strings.Fields(line) { + for s := range strings.FieldsSeq(line) { b := []byte(whitespaceReplacer.Replace(s)) b = regexChar.ReplaceAllFunc(b, func(b []byte) []byte { c, _ := strconv.ParseUint(string(b[5:7]), 16, 8) @@ -1513,7 +1513,7 @@ func (b *bisyncTest) compareResults() int { fs.Log(nil, divider) fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file) - for _, line := range strings.Split(strings.TrimSpace(text), "\n") { + for line := range strings.SplitSeq(strings.TrimSpace(text), "\n") { fs.Logf(nil, "| %s", strings.TrimSpace(line)) } } diff --git a/cmd/bisync/compare.go b/cmd/bisync/compare.go index cf69b5d1c..9334bec7f 100644 --- a/cmd/bisync/compare.go +++ b/cmd/bisync/compare.go @@ -219,8 +219,8 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error { return nil } var CompareFlag CompareOpt // for exclusions - opts := strings.Split(b.opt.CompareFlag, ",") - for _, opt := range opts { + opts := strings.SplitSeq(b.opt.CompareFlag, ",") + for opt := range opts { switch strings.ToLower(strings.TrimSpace(opt)) { case "size": b.opt.Compare.Size = true diff --git a/cmd/bisync/listing.go b/cmd/bisync/listing.go index b316ceb0d..1a15fe56a 100644 --- a/cmd/bisync/listing.go +++ b/cmd/bisync/listing.go @@ -707,8 +707,7 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res prettyprint(dstList.list, "dstList", fs.LogLevelDebug) // clear stats so we only do this once - accounting.MaxCompletedTransfers = 0 - accounting.Stats(ctx).PruneTransfers() + accounting.Stats(ctx).RemoveDoneTransfers() } if b.DebugName != "" { diff --git a/cmd/bisync/queue.go b/cmd/bisync/queue.go index ed0257b5d..58588baef 100644 --- a/cmd/bisync/queue.go +++ b/cmd/bisync/queue.go @@ -245,10 +245,8 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib. 
} } - b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown - if accounting.MaxCompletedTransfers != -1 { - accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown - } + b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown + accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown ctxCopy, b.CancelSync = context.WithCancel(ctxCopy) b.testFn() err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs) diff --git a/cmd/config/config.go b/cmd/config/config.go index e9874c29f..c06dff3e3 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -37,6 +37,7 @@ func init() { configCommand.AddCommand(configDisconnectCommand) configCommand.AddCommand(configUserInfoCommand) configCommand.AddCommand(configEncryptionCommand) + configCommand.AddCommand(configStringCommand) } var configCommand = &cobra.Command{ @@ -613,3 +614,55 @@ If the config file is not encrypted it will return a non zero exit code.`, "|", return nil }, } + +var configStringCommand = &cobra.Command{ + Use: "string ", + Short: `Print connection string for a single remote.`, + Long: strings.ReplaceAll(`Print a connection string for a single remote. + +The [connection strings](/docs/#connection-strings) can be used +wherever a remote is needed and can be more convenient than using the +config file, especially if using the RC API. + +Backend parameters may be provided to the command also. + +Example: + +|||sh +$ rclone config string s3:rclone --s3-no-check-bucket +:s3,access_key_id=XXX,no_check_bucket,provider=AWS,region=eu-west-2,secret_access_key=YYY:rclone +||| + +**NB** the strings are not quoted for use in shells (eg bash, +powershell, windows cmd). Most will work if enclosed in "double +quotes", however connection strings that contain double quotes will +require further quoting which is very shell dependent. 
+ +`, "|", "`"), + Annotations: map[string]string{ + "versionIntroduced": "v1.72", + }, + RunE: func(command *cobra.Command, args []string) error { + cmd.CheckArgs(1, 1, command, args) + remote := args[0] + fsInfo, _, fsPath, m, err := fs.ConfigFs(remote) + if err != nil { + return err + } + + // Find the overridden options and construct the string + overridden := fsInfo.Options.NonDefault(m) + var out strings.Builder + out.WriteRune(':') + out.WriteString(fsInfo.Name) + config := overridden.Human() + if config != "" { + out.WriteRune(',') + out.WriteString(config) + } + out.WriteRune(':') + out.WriteString(fsPath) + fmt.Println(out.String()) + return nil + }, +} diff --git a/cmd/convmv/convmv_test.go b/cmd/convmv/convmv_test.go index 8e0ec4d5d..73fd45943 100644 --- a/cmd/convmv/convmv_test.go +++ b/cmd/convmv/convmv_test.go @@ -152,7 +152,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item { items := []fstest.Item{} for _, c := range alphabet { var out strings.Builder - for i := rune(0); i < 7; i++ { + for i := range rune(7) { out.WriteRune(c + i) } fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String())) diff --git a/cmd/gitannex/e2e_test.go b/cmd/gitannex/e2e_test.go index be386d447..ffc630100 100644 --- a/cmd/gitannex/e2e_test.go +++ b/cmd/gitannex/e2e_test.go @@ -229,7 +229,6 @@ func TestEndToEnd(t *testing.T) { skipE2eTestIfNecessary(t) for _, mode := range allLayoutModes() { - mode := mode t.Run(string(mode), func(t *testing.T) { t.Parallel() @@ -258,7 +257,6 @@ func TestEndToEndMigration(t *testing.T) { } for _, mode := range allLayoutModes() { - mode := mode t.Run(string(mode), func(t *testing.T) { t.Parallel() @@ -318,7 +316,6 @@ func TestEndToEndRepoLayoutCompat(t *testing.T) { } for _, mode := range allLayoutModes() { - mode := mode t.Run(string(mode), func(t *testing.T) { t.Parallel() diff --git a/cmd/help.go b/cmd/help.go index ff449b8c8..a99f54ce9 100644 --- a/cmd/help.go +++ b/cmd/help.go @@ -344,7 +344,7 @@ 
func showBackend(name string) { } for _, ex := range opt.Examples { fmt.Printf(" - %s\n", quoteString(ex.Value)) - for _, line := range strings.Split(ex.Help, "\n") { + for line := range strings.SplitSeq(ex.Help, "\n") { fmt.Printf(" - %s\n", line) } } diff --git a/cmd/serve/http/http.go b/cmd/serve/http/http.go index 92f1b02f2..e45c0ab85 100644 --- a/cmd/serve/http/http.go +++ b/cmd/serve/http/http.go @@ -41,9 +41,10 @@ var OptionsInfo = fs.Options{}. // Options required for http server type Options struct { - Auth libhttp.AuthConfig - HTTP libhttp.Config - Template libhttp.TemplateConfig + Auth libhttp.AuthConfig + HTTP libhttp.Config + Template libhttp.TemplateConfig + DisableZip bool } // DefaultOpt is the default values used for Options @@ -69,6 +70,7 @@ func init() { flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) vfsflags.AddFlags(flagSet) proxyflags.AddFlags(flagSet) + flagSet.BoolVar(&Opt.DisableZip, "disable-zip", false, "Disable zip download of directories") cmdserve.Command.AddCommand(Command) cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) { // Read VFS Opts @@ -208,6 +210,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt // Serve HTTP until the server is shutdown func (s *HTTP) Serve() error { s.server.Serve() + fs.Logf(s.f, "HTTP Server started on %s", s.server.URLs()) s.server.Wait() return nil } @@ -256,6 +259,24 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string return } dir := node.(*vfs.Dir) + + if r.URL.Query().Get("download") == "zip" && !s.opt.DisableZip { + fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr) + zipName := path.Base(dirRemote) + if dirRemote == "" { + zipName = "root" + } + w.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"") + w.Header().Set("Content-Type", "application/zip") + w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat)) + err := 
vfs.CreateZip(ctx, dir, w) + if err != nil { + serve.Error(ctx, dirRemote, w, "Failed to create zip", err) + return + } + return + } + dirEntries, err := dir.ReadDirAll() if err != nil { serve.Error(ctx, dirRemote, w, "Failed to list directory", err) @@ -279,6 +300,8 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string // Set the Last-Modified header to the timestamp w.Header().Set("Last-Modified", dir.ModTime().UTC().Format(http.TimeFormat)) + directory.DisableZip = s.opt.DisableZip + directory.Serve(w, r) } diff --git a/cmd/serve/http/http_test.go b/cmd/serve/http/http_test.go index 3044c141a..5d8ccdc06 100644 --- a/cmd/serve/http/http_test.go +++ b/cmd/serve/http/http_test.go @@ -4,6 +4,7 @@ import ( "context" "flag" "io" + stdfs "io/fs" "net/http" "os" "path/filepath" @@ -75,6 +76,16 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string) return s, testURL } +// setAllModTimes walks root and sets atime/mtime to t for every file & directory. 
+func setAllModTimes(root string, t time.Time) error { + return filepath.WalkDir(root, func(path string, d stdfs.DirEntry, err error) error { + if err != nil { + return err + } + return os.Chtimes(path, t, t) + }) +} + var ( datedObject = "two.txt" expectedTime = time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC) @@ -123,6 +134,8 @@ func testGET(t *testing.T, useProxy bool) { f = nil } else { + // set all the mod times to expectedTime + require.NoError(t, setAllModTimes("testdata/files", expectedTime)) // Create a test Fs var err error f, err = fs.NewFs(context.Background(), "testdata/files") @@ -233,6 +246,16 @@ func testGET(t *testing.T, useProxy bool) { Range: "bytes=3-", Golden: "testdata/golden/two3-.txt", }, + { + URL: "/?download=zip", + Status: http.StatusOK, + Golden: "testdata/golden/root.zip", + }, + { + URL: "/three/?download=zip", + Status: http.StatusOK, + Golden: "testdata/golden/three.zip", + }, } { method := test.Method if method == "" { diff --git a/cmd/serve/http/testdata/golden/root.zip b/cmd/serve/http/testdata/golden/root.zip new file mode 100644 index 0000000000000000000000000000000000000000..a3a710df4b5a20a39c84dff50d6e070b0c6abbe0 GIT binary patch literal 695 zcmWIWW@Zs#-~hrV2_+2%B*4MI$&jCys;XB~Q4$)$%D||4AkX5|dGFAtOe_ox|NjSg zvvV{rZ+74Y>R|=q0I*3Q1JF%k1DaHlQIwjh4>t#*7@M`+aK(uzHfx>K@bX2n`o>YW z|2#m;*x**RDcoWFzp~^*g_Ve-3!RX2xnBgkwXm>(g;uv*NhtO zFzaEViyTm>CLD#EfEw25CLo6mC_oWFgc0ZgwBQZ!W(7qtFp04;*a78}ftY~-0E(e> AjQ{`u literal 0 HcmV?d00001 diff --git a/cmd/serve/http/testdata/golden/three.zip b/cmd/serve/http/testdata/golden/three.zip new file mode 100644 index 0000000000000000000000000000000000000000..bcffe9819f1359f4deb9c8043fe520283318ea86 GIT binary patch literal 287 zcmWIWW@Zs#-~hrV2_+2%B*4nR$&jd5Qc)5b!pgv?dLYk2>!gO4?^7lg28RFt1H9Qe zZX9*{&jZxK2E+k)%}K&E$M+G!Ao<@c;=DjZ*ue%dGKnxCoP=x%$VsRG)qw%tsCtpj cMb+y8GzQgC0p6@2e=slsVFi$$2;wjR08wx(@Bjb+ literal 0 HcmV?d00001 diff --git a/cmd/serve/nfs/cache_test.go b/cmd/serve/nfs/cache_test.go index 
6941fde17..2cc4d961e 100644 --- a/cmd/serve/nfs/cache_test.go +++ b/cmd/serve/nfs/cache_test.go @@ -66,7 +66,6 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) { func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) { var wg sync.WaitGroup for i := range 100 { - i := i wg.Add(1) go func() { defer wg.Done() @@ -125,7 +124,6 @@ func TestCache(t *testing.T) { }() billyFS := &FS{nil} // place holder billyFS for _, cacheType := range []handleCache{cacheMemory, cacheDisk, cacheSymlink} { - cacheType := cacheType t.Run(cacheType.String(), func(t *testing.T) { h := &Handler{ vfs: vfs.New(object.MemoryFs, nil), diff --git a/cmd/serve/proxy/proxy.go b/cmd/serve/proxy/proxy.go index 7acc0ce7a..efe2fb459 100644 --- a/cmd/serve/proxy/proxy.go +++ b/cmd/serve/proxy/proxy.go @@ -182,7 +182,7 @@ func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) { // Obscure any values in the config map that need it obscureFields, ok := config.Get("_obscure") if ok { - for _, key := range strings.Split(obscureFields, ",") { + for key := range strings.SplitSeq(obscureFields, ",") { value, ok := config.Get(key) if ok { obscuredValue, err := obscure.Obscure(value) diff --git a/cmd/serve/s3/logger.go b/cmd/serve/s3/logger.go index d99e27df7..cb0337f95 100644 --- a/cmd/serve/s3/logger.go +++ b/cmd/serve/s3/logger.go @@ -2,6 +2,7 @@ package s3 import ( "fmt" + "strings" "github.com/rclone/gofakes3" "github.com/rclone/rclone/fs" @@ -12,25 +13,23 @@ type logger struct{} // print log message func (l logger) Print(level gofakes3.LogLevel, v ...any) { - var s string - if len(v) == 0 { - s = "" - } else { - var ok bool - s, ok = v[0].(string) - if !ok { - s = fmt.Sprint(v[0]) + var b strings.Builder + for i := range v { + if i > 0 { + fmt.Fprintf(&b, " ") } - v = v[1:] + fmt.Fprint(&b, v[i]) } + s := b.String() + switch level { default: fallthrough case gofakes3.LogErr: - fs.Errorf("serve s3", s, v...) 
+ fs.Errorf("serve s3", s) case gofakes3.LogWarn: - fs.Infof("serve s3", s, v...) + fs.Infof("serve s3", s) case gofakes3.LogInfo: - fs.Debugf("serve s3", s, v...) + fs.Debugf("serve s3", s) } } diff --git a/cmd/test/info/base32768.go b/cmd/test/info/base32768.go index 23949a2e1..dac3f3134 100644 --- a/cmd/test/info/base32768.go +++ b/cmd/test/info/base32768.go @@ -34,7 +34,7 @@ func (r *results) checkBase32768() { // Create test files for _, c := range safeAlphabet { var out strings.Builder - for i := rune(0); i < 32; i++ { + for i := range rune(32) { out.WriteRune(c + i) } fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String())) diff --git a/cmd/test/info/info.go b/cmd/test/info/info.go index 9f0b9be8d..0e709f09f 100644 --- a/cmd/test/info/info.go +++ b/cmd/test/info/info.go @@ -292,7 +292,7 @@ func (r *results) checkControls() { tokens <- struct{}{} } var wg sync.WaitGroup - for i := rune(0); i < 128; i++ { + for i := range rune(128) { s := string(i) if i == 0 || i == '/' { // We're not even going to check NULL or / diff --git a/cmd/test/info/internal/internal.go b/cmd/test/info/internal/internal.go index 7af06b87a..ce06bbe2f 100644 --- a/cmd/test/info/internal/internal.go +++ b/cmd/test/info/internal/internal.go @@ -95,7 +95,7 @@ func (e *Position) UnmarshalText(text []byte) error { switch s := strings.ToLower(string(text)); s { default: *e = PositionNone - for _, p := range strings.Split(s, ",") { + for p := range strings.SplitSeq(s, ",") { switch p { case "left": *e |= PositionLeft diff --git a/cmdtest/environment_test.go b/cmdtest/environment_test.go index 86ab0b4de..9e4f660d2 100644 --- a/cmdtest/environment_test.go +++ b/cmdtest/environment_test.go @@ -351,7 +351,7 @@ func TestEnvironmentVariables(t *testing.T) { parseFileFilters := func(out string) (extensions []string) { // Match: - (^|/)[^/]*\.jpg$ find := regexp.MustCompile(`^- \(\^\|\/\)\[\^\/\]\*\\\.(.*?)\$$`) - for _, line := range strings.Split(out, "\n") { + for line := range 
strings.SplitSeq(out, "\n") { if m := find.FindStringSubmatch(line); m != nil { extensions = append(extensions, m[1]) } diff --git a/docs/content/_index.md b/docs/content/_index.md index 06c200cf3..dcc4b424d 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -125,6 +125,7 @@ WebDAV or S3, that work out of the box.) {{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}} {{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}} {{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}} +{{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}} {{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}} {{< provider name="FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}} {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}} @@ -133,9 +134,11 @@ WebDAV or S3, that work out of the box.) 
{{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}} {{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}} {{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}} +{{< provider name="Hetzner Object Storage" home="https://www.hetzner.com/storage/object-storage/" config="/s3/#hetzner" >}} {{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}} {{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}} {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}} +{{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}} {{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}} {{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}} {{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}} @@ -179,7 +182,10 @@ WebDAV or S3, that work out of the box.) 
{{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}} {{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}} {{< provider name="Quatrix by Maytech" home="https://www.maytech.net/products/quatrix-business" config="/quatrix/" >}} +{{< provider name="Rabata Cloud Storage" home="https://rabata.io" config="/s3/#Rabata" >}} +{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}} {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}} +{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}} {{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}} {{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}} {{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}} @@ -194,7 +200,6 @@ WebDAV or S3, that work out of the box.) 
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}} {{< provider name="Synology" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}} {{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}} -{{< provider name="Terabox" home="https://www.terabox.com/" config="/terabox/" >}} {{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}} {{< provider name="Uloz.to" home="https://uloz.to" config="/ulozto/" >}} {{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}} diff --git a/docs/content/authors.md b/docs/content/authors.md index b61a11e90..c46df27b6 100644 --- a/docs/content/authors.md +++ b/docs/content/authors.md @@ -980,7 +980,6 @@ put them back in again.` >}} - Flora Thiebaut - kingston125 - Ser-Bul <30335009+Ser-Bul@users.noreply.github.com> - * Benji Silver - jinjingroad - necaran <55765083+necaran@users.noreply.github.com> - Marvin Rösch @@ -991,7 +990,7 @@ put them back in again.` >}} - Ross Smith II - Vikas Bhansali <64532198+vibhansa-msft@users.noreply.github.com> - Sudipto Baral -- Sam Pegg +- Sam Pegg <70067376+S-Pegg1@users.noreply.github.com> - liubingrun - Albin Parou - n4n5 <56606507+Its-Just-Nans@users.noreply.github.com> @@ -1012,3 +1011,11 @@ put them back in again.` >}} - dougal <147946567+roucc@users.noreply.github.com> - anon-pradip - Robin Rolf +- Jean-Christophe Cura +- russcoss +- Matt LaPaglia +- Youfu Zhang <1315097+zhangyoufu@users.noreply.github.com> +- juejinyuxitu +- iTrooz +- Microscotch +- Andrew Ruthven diff --git a/docs/content/box.md b/docs/content/box.md index 85bc27a97..aaa19d579 100644 --- a/docs/content/box.md +++ b/docs/content/box.md @@ -84,7 +84,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. 
Note that rclone runs a webserver on your local machine to collect the token as returned from Box. This only runs from the moment it opens diff --git a/docs/content/changelog.md b/docs/content/changelog.md index 57a7cf6f8..5ebe40fb5 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -6,9 +6,34 @@ description: "Rclone Changelog" # Changelog -## v1.73.0 - 2025-08-22 +## v1.71.1 - 2025-09-24 -[See commits](https://github.com/rclone/rclone/compare/v1.70.0...v1.73.0) +[See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.71.1) + +- Bug Fixes + - bisync: Fix error handling for renamed conflicts (nielash) + - march: Fix deadlock when using --fast-list on syncs (Nick Craig-Wood) + - operations: Fix partial name collisions for non --inplace copies (Nick Craig-Wood) + - pacer: Fix deadlock with --max-connections (Nick Craig-Wood) + - doc fixes (albertony, anon-pradip, Claudius Ellsel, dougal, Jean-Christophe Cura, Nick Craig-Wood, nielash) +- Mount + - Do not log successful unmount as an error (Tilman Vogel) +- VFS + - Fix SIGHUP killing serve instead of flushing directory caches (dougal) +- Local + - Fix rmdir "Access is denied" on windows (nielash) +- Box + - Fix about after change in API return (Nick Craig-Wood) +- Combine + - Propagate SlowHash feature (skbeh) +- Drive + - Update making your own client ID instructions (Ed Craig-Wood) +- Internet Archive + - Fix server side copy files with spaces (Nick Craig-Wood) + +## v1.71.0 - 2025-08-22 + +[See commits](https://github.com/rclone/rclone/compare/v1.70.0...v1.71.0) - New S3 providers - [Exaba](/s3/#exaba) (Nick Craig-Wood) @@ -147,15 +172,15 @@ description: "Rclone Changelog" ## v1.70.1 - 2025-06-19 -[See commits](https://github.com/rclone/rclone/compare/v1.73.0...v1.70.1) +[See commits](https://github.com/rclone/rclone/compare/v1.70.0...v1.70.1) - Bug Fixes - convmv: Fix spurious "error running command echo" on Windows (Nick Craig-Wood) - doc fixes (albertony, Ed Craig-Wood, 
jinjingroad) -## v1.73.0 - 2025-06-17 +## v1.70.0 - 2025-06-17 -[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.73.0) +[See commits](https://github.com/rclone/rclone/compare/v1.69.0...v1.70.0) - New backends - [DOI](/doi/) (Flora Thiebaut) diff --git a/docs/content/docs.md b/docs/content/docs.md index 77f386277..dd27f9d44 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -85,7 +85,6 @@ See the following for detailed instructions for - [SMB](/smb/) - [Storj](/storj/) - [SugarSync](/sugarsync/) -- [Terabox](/terabox/) - [Union](/union/) - [Uloz.to](/ulozto/) - [Uptobox](/uptobox/) @@ -385,6 +384,9 @@ does not work on Windows.) rclone copy ':http,url="https://example.com":path/to/dir' /tmp/dir ``` +You can use [rclone config string](/commands/rclone_config_string/) to +convert a remote into a connection string. + #### Connection strings, config and logging If you supply extra configuration to a backend by command line flag, diff --git a/docs/content/drive.md b/docs/content/drive.md index de1c3196c..5b3579635 100644 --- a/docs/content/drive.md +++ b/docs/content/drive.md @@ -96,7 +96,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from Google if using web browser to automatically diff --git a/docs/content/dropbox.md b/docs/content/dropbox.md index 71eccb7b4..327c68b8e 100644 --- a/docs/content/dropbox.md +++ b/docs/content/dropbox.md @@ -60,7 +60,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from Dropbox. 
This only diff --git a/docs/content/googlecloudstorage.md b/docs/content/googlecloudstorage.md index c403a1d2e..a7596b8f5 100644 --- a/docs/content/googlecloudstorage.md +++ b/docs/content/googlecloudstorage.md @@ -147,7 +147,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from Google if using web browser to automatically diff --git a/docs/content/googlephotos.md b/docs/content/googlephotos.md index 102f2d039..2b87b235a 100644 --- a/docs/content/googlephotos.md +++ b/docs/content/googlephotos.md @@ -97,7 +97,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from Google if using web browser to automatically diff --git a/docs/content/hidrive.md b/docs/content/hidrive.md index 8e990e93e..f94eb80c4 100644 --- a/docs/content/hidrive.md +++ b/docs/content/hidrive.md @@ -72,7 +72,7 @@ and hence should not be shared with other persons.** See the [below section](#keeping-your-tokens-safe) for more information. See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from HiDrive. 
This only runs from the moment it opens diff --git a/docs/content/jottacloud.md b/docs/content/jottacloud.md index e4dab510b..e0765c7e5 100644 --- a/docs/content/jottacloud.md +++ b/docs/content/jottacloud.md @@ -7,106 +7,171 @@ versionIntroduced: "v1.43" # {{< icon "fa fa-cloud" >}} Jottacloud Jottacloud is a cloud storage service provider from a Norwegian company, using -its own datacenters in Norway. In addition to the official service at -[jottacloud.com](https://www.jottacloud.com/), it also provides white-label -solutions to different companies, such as: +its own datacenters in Norway. +In addition to the official service at [jottacloud.com](https://www.jottacloud.com/), +it also provides white-label solutions to different companies. The following +are currently supported by this backend, using a different authentication setup +as described [below](#whitelabel-authentication): + +- Elkjøp (with subsidiaries): + - Elkjøp Cloud (cloud.elkjop.no) + - Elgiganten Cloud (cloud.elgiganten.dk) + - Elgiganten Cloud (cloud.elgiganten.se) + - ELKO Cloud (cloud.elko.is) + - Gigantti Cloud (cloud.gigantti.fi) - Telia - Telia Cloud (cloud.telia.se) - Telia Sky (sky.telia.no) - Tele2 - Tele2 Cloud (mittcloud.tele2.se) - Onlime - - Onlime Cloud Storage (onlime.dk) -- Elkjøp (with subsidiaries): - - Elkjøp Cloud (cloud.elkjop.no) - - Elgiganten Sweden (cloud.elgiganten.se) - - Elgiganten Denmark (cloud.elgiganten.dk) - - Giganti Cloud (cloud.gigantti.fi) - - ELKO Cloud (cloud.elko.is) - -Most of the white-label versions are supported by this backend, although may -require different authentication setup - described below. + - Onlime (onlime.dk) +- MediaMarkt + - MediaMarkt Cloud (mediamarkt.jottacloud.com) + - Let's Go Cloud (letsgo.jotta.cloud) Paths are specified as `remote:path` Paths may be as deep as required, e.g. `remote:directory/subdirectory`. 
-## Authentication types - -Some of the whitelabel versions uses a different authentication method than the -official service, and you have to choose the correct one when setting up the remote. - -### Standard authentication - -The standard authentication method used by the official service (jottacloud.com), -as well as some of the whitelabel services, requires you to generate a single-use -personal login token from the account security settings in the service's web -interface. Log in to your account, go to "Settings" and then "Security", or use -the direct link presented to you by rclone when configuring the remote: -. Scroll down to the section "Personal login -token", and click the "Generate" button. Note that if you are using a whitelabel -service you probably can't use the direct link, you need to find the same page in -their dedicated web interface, and also it may be in a different location than -described above. - -To access your account from multiple instances of rclone, you need to configure -each of them with a separate personal login token. E.g. you create a Jottacloud -remote with rclone in one location, and copy the configuration file to a second -location where you also want to run rclone and access the same remote. Then you -need to replace the token for one of them, using the [config reconnect](https://rclone.org/commands/rclone_config_reconnect/) -command, which requires you to generate a new personal login token and supply -as input. If you do not do this, the token may easily end up being invalidated, -resulting in both instances failing with an error message something along the +## Authentication + +Authentication in Jottacloud is in general based on OAuth and OpenID Connect +(OIDC). There are different variants to choose from, depending on which service +you are using, e.g. a white-label service may only support one of them. 
Note +that there is no documentation to rely on, so the descriptions provided here +are based on observations and may not be accurate. + +Jottacloud uses two optional OAuth security mechanisms, referred to as "Refresh +Token Rotation" and "Automatic Reuse Detection", which has some implications. +Access tokens normally have one hour expiry, after which they need to be +refreshed (rotated), an operation that requires the refresh token to be +supplied. Rclone does this automatically. This is standard OAuth. But in +Jottacloud, such a refresh operation not only creates a new access token, but +also refresh token, and invalidates the existing refresh token, the one that +was supplied. It keeps track of the history of refresh tokens, sometimes +referred to as a token family, descending from the original refresh token that +was issued after the initial authentication. This is used to detect any +attempts at reusing old refresh tokens, and trigger an immedate invalidation of +the current refresh token, and effectively the entire refresh token family. + +When the current refresh token has been invalidated, next time rclone tries to +perform a token refresh, it will fail with an error message something along the lines of: ```text - oauth2: cannot fetch token: 400 Bad Request - Response: {"error":"invalid_grant","error_description":"Stale token"} +CRITICAL: Failed to create file system for "remote:": (...): couldn't fetch token: invalid_grant: maybe token expired? - try refreshing with "rclone config reconnect remote:" ``` -When this happens, you need to replace the token as described above to be able -to use your remote again. - -All personal login tokens you have taken into use will be listed in the web -interface under "My logged in devices", and from the right side of that list -you can click the "X" button to revoke individual tokens. - -### Legacy authentication - -If you are using one of the whitelabel versions (e.g. 
from Elkjøp) you may not -have the option to generate a CLI token. In this case you'll have to use the -legacy authentication. To do this select yes when the setup asks for legacy -authentication and enter your username and password. The rest of the setup is -identical to the default setup. - -### Telia Cloud authentication - -Similar to other whitelabel versions Telia Cloud doesn't offer the option of -creating a CLI token, and additionally uses a separate authentication flow -where the username is generated internally. To setup rclone to use Telia Cloud, -choose Telia Cloud authentication in the setup. The rest of the setup is -identical to the default setup. - -### Tele2 Cloud authentication - -As Tele2-Com Hem merger was completed this authentication can be used for former -Com Hem Cloud and Tele2 Cloud customers as no support for creating a CLI token -exists, and additionally uses a separate authentication flow where the username -is generated internally. To setup rclone to use Tele2 Cloud, choose Tele2 Cloud -authentication in the setup. The rest of the setup is identical to the default setup. - -### Onlime Cloud Storage authentication - -Onlime has sold access to Jottacloud proper, while providing localized support -to Danish Customers, but have recently set up their own hosting, transferring -their customers from Jottacloud servers to their own ones. +If you run rclone with verbosity level 2 (`-vv`), you will see a debug message +with an additional error description from the OAuth response: -This, of course, necessitates using their servers for authentication, but -otherwise functionality and architecture seems equivalent to Jottacloud. +```text +DEBUG : remote: got fatal oauth error: oauth2: "invalid_grant" "Session doesn't have required client" +``` -To setup rclone to use Onlime Cloud Storage, choose Onlime Cloud authentication -in the setup. The rest of the setup is identical to the default setup. 
+(The error description used to be "Stale token" instead of "Session doesn't +have required client", so you may see references to that in older descriptions +of this situation.) + +When this happens, you need to re-authenticate to be able to use your remote +again, e.g. using the [config reconnect](/commands/rclone_config_reconnect/) +command as suggested in the error message. This will create an entirely new +refresh token (family). + +A typical example of how you may end up in this situation, is if you create +a Jottacloud remote with rclone in one location, and then copy the +configuration file to a second location where you start using rclone to access +the same remote. Eventually there will now be a token refresh attempt with an +invalidated token, i.e. refresh token reuse, resulting in both instances +starting to fail with the "invalid_grant" error. It is possible to copy remote +configurations, but you must then replace the token for one of them using the +[config reconnect](https://rclone.org/commands/rclone_config_reconnect/) +command. + +You can get some overview of your active tokens in your service's web user +interface, if you navigate to "Settings" and then "Security" (in which case +you end up at <https://www.jottacloud.com/web/secure> or similar). Down on +that page you have a section "My logged in devices". This contains a list +of entries which seemingly represent currently valid refresh tokens, or +refresh token families. From the right side of that list you can click a +button ("X") to revoke (invalidate) it, which means you will still have access +using an existing access token until that expires, but you will not be able to +perform a token refresh. Note that this entire "My logged in devices" feature +seems to behave a bit differently with different authentication variants and +with use of the different (white-label) services. + +### Standard + +This is an OAuth variant designed for command-line applications. 
It is +primarily supported by the official service (jottacloud.com), but may also be +supported by some of the white-label services. The information necessary to be +able to perform authentication, like domain name and endpoint to connect to, +are found automatically (it is encoded into the supplied login token, described +next), so you do not need to specify which service to configure. + +When configuring a remote, you are asked to enter a single-use personal login +token, which you must manually generate from the account security settings in +the service's web interface. You do not need a web browser on the same machine +like with traditional OAuth, but need to use a web browser somewhere, and be +able to copy the generated string into your rclone configuration session. +Log in to your service's web user interface, navigate to "Settings" and then +"Security", or, for the official service, use the direct link presented to you +by rclone when configuring the remote: <https://www.jottacloud.com/web/secure>. +Scroll down to the section "Personal login token", and click the "Generate" +button. Copy the presented string and paste it where rclone asks for it. Rclone +will then use this to perform an initial token request, and receive a regular +OAuth token which it stores in your remote configuration. There will then also +be a new entry in the "My logged in devices" list in the web interface, with +device name and application name "Jottacloud CLI". + +Each time a new token is created this way, i.e. a new personal login token is +generated and traded in for an OAuth token, you get an entirely new refresh +token family, with a new entry in the "My logged in devices". You can create as +many remotes as you want, and use multiple instances of rclone on same or +different machine, as long as you configure them separately like this, and not +get yourself into the refresh token reuse issue described above. + +### Traditional + +Jottacloud also supports a more traditional OAuth variant. 
Most of the +white-label services support this, and for many of them this is the only +alternative because they do not support personal login tokens. This method +relies on pre-defined service-specific domain names and endpoints, and rclone +needs you to specify which service to configure. This also means that any +changes to existing or additions of new white-label services need an update +in the rclone backend implementation. + +When configuring a remote, you must interactively login to an OAuth +authorization web site, and a one-time authorization code is sent back to +rclone behind the scenes, which it uses to request an OAuth token. This means +that you need to be on a machine with an internet-connected web browser. If you +need it on a machine where this is not the case, then you will have to create +the configuration on a different machine and copy it from there. The Jottacloud +backend does not support the `rclone authorize` command. See the +[remote setup docs](/remote_setup) for details. + +Jottacloud exerts some form of strict session management when authenticating +using this method. This leads to some unexpected cases of the "invalid_grant" +error described above, and effectively limits you to only use of a single +active authentication on the same machine. I.e. you can only create a single +rclone remote, and you can't even log in with the service's official desktop +client while having a rclone remote configured, or else you will eventually get +all sessions invalidated and are forced to re-authenticate. + +When you have successfully authenticated, there will be an entry in the +"My logged in devices" list in the web interface representing your session. It +will typically be listed with application name "Jottacloud for Desktop" or +similar (it depends on the white-label service configuration). + +### Legacy + +Originally Jottacloud used an OAuth variant which required your account's +username and password to be specified. 
When Jottacloud migrated to the newer +methods, some white-label versions (those from Elkjøp) still used this legacy +method for a long time. Currently there are no known uses of this, it is still +supported by rclone, but the support will be removed in a future version. ## Configuration @@ -125,7 +190,10 @@ n) New remote s) Set configuration password q) Quit config n/s/q> n + +Enter name for new remote. name> remote + Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. @@ -134,60 +202,63 @@ XX / Jottacloud \ (jottacloud) [snip] Storage> jottacloud + +Option client_id. +OAuth Client Id. +Leave blank normally. +Enter a value. Press Enter to leave empty. +client_id> + +Option client_secret. +OAuth Client Secret. +Leave blank normally. +Enter a value. Press Enter to leave empty. +client_secret> + Edit advanced config? y) Yes n) No (default) y/n> n + Option config_type. -Select authentication type. -Choose a number from below, or type in an existing string value. +Type of authentication. +Choose a number from below, or type in an existing value of type string. Press Enter for the default (standard). / Standard authentication. - 1 | Use this if you're a normal Jottacloud user. + | This is primarily supported by the official service, but may also be + | supported by some white-label services. It is designed for command-line + 1 | applications, and you will be asked to enter a single-use personal login + | token which you must manually generate from the account security settings + | in the web interface of your service. \ (standard) + / Traditional authentication. + | This is supported by the official service and all white-label services + | that rclone knows about. You will be asked which service to connect to. + 2 | It has a limitation of only a single active authentication at a time. You + | need to be on, or have access to, a machine with an internet-connected + | web browser. + \ (traditional) / Legacy authentication. 
- 2 | This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users. + 3 | This is no longer supported by any known services and not recommended + | used. You will be asked for your account's username and password. \ (legacy) - / Telia Cloud authentication. - 3 | Use this if you are using Telia Cloud. - \ (telia) - / Tele2 Cloud authentication. - 4 | Use this if you are using Tele2 Cloud. - \ (tele2) - / Onlime Cloud authentication. - 5 | Use this if you are using Onlime Cloud. - \ (onlime) config_type> 1 + +Option config_login_token. Personal login token. -Generate here: https://www.jottacloud.com/web/secure -Login Token> +Generate it from the account security settings in the web interface of your +service, for the official service on https://www.jottacloud.com/web/secure. +Enter a value. +config_login_token> + Use a non-standard device/mountpoint? Choosing no, the default, will let you access the storage used for the archive section of the official Jottacloud client. If you instead want to access the sync or the backup section, for example, you must choose yes. y) Yes n) No (default) -y/n> y -Option config_device. -The device to use. In standard setup the built-in Jotta device is used, -which contains predefined mountpoints for archive, sync etc. All other devices -are treated as backup devices by the official Jottacloud client. You may create -a new by entering a unique name. -Choose a number from below, or type in your own string value. -Press Enter for the default (DESKTOP-3H31129). - 1 > DESKTOP-3H31129 - 2 > Jotta -config_device> 2 -Option config_mountpoint. -The mountpoint to use for the built-in device Jotta. -The standard setup is to use the Archive mountpoint. Most other mountpoints -have very limited support in rclone and should generally be avoided. -Choose a number from below, or type in an existing string value. -Press Enter for the default (Archive). 
- 1 > Archive - 2 > Shared - 3 > Sync -config_mountpoint> 1 +y/n> n + Configuration complete. Options: - type: jottacloud diff --git a/docs/content/onedrive.md b/docs/content/onedrive.md index 6420037d1..2d85a60f8 100644 --- a/docs/content/onedrive.md +++ b/docs/content/onedrive.md @@ -100,7 +100,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from Microsoft. This only runs from the moment it diff --git a/docs/content/pcloud.md b/docs/content/pcloud.md index 1a99e3ef3..79a803081 100644 --- a/docs/content/pcloud.md +++ b/docs/content/pcloud.md @@ -71,7 +71,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note if you are using remote config with rclone authorize while your pcloud server is the EU region, you will need to set the hostname in 'Edit advanced diff --git a/docs/content/premiumizeme.md b/docs/content/premiumizeme.md index 045fbb02d..7b4ed67f2 100644 --- a/docs/content/premiumizeme.md +++ b/docs/content/premiumizeme.md @@ -65,7 +65,7 @@ y/e/d> ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from premiumize.me. This only runs from the moment it opens diff --git a/docs/content/putio.md b/docs/content/putio.md index 4dba401a6..64d16a445 100644 --- a/docs/content/putio.md +++ b/docs/content/putio.md @@ -80,7 +80,7 @@ e/n/d/r/c/s/q> q ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. 
+machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from put.io if using web browser to automatically diff --git a/docs/content/remote_setup.md b/docs/content/remote_setup.md index 5c6fd2d5c..e5c1cd052 100644 --- a/docs/content/remote_setup.md +++ b/docs/content/remote_setup.md @@ -6,22 +6,23 @@ description: "Configuring rclone up on a remote / headless machine" # Configuring rclone on a remote / headless machine Some of the configurations (those involving oauth2) require an -Internet connected web browser. +internet-connected web browser. -If you are trying to set rclone up on a remote or headless box with no -browser available on it (e.g. a NAS or a server in a datacenter) then -you will need to use an alternative means of configuration. There are -two ways of doing it, described below. +If you are trying to set rclone up on a remote or headless machine with no +browser available on it (e.g. a NAS or a server in a datacenter), then +you will need to use an alternative means of configuration. There are +three ways of doing it, described below. ## Configuring using rclone authorize -On the headless box run `rclone` config but answer `N` to the `Use auto config?` -question. +On the headless machine run [rclone config](/commands/rclone_config), but +answer `N` to the question `Use web browser to automatically authenticate rclone with remote?`. ```text -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No @@ -33,33 +34,35 @@ a web browser available. 
For more help and alternate methods see: https://rclone.org/remote_setup/ Execute the following on the machine with the web browser (same rclone version recommended): -rclone authorize "onedrive" + rclone authorize "onedrive" Then paste the result. Enter a value. config_token> ``` -Then on your main desktop machine +Then on your main desktop machine, run [rclone authorize](/commands/rclone_authorize/). ```text rclone authorize "onedrive" -If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth -Log in and authorize rclone for access -Waiting for code... +NOTICE: Make sure your Redirect URL is set to "http://localhost:53682/" in your custom config. +NOTICE: If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx +NOTICE: Log in and authorize rclone for access +NOTICE: Waiting for code... + Got code Paste the following into your remote machine ---> SECRET_TOKEN <---End paste ``` -Then back to the headless box, paste in the code +Then back to the headless machine, paste in the code. ```text config_token> SECRET_TOKEN -------------------- [acd12] -client_id = -client_secret = +client_id = +client_secret = token = SECRET_TOKEN -------------------- y) Yes this is OK @@ -70,18 +73,19 @@ y/e/d> ## Configuring by copying the config file -Rclone stores all of its config in a single configuration file. This -can easily be copied to configure a remote rclone. +Rclone stores all of its configuration in a single file. This can easily be +copied to configure a remote rclone (although some backends does not support +reusing the same configuration, consult your backend documentation to be +sure). -So first configure rclone on your desktop machine with +Start by running [rclone config](/commands/rclone_config) to create the +configuration file on your desktop machine. ```sh rclone config ``` -to set up the config file. 
- -Find the config file by running `rclone config file`, for example +Then locate the file by running [rclone config file](/commands/rclone_config_file). ```sh $ rclone config file @@ -89,31 +93,37 @@ Configuration file is stored at: /home/user/.rclone.conf ``` -Now transfer it to the remote box (scp, cut paste, ftp, sftp, etc.) and -place it in the correct place (use `rclone config file` on the remote -box to find out where). +Finally, transfer the file to the remote machine (scp, cut paste, ftp, sftp, etc.) +and place it in the correct location (use [rclone config file](/commands/rclone_config_file) +on the remote machine to find out where). ## Configuring using SSH Tunnel -Linux and MacOS users can utilize SSH Tunnel to redirect the headless box -port 53682 to local machine by using the following command: +If you have an SSH client installed on your local machine, you can set up an +SSH tunnel to redirect the port 53682 into the headless machine by using the +following command: ```sh ssh -L localhost:53682:localhost:53682 username@remote_server ``` -Then on the headless box run `rclone config` and answer `Y` to the -`Use auto config?` question. +Then on the headless machine run [rclone config](/commands/rclone_config) and +answer `Y` to the question `Use web browser to automatically authenticate rclone with remote?`. ```text -Use auto config? - * Say Y if not sure - * Say N if you are working on a remote or headless machine +Use web browser to automatically authenticate rclone with remote? + * Say Y if the machine running rclone has a web browser you can use + * Say N if running rclone on a (remote) machine without web browser access +If not sure try Y. If Y failed, try N. y) Yes (default) n) No y/n> y +NOTICE: Make sure your Redirect URL is set to "http://localhost:53682/" in your custom config. 
+NOTICE: If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx +NOTICE: Log in and authorize rclone for access +NOTICE: Waiting for code... ``` -Then copy and paste the auth url `http://127.0.0.1:53682/auth?state=xxxxxxxxxxxx` -to the browser on your local machine, complete the auth and it is done. +Finally, copy and paste the presented URL `http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx` +to the browser on your local machine, complete the auth and you are done. diff --git a/docs/content/s3.md b/docs/content/s3.md index 5b902854d..a930234eb 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -21,7 +21,9 @@ The S3 backend can be used with a number of different providers: {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}} {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}} {{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}} +{{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}} {{< provider name="GCS" home="https://cloud.google.com/storage/docs" config="/s3/#google-cloud-storage" >}} +{{< provider name="Hetzner" home="https://www.hetzner.com/storage/object-storage/" config="/s3/#hetzner" >}} {{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}} {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}} {{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}} @@ -38,6 +40,7 @@ The S3 backend can be used with a number of different providers: {{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}} {{< provider name="Pure Storage FlashBlade" 
home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}} {{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}} +{{< provider name="Rabata Cloud Storage" home="https://rabata.io" config="/s3/#Rabata" >}} {{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}} {{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}} {{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}} @@ -3400,6 +3403,150 @@ endpoint = https://storage.googleapis.com This is Google bug [#312292516](https://issuetracker.google.com/u/0/issues/312292516). +### Hetzner Object Storage {#hetzner} + +Here is an example of making a [Hetzner Object Storage](https://www.hetzner.com/storage/object-storage/) +configuration. First run: + + rclone config + +This will guide you through an interactive setup process. + +``` +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +Enter name for new remote. +name> my-hetzner +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +[snip] + XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Outscale, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others + \ (s3) +[snip] +Storage> s3 +Option provider. +Choose your S3 provider. +Choose a number from below, or type in your own value. +Press Enter to leave empty. +[snip] +XX / Hetzner Object Storage + \ (Hetzner) +[snip] +provider> Hetzner +Option env_auth. 
+Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). +Only applies if access_key_id and secret_access_key is blank. +Choose a number from below, or type in your own boolean value (true or false). +Press Enter for the default (false). + 1 / Enter AWS credentials in the next step. + \ (false) + 2 / Get AWS credentials from the environment (env vars or IAM). + \ (true) +env_auth> +Option access_key_id. +AWS Access Key ID. +Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +access_key_id> ACCESS_KEY +Option secret_access_key. +AWS Secret Access Key (password). +Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +secret_access_key> SECRET_KEY +Option region. +Region to connect to. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / Helsinki + \ (hel1) + 2 / Falkenstein + \ (fsn1) + 3 / Nuremberg + \ (nbg1) +region> +Option endpoint. +Endpoint for Hetzner Object Storage +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / Helsinki + \ (hel1.your-objectstorage.com) + 2 / Falkenstein + \ (fsn1.your-objectstorage.com) + 3 / Nuremberg + \ (nbg1.your-objectstorage.com) +endpoint> +Option location_constraint. +Location constraint - must be set to match the Region. +Leave blank if not sure. Used when creating buckets only. +Enter a value. Press Enter to leave empty. +location_constraint> +Option acl. +Canned ACL used when creating buckets and storing or copying objects. +This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. +For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl +Note that this ACL is applied when server-side copying objects as S3 +doesn't copy the ACL from the source but rather writes a fresh one. 
+If the acl is an empty string then no X-Amz-Acl: header is added and +the default (private) will be used. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + / Owner gets FULL_CONTROL. + 1 | No one else has access rights (default). + \ (private) + / Owner gets FULL_CONTROL. + 2 | The AllUsers group gets READ access. + \ (public-read) +acl> +Edit advanced config? +y) Yes +n) No (default) +y/n> +Configuration complete. +Options: +- type: s3 +- provider: Hetzner +- access_key_id: ACCESS_KEY +- secret_access_key: SECRET_KEY +Keep this "my-hetzner" remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> +Current remotes: + +Name Type +==== ==== +my-hetzner s3 + +e) Edit existing remote +n) New remote +d) Delete remote +r) Rename remote +c) Copy remote +s) Set configuration password +q) Quit config +e/n/d/r/c/s/q> +``` + +This will leave the config file looking like this. + +``` +[my-hetzner] +type = s3 +provider = Hetzner +access_key_id = ACCESS_KEY +secret_access_key = SECRET_KEY +region = hel1 +endpoint = hel1.your-objectstorage.com +acl = private +``` + + ### Huawei OBS {#huawei-obs} Object Storage Service (OBS) provides stable, secure, efficient, and easy-to-use cloud storage that lets you store virtually any volume of unstructured data in any format and access it from anywhere. @@ -5635,6 +5782,244 @@ Name Type qiniu s3 ``` +### FileLu S5 {#filelu-s5} + +[FileLu S5 Object Storage](https://s5lu.com) is an S3-compatible object storage system. +It provides multiple region options (Global, US-East, EU-Central, AP-Southeast, and ME-Central) while using a single endpoint (`s5lu.com`). +FileLu S5 is designed for scalability, security, and simplicity, with predictable pricing and no hidden charges for data transfers or API requests. + +Here is an example of making a configuration. First run: + +```sh +rclone config +``` + +This will guide you through an interactive setup process. 
+ +```text +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +Enter name for new remote. +name> s5lu + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +[snip] +XX / Amazon S3 Compliant Storage Providers including AWS,... FileLu, ... + \ (s3) +[snip] +Storage> s3 + +Option provider. +Choose your S3 provider. +Choose a number from below, or type in your own value. +Press Enter to leave empty. +[snip] +XX / FileLu S5 Object Storage + \ (FileLu) +[snip] +provider> FileLu + +Option env_auth. +Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). +Only applies if access_key_id and secret_access_key is blank. +Choose a number from below, or type in your own boolean value (true or false). +Press Enter for the default (false). + 1 / Enter AWS credentials in the next step. + \ (false) + 2 / Get AWS credentials from the environment (env vars or IAM). + \ (true) +env_auth> + +Option access_key_id. +AWS Access Key ID. +Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +access_key_id> XXX + +Option secret_access_key. +AWS Secret Access Key (password). +Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +secret_access_key> XXX + +Option endpoint. +Endpoint for S3 API. +Required when using an S3 clone. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / Global + \ (global) + 2 / North America (US-East) + \ (us-east) + 3 / Europe (EU-Central) + \ (eu-central) + 4 / Asia Pacific (AP-Southeast) + \ (ap-southeast) + 5 / Middle East (ME-Central) + \ (me-central) +region> 1 + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: s3 +- provider: FileLu +- access_key_id: XXX +- secret_access_key: XXX +- endpoint: s5lu.com +Keep this "s5lu" remote? 
+y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +This will leave the config file looking like this. + +``` +[s5lu] +type = s3 +provider = FileLu +access_key_id = XXX +secret_access_key = XXX +endpoint = s5lu.com +``` + +### Rabata {#Rabata} + +[Rabata](https://rabata.io) is an S3-compatible secure cloud storage service that offers flat, transparent pricing (no API request fees) +while supporting standard S3 APIs. It is suitable for backup, application storage, media workflows, and archive use cases. + +Server side copy is not implemented with Rabata, also meaning modification time of objects cannot be updated. + +Rclone config: + +``` +rclone config +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +Enter name for new remote. +name> Rabata + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +[snip] +XX / Amazon S3 Compliant Storage Providers including AWS, ... + \ (s3) +[snip] +Storage> s3 + +Option provider. +Choose your S3 provider. +Choose a number from below, or type in your own value. +Press Enter to leave empty. +[snip] +XX / Rabata Cloud Storage + \ (Rabata) +[snip] +provider> Rabata + +Option env_auth. +Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). +Only applies if access_key_id and secret_access_key is blank. +Choose a number from below, or type in your own boolean value (true or false). +Press Enter for the default (false). + 1 / Enter AWS credentials in the next step. + \ (false) + 2 / Get AWS credentials from the environment (env vars or IAM). + \ (true) +env_auth> + +Option access_key_id. +AWS Access Key ID. +Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +access_key_id> ACCESS_KEY_ID + +Option secret_access_key. +AWS Secret Access Key (password). 
+Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +secret_access_key> SECRET_ACCESS_KEY + +Option region. +Region where your bucket will be created and your data stored. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / US East (N. Virginia) + \ (us-east-1) + 2 / EU (Ireland) + \ (eu-west-1) + 3 / EU (London) + \ (eu-west-2) +region> 3 + +Option endpoint. +Endpoint for Rabata Object Storage. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / US East (N. Virginia) + \ (s3.us-east-1.rabata.io) + 2 / EU West (Ireland) + \ (s3.eu-west-1.rabata.io) + 3 / EU West (London) + \ (s3.eu-west-2.rabata.io) +endpoint> 3 + +Option location_constraint. +location where your bucket will be created and your data stored. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / US East (N. Virginia) + \ (us-east-1) + 2 / EU (Ireland) + \ (eu-west-1) + 3 / EU (London) + \ (eu-west-2) +location_constraint> 3 + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: s3 +- provider: Rabata +- access_key_id: ACCESS_KEY_ID +- secret_access_key: SECRET_ACCESS_KEY +- region: eu-west-2 +- endpoint: s3.eu-west-2.rabata.io +- location_constraint: eu-west-2 +Keep this "rabata" remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y + +Current remotes: + +Name Type +==== ==== +rabata s3 +``` + ### RackCorp {#RackCorp} [RackCorp Object Storage](https://www.rackcorp.com/storage/s3storage) is an S3 compatible object storage platform from your friendly cloud provider RackCorp. 
diff --git a/docs/content/sharefile.md b/docs/content/sharefile.md index d8e8501c8..65280c876 100644 --- a/docs/content/sharefile.md +++ b/docs/content/sharefile.md @@ -84,7 +84,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from Citrix ShareFile. This only runs from the moment it opens diff --git a/docs/content/smb.md b/docs/content/smb.md index 0985ebcac..7fcd91266 100644 --- a/docs/content/smb.md +++ b/docs/content/smb.md @@ -21,7 +21,7 @@ you started to share on Windows. On smbd, it's the section title in `smb.conf` (usually in `/etc/samba/`) file. You can find shares by querying the root if you're unsure (e.g. `rclone lsd remote:`). -You can't access to the shared printers from rclone, obviously. +You can't access the shared printers from rclone, obviously. You can't use Anonymous access for logging in. You have to use the `guest` user with an empty password instead. The rclone client tries to avoid 8.3 names when diff --git a/docs/content/yandex.md b/docs/content/yandex.md index ae995df54..ae59e836c 100644 --- a/docs/content/yandex.md +++ b/docs/content/yandex.md @@ -61,7 +61,7 @@ y/e/d> y ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. +machine without an internet-connected web browser available. Note that rclone runs a webserver on your local machine to collect the token as returned from Yandex Disk. This only runs from the moment it diff --git a/docs/content/zoho.md b/docs/content/zoho.md index 9b889c26c..e538d7996 100644 --- a/docs/content/zoho.md +++ b/docs/content/zoho.md @@ -80,7 +80,7 @@ y/e/d> ``` See the [remote setup docs](/remote_setup/) for how to set it up on a -machine with no Internet browser available. 
+machine without an internet-connected web browser available. Rclone runs a webserver on your local computer to collect the authorization token from Zoho Workdrive. This is only from the moment diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index dd67fa9fa..3180139dd 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -19,6 +19,7 @@ Filtering GUI Remote Control + Remote Setup Changelog Bugs FAQ @@ -107,7 +108,6 @@ SMB / CIFS Storj SugarSync - Terabox Uloz.to Uptobox Union (merge backends) diff --git a/fs/accounting/stats.go b/fs/accounting/stats.go index 64f160953..fbc46bebb 100644 --- a/fs/accounting/stats.go +++ b/fs/accounting/stats.go @@ -22,48 +22,52 @@ const ( averageStopAfter = time.Minute ) -// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list +// MaxCompletedTransfers specifies the default maximum number of +// completed transfers in startedTransfers list. This can be adjusted +// for a given StatsInfo by calling the SetMaxCompletedTransfers +// method. 
var MaxCompletedTransfers = 100 // StatsInfo accounts all transfers // N.B.: if this struct is modified, please remember to also update sum() function in stats_groups // to correctly count the updated fields type StatsInfo struct { - mu sync.RWMutex - ctx context.Context - ci *fs.ConfigInfo - bytes int64 - errors int64 - lastError error - fatalError bool - retryError bool - retryAfter time.Time - checks int64 - checking *transferMap - checkQueue int - checkQueueSize int64 - transfers int64 - transferring *transferMap - transferQueue int - transferQueueSize int64 - listed int64 - renames int64 - renameQueue int - renameQueueSize int64 - deletes int64 - deletesSize int64 - deletedDirs int64 - inProgress *inProgress - startedTransfers []*Transfer // currently active transfers - oldTimeRanges timeRanges // a merged list of time ranges for the transfers - oldDuration time.Duration // duration of transfers we have culled - group string - startTime time.Time // the moment these stats were initialized or reset - average averageValues - serverSideCopies int64 - serverSideCopyBytes int64 - serverSideMoves int64 - serverSideMoveBytes int64 + mu sync.RWMutex + ctx context.Context + ci *fs.ConfigInfo + bytes int64 + errors int64 + lastError error + fatalError bool + retryError bool + retryAfter time.Time + checks int64 + checking *transferMap + checkQueue int + checkQueueSize int64 + transfers int64 + transferring *transferMap + transferQueue int + transferQueueSize int64 + listed int64 + renames int64 + renameQueue int + renameQueueSize int64 + deletes int64 + deletesSize int64 + deletedDirs int64 + inProgress *inProgress + startedTransfers []*Transfer // currently active transfers + oldTimeRanges timeRanges // a merged list of time ranges for the transfers + oldDuration time.Duration // duration of transfers we have culled + group string + startTime time.Time // the moment these stats were initialized or reset + average averageValues + serverSideCopies int64 + 
serverSideCopyBytes int64 + serverSideMoves int64 + serverSideMoveBytes int64 + maxCompletedTransfers int } type averageValues struct { @@ -81,17 +85,26 @@ type averageValues struct { func NewStats(ctx context.Context) *StatsInfo { ci := fs.GetConfig(ctx) s := &StatsInfo{ - ctx: ctx, - ci: ci, - checking: newTransferMap(ci.Checkers, "checking"), - transferring: newTransferMap(ci.Transfers, "transferring"), - inProgress: newInProgress(ctx), - startTime: time.Now(), - average: averageValues{}, + ctx: ctx, + ci: ci, + checking: newTransferMap(ci.Checkers, "checking"), + transferring: newTransferMap(ci.Transfers, "transferring"), + inProgress: newInProgress(ctx), + startTime: time.Now(), + average: averageValues{}, + maxCompletedTransfers: MaxCompletedTransfers, } return s } +// SetMaxCompletedTransfers sets the maximum number of completed transfers to keep. +func (s *StatsInfo) SetMaxCompletedTransfers(n int) *StatsInfo { + s.mu.Lock() + s.maxCompletedTransfers = n + s.mu.Unlock() + return s +} + // RemoteStats returns stats for rc // // If short is true then the transfers and checkers won't be added. @@ -912,22 +925,31 @@ func (s *StatsInfo) RemoveTransfer(transfer *Transfer) { } // PruneTransfers makes sure there aren't too many old transfers by removing -// single finished transfer. -func (s *StatsInfo) PruneTransfers() { - if MaxCompletedTransfers < 0 { - return - } +// a single finished transfer. Returns true if it removed a transfer. 
+func (s *StatsInfo) PruneTransfers() bool { s.mu.Lock() + defer s.mu.Unlock() + if s.maxCompletedTransfers < 0 { + return false + } + removed := false // remove a transfer from the start if we are over quota - if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers { + if len(s.startedTransfers) > s.maxCompletedTransfers+s.ci.Transfers { for i, tr := range s.startedTransfers { if tr.IsDone() { s._removeTransfer(tr, i) + removed = true break } } } - s.mu.Unlock() + return removed +} + +// RemoveDoneTransfers removes all Done transfers. +func (s *StatsInfo) RemoveDoneTransfers() { + for s.PruneTransfers() { + } } // AddServerSideMove counts a server side move diff --git a/fs/accounting/stats_test.go b/fs/accounting/stats_test.go index 6fcc09837..317cd07cd 100644 --- a/fs/accounting/stats_test.go +++ b/fs/accounting/stats_test.go @@ -465,3 +465,27 @@ func TestPruneTransfers(t *testing.T) { }) } } + +func TestRemoveDoneTransfers(t *testing.T) { + ctx := context.Background() + s := NewStats(ctx) + const transfers = 10 + for i := int64(1); i <= int64(transfers); i++ { + s.AddTransfer(&Transfer{ + startedAt: time.Unix(i, 0), + completedAt: time.Unix(i+1, 0), + }) + } + + s.mu.Lock() + assert.Equal(t, time.Duration(transfers)*time.Second, s._totalDuration()) + assert.Equal(t, transfers, len(s.startedTransfers)) + s.mu.Unlock() + + s.RemoveDoneTransfers() + + s.mu.Lock() + assert.Equal(t, time.Duration(transfers)*time.Second, s._totalDuration()) + assert.Equal(t, transfers, len(s.startedTransfers)) + s.mu.Unlock() +} diff --git a/fs/bwtimetable.go b/fs/bwtimetable.go index 9a2173862..f17e3ee30 100644 --- a/fs/bwtimetable.go +++ b/fs/bwtimetable.go @@ -151,7 +151,7 @@ func (x *BwTimetable) Set(s string) error { } // Split the timetable string by both spaces and semicolons - for _, tok := range strings.FieldsFunc(s, func(r rune) bool { + for tok := range strings.FieldsFuncSeq(s, func(r rune) bool { return r == ' ' || r == ';' }) { tv := strings.Split(tok, ",") diff 
--git a/fs/config/authorize.go b/fs/config/authorize.go index 8f3aa86ce..37be3ae43 100644 --- a/fs/config/authorize.go +++ b/fs/config/authorize.go @@ -12,9 +12,9 @@ import ( // // It expects 1, 2 or 3 arguments // -// rclone authorize "fs name" -// rclone authorize "fs name" "base64 encoded JSON blob" -// rclone authorize "fs name" "client id" "client secret" +// rclone authorize "backend name" +// rclone authorize "backend name" "base64 encoded JSON blob" +// rclone authorize "backend name" "client id" "client secret" func Authorize(ctx context.Context, args []string, noAutoBrowser bool, templateFile string) error { ctx = suppressConfirm(ctx) ctx = fs.ConfigOAuthOnly(ctx) diff --git a/fs/config/configmap/configmap.go b/fs/config/configmap/configmap.go index ee8d4918f..d52514ef2 100644 --- a/fs/config/configmap/configmap.go +++ b/fs/config/configmap/configmap.go @@ -136,9 +136,11 @@ func (c Simple) Set(key, value string) { c[key] = value } -// String the map value the same way the config parser does, but with +// string the map value the same way the config parser does, but with // sorted keys for reproducibility. -func (c Simple) String() string { +// +// If human is set then use fewer quotes. 
+func (c Simple) string(human bool) string { var ks = make([]string, 0, len(c)) for k := range c { ks = append(ks, k) @@ -150,20 +152,41 @@ func (c Simple) String() string { out.WriteRune(',') } out.WriteString(k) + v := c[k] + if human && v == "true" { + continue + } out.WriteRune('=') - out.WriteRune('\'') - for _, ch := range c[k] { - out.WriteRune(ch) - // Escape ' as '' - if ch == '\'' { + if !human || strings.ContainsAny(v, `'":=,`) { + out.WriteRune('\'') + for _, ch := range v { out.WriteRune(ch) + // Escape ' as '' + if ch == '\'' { + out.WriteRune(ch) + } } + out.WriteRune('\'') + } else { + out.WriteString(v) } - out.WriteRune('\'') } return out.String() } +// Human converts the map value the same way the config parser does, +// but with sorted keys for reproducibility. This does it in human +// readable form with fewer quotes. +func (c Simple) Human() string { + return c.string(true) +} + +// String the map value the same way the config parser does, but with +// sorted keys for reproducibility. 
+func (c Simple) String() string { + return c.string(false) +} + // Encode from c into a string suitable for putting on the command line func (c Simple) Encode() (string, error) { if len(c) == 0 { diff --git a/fs/config/configmap/configmap_external_test.go b/fs/config/configmap/configmap_external_test.go new file mode 100644 index 000000000..236f02933 --- /dev/null +++ b/fs/config/configmap/configmap_external_test.go @@ -0,0 +1,121 @@ +package configmap_test + +import ( + "fmt" + "testing" + + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/fspath" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSimpleString(t *testing.T) { + for _, tt := range []struct { + name string + want string + in configmap.Simple + }{ + {name: "Nil", want: "", in: configmap.Simple(nil)}, + {name: "Empty", want: "", in: configmap.Simple{}}, + {name: "Basic", want: "config1='one'", in: configmap.Simple{ + "config1": "one", + }}, + {name: "Truthy", want: "config1='true',config2='true'", in: configmap.Simple{ + "config1": "true", + "config2": "true", + }}, + {name: "Quotable", want: `config1='"one"',config2=':two:',config3='''three''',config4='=four=',config5=',five,'`, in: configmap.Simple{ + "config1": `"one"`, + "config2": `:two:`, + "config3": `'three'`, + "config4": `=four=`, + "config5": `,five,`, + }}, + {name: "Order", want: "config1='one',config2='two',config3='three',config4='four',config5='five'", in: configmap.Simple{ + "config5": "five", + "config4": "four", + "config3": "three", + "config2": "two", + "config1": "one", + }}, + {name: "Escaping", want: "apple='',config1='o''n''e'", in: configmap.Simple{ + "config1": "o'n'e", + "apple": "", + }}, + } { + t.Run(tt.name, func(t *testing.T) { + // Check forwards + params := tt.in.String() + assert.Equal(t, tt.want, params) + + // Check config round trips through config parser + remote := ":local," + params + ":" + if params == "" { + remote = ":local:" + } + 
what := fmt.Sprintf("remote = %q", remote) + parsed, err := fspath.Parse(remote) + require.NoError(t, err, what) + if len(parsed.Config) != 0 || len(tt.in) != 0 { + assert.Equal(t, tt.in, parsed.Config, what) + } + }) + } + +} + +func TestSimpleHuman(t *testing.T) { + for _, tt := range []struct { + name string + want string + in configmap.Simple + }{ + {name: "Nil", want: "", in: configmap.Simple(nil)}, + {name: "Empty", want: "", in: configmap.Simple{}}, + {name: "Basic", want: "config1=one", in: configmap.Simple{ + "config1": "one", + }}, + {name: "Truthy", want: "config1,config2", in: configmap.Simple{ + "config1": "true", + "config2": "true", + }}, + {name: "Quotable", want: `config1='"one"',config2=':two:',config3='''three''',config4='=four=',config5=',five,'`, in: configmap.Simple{ + "config1": `"one"`, + "config2": `:two:`, + "config3": `'three'`, + "config4": `=four=`, + "config5": `,five,`, + }}, + {name: "Order", want: "config1=one,config2=two,config3=three,config4=four,config5=five", in: configmap.Simple{ + "config5": "five", + "config4": "four", + "config3": "three", + "config2": "two", + "config1": "one", + }}, + {name: "Escaping", want: "apple=,config1='o''n''e'", in: configmap.Simple{ + "config1": "o'n'e", + "apple": "", + }}, + } { + t.Run(tt.name, func(t *testing.T) { + // Check forwards + params := tt.in.Human() + assert.Equal(t, tt.want, params) + + // Check config round trips through config parser + remote := ":local," + params + ":" + if params == "" { + remote = ":local:" + } + what := fmt.Sprintf("remote = %q", remote) + parsed, err := fspath.Parse(remote) + require.NoError(t, err, what) + if len(parsed.Config) != 0 || len(tt.in) != 0 { + assert.Equal(t, tt.in, parsed.Config, what) + } + }) + } + +} diff --git a/fs/config/configmap/configmap_test.go b/fs/config/configmap/configmap_test.go index 85d4686f1..b68847360 100644 --- a/fs/config/configmap/configmap_test.go +++ b/fs/config/configmap/configmap_test.go @@ -246,30 +246,6 @@ func 
TestConfigMapClearSetters(t *testing.T) { assert.Equal(t, []Setter(nil), m.setters) } -func TestSimpleString(t *testing.T) { - // Basic - assert.Equal(t, "", Simple(nil).String()) - assert.Equal(t, "", Simple{}.String()) - assert.Equal(t, "config1='one'", Simple{ - "config1": "one", - }.String()) - - // Check ordering - assert.Equal(t, "config1='one',config2='two',config3='three',config4='four',config5='five'", Simple{ - "config5": "five", - "config4": "four", - "config3": "three", - "config2": "two", - "config1": "one", - }.String()) - - // Check escaping - assert.Equal(t, "apple='',config1='o''n''e'", Simple{ - "config1": "o'n'e", - "apple": "", - }.String()) -} - func TestSimpleEncode(t *testing.T) { for _, test := range []struct { in Simple diff --git a/fs/config/configstruct/configstruct.go b/fs/config/configstruct/configstruct.go index f90806a2d..ba421f7f2 100644 --- a/fs/config/configstruct/configstruct.go +++ b/fs/config/configstruct/configstruct.go @@ -261,7 +261,7 @@ func Set(config configmap.Getter, opt any) (err error) { } // setIfSameType set aPtr with b if they are the same type or returns false. 
-func setIfSameType(aPtr interface{}, b interface{}) bool { +func setIfSameType(aPtr any, b any) bool { aVal := reflect.ValueOf(aPtr).Elem() bVal := reflect.ValueOf(b) diff --git a/fs/config/flags/flags.go b/fs/config/flags/flags.go index 953a3d0fd..912c0b2d6 100644 --- a/fs/config/flags/flags.go +++ b/fs/config/flags/flags.go @@ -70,7 +70,7 @@ func (gs *Groups) Include(groupsString string) *Groups { return gs } want := map[string]bool{} - for _, groupName := range strings.Split(groupsString, ",") { + for groupName := range strings.SplitSeq(groupsString, ",") { _, ok := All.ByName[groupName] if !ok { fs.Fatalf(nil, "Couldn't find group %q in command annotation", groupName) @@ -173,7 +173,7 @@ func installFlag(flags *pflag.FlagSet, name string, groupsString string) { // Add flag to Group if it is a global flag if groupsString != "" && flags == pflag.CommandLine { - for _, groupName := range strings.Split(groupsString, ",") { + for groupName := range strings.SplitSeq(groupsString, ",") { if groupName == "rc-" { groupName = "RC" } diff --git a/fs/config/rc.go b/fs/config/rc.go index c87dcb318..62484541c 100644 --- a/fs/config/rc.go +++ b/fs/config/rc.go @@ -145,7 +145,6 @@ func rcProviders(ctx context.Context, in rc.Params) (out rc.Params, err error) { func init() { for _, name := range []string{"create", "update", "password"} { - name := name extraHelp := "" if name == "create" { extraHelp = "- type - type of the new remote\n" diff --git a/fs/dirtree/dirtree_test.go b/fs/dirtree/dirtree_test.go index 45cd9a3aa..8a88fc18d 100644 --- a/fs/dirtree/dirtree_test.go +++ b/fs/dirtree/dirtree_test.go @@ -213,7 +213,7 @@ func BenchmarkCheckParents(b *testing.B) { dt.Add(o) } b.StartTimer() - for n := 0; n < b.N; n++ { + for b.Loop() { dt.CheckParents("") } }) diff --git a/fs/list/helpers_test.go b/fs/list/helpers_test.go index 8973b9ddd..412857c8f 100644 --- a/fs/list/helpers_test.go +++ b/fs/list/helpers_test.go @@ -62,7 +62,7 @@ func TestListRHelperSend(t *testing.T) { 
helper := NewHelper(callback) // Add 100 entries to force the callback to be invoked - for i := 0; i < 100; i++ { + for range 100 { require.NoError(t, helper.Add(entry)) } @@ -120,7 +120,7 @@ var _ fs.ListPer = (*mockListPfs)(nil) func TestListWithListP(t *testing.T) { ctx := context.Background() var entries fs.DirEntries - for i := 0; i < 26; i++ { + for i := range 26 { entries = append(entries, mockobject.New(fmt.Sprintf("%c", 'A'+i))) } t.Run("NoError", func(t *testing.T) { diff --git a/fs/list/sorter.go b/fs/list/sorter.go index cc1d55a5b..b2674b294 100644 --- a/fs/list/sorter.go +++ b/fs/list/sorter.go @@ -222,7 +222,6 @@ func (lh *listHelper) send(max int) (err error) { g, gCtx := errgroup.WithContext(lh.ls.ctx) g.SetLimit(lh.ls.ci.Checkers) for i, key := range lh.keys { - i, key := i, key // can remove when go1.22 is minimum version g.Go(func() error { lh.entries[i], lh.errs[i] = lh.ls.keyToEntry(gCtx, key) return nil diff --git a/fs/list/sorter_test.go b/fs/list/sorter_test.go index 7d34e0975..b8db3725e 100644 --- a/fs/list/sorter_test.go +++ b/fs/list/sorter_test.go @@ -144,7 +144,7 @@ func testSorterExt(t *testing.T, cutoff, N int, wantExtSort bool, keyFn KeyFn) { // Make the directory entries entriesMap := make(map[string]fs.DirEntry, N) - for i := 0; i < N; i++ { + for i := range N { remote := fmt.Sprintf("%010d", i) prefix := "a" if i%3 == 0 { diff --git a/fs/march/march.go b/fs/march/march.go index 67a5b5bfa..841ca4cc7 100644 --- a/fs/march/march.go +++ b/fs/march/march.go @@ -137,20 +137,22 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool, keyFn ) return func(dir string, callback fs.ListRCallback) (err error) { mu.Lock() - defer mu.Unlock() if !started { dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List dirs, dirsErr = walk.NewDirTree(dirCtx, f, m.Dir, includeAll, ci.MaxDepth) started = true } if dirsErr != nil { + mu.Unlock() return dirsErr } 
entries, ok := dirs[dir] if !ok { + mu.Unlock() return fs.ErrorDirNotFound } delete(dirs, dir) + mu.Unlock() // We use a stable sort here just in case there are // duplicates. Assuming the remote delivers the entries in a diff --git a/fs/mimetype.go b/fs/mimetype.go index 93f82172d..44d423738 100644 --- a/fs/mimetype.go +++ b/fs/mimetype.go @@ -32,7 +32,7 @@ func init() { {"video/x-matroska", ".mpv,.mkv"}, {"application/x-subrip", ".srt"}, } { - for _, ext := range strings.Split(t.extensions, ",") { + for ext := range strings.SplitSeq(t.extensions, ",") { if mime.TypeByExtension(ext) == "" { err := mime.AddExtensionType(ext, t.mimeType) if err != nil { diff --git a/fs/operations/rc.go b/fs/operations/rc.go index cd9160f73..644a947e9 100644 --- a/fs/operations/rc.go +++ b/fs/operations/rc.go @@ -160,7 +160,6 @@ func rcAbout(ctx context.Context, in rc.Params) (out rc.Params, err error) { func init() { for _, copy := range []bool{false, true} { - copy := copy name := "Move" if copy { name = "Copy" @@ -217,7 +216,6 @@ func init() { {name: "settier", title: "Changes storage tier or class on all files in the path", noRemote: true}, {name: "settierfile", title: "Changes storage tier or class on the single file pointed to", noCommand: true}, } { - op := op var remote, command string if !op.noRemote { remote = "- remote - a path within that remote e.g. 
\"dir\"\n" diff --git a/fs/rc/config.go b/fs/rc/config.go index 2fb0fc424..7aa0c9fed 100644 --- a/fs/rc/config.go +++ b/fs/rc/config.go @@ -64,7 +64,7 @@ func filterBlocks(in Params, f func(oi fs.OptionsInfo)) (err error) { return err } blocks := map[string]struct{}{} - for _, name := range strings.Split(blocksStr, ",") { + for name := range strings.SplitSeq(blocksStr, ",") { if name != "" { blocks[name] = struct{}{} } diff --git a/fs/rc/jobs/job_test.go b/fs/rc/jobs/job_test.go index dcce3879b..de90e26a4 100644 --- a/fs/rc/jobs/job_test.go +++ b/fs/rc/jobs/job_test.go @@ -206,7 +206,7 @@ func TestJobRunPanic(t *testing.T) { runtime.Gosched() // yield to make sure job is updated // Wait a short time for the panic to propagate - for i := uint(0); i < 10; i++ { + for i := range uint(10) { job.mu.Lock() e := job.Error job.mu.Unlock() @@ -539,8 +539,7 @@ func TestOnFinish(t *testing.T) { func TestOnFinishAlreadyFinished(t *testing.T) { jobID.Store(0) done := make(chan struct{}) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + ctx := t.Context() job, _, err := NewJob(ctx, shortFn, rc.Params{}) assert.NoError(t, err) diff --git a/fs/sync/rc.go b/fs/sync/rc.go index 42ca315f6..6577e498b 100644 --- a/fs/sync/rc.go +++ b/fs/sync/rc.go @@ -8,7 +8,6 @@ import ( func init() { for _, name := range []string{"sync", "copy", "move"} { - name := name moveHelp := "" if name == "move" { moveHelp = "- deleteEmptySrcDirs - delete empty src directories if set\n" diff --git a/fs/sync/sync.go b/fs/sync/sync.go index e5feb5e3c..a425345a4 100644 --- a/fs/sync/sync.go +++ b/fs/sync/sync.go @@ -743,7 +743,7 @@ func parseTrackRenamesStrategy(strategies string) (strategy trackRenamesStrategy if len(strategies) == 0 { return strategy, nil } - for _, s := range strings.Split(strategies, ",") { + for s := range strings.SplitSeq(strategies, ",") { switch s { case "hash": strategy |= trackRenamesStrategyHash diff --git a/fs/sync/sync_transform_test.go 
b/fs/sync/sync_transform_test.go index 8d0621095..d43594509 100644 --- a/fs/sync/sync_transform_test.go +++ b/fs/sync/sync_transform_test.go @@ -136,7 +136,7 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item { items := []fstest.Item{} for _, c := range alphabet { var out strings.Builder - for i := rune(0); i < 7; i++ { + for i := range rune(7) { out.WriteRune(c + i) } fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String())) diff --git a/fstest/runs/report.go b/fstest/runs/report.go index f3c7580aa..39787a2fb 100644 --- a/fstest/runs/report.go +++ b/fstest/runs/report.go @@ -1,15 +1,16 @@ package runs import ( + "bytes" "encoding/json" "fmt" "html/template" "os" "os/exec" "path" - "regexp" "runtime" "sort" + "strings" "time" "github.com/rclone/rclone/fs" @@ -45,14 +46,6 @@ type ReportRun struct { Runs Runs } -// Parse version numbers -// v1.49.0 -// v1.49.0-031-g2298834e-beta -// v1.49.0-032-g20793a5f-sharefile-beta -// match 1 is commit number -// match 2 is branch name -var parseVersion = regexp.MustCompile(`^v(?:[0-9.]+)-(?:\d+)-g([0-9a-f]+)(?:-(.*))?-beta$`) - // FIXME take -issue or -pr parameter... // NewReport initialises and returns a Report @@ -82,17 +75,33 @@ func NewReport(Opt RunOpt) *Report { // Online version r.URL = Opt.URLBase + r.DateTime + "/index.html" - // Get branch/commit out of version - parts := parseVersion.FindStringSubmatch(r.Version) - if len(parts) >= 3 { - r.Commit = parts[1] - r.Branch = parts[2] + // Get branch/commit + r.Branch, r.Commit = gitBranchAndCommit() + + return r +} + +// gitBranchAndCommit returns the current branch and commit hash. +// +// It returns "" on error. 
+func gitBranchAndCommit() (branch, commit string) { + // branch (empty if detached) + var b bytes.Buffer + cmdB := exec.Command("git", "symbolic-ref", "--short", "-q", "HEAD") + cmdB.Stdout = &b + if e := cmdB.Run(); e == nil { + branch = strings.TrimSpace(b.String()) } - if r.Branch == "" { - r.Branch = "master" + + // commit (full SHA) + var c bytes.Buffer + cmdC := exec.Command("git", "rev-parse", "HEAD") + cmdC.Stdout = &c + if e := cmdC.Run(); e == nil { + commit = strings.TrimSpace(c.String()) } - return r + return branch, commit } // End should be called when the tests are complete diff --git a/fstest/runs/run.go b/fstest/runs/run.go index 2859e7392..c898d3919 100644 --- a/fstest/runs/run.go +++ b/fstest/runs/run.go @@ -157,7 +157,7 @@ func testsToRegexp(tests []string) string { // Make a trie showing which parts are used at each level for _, test := range tests { parent := split - for _, name := range strings.Split(test, "/") { + for name := range strings.SplitSeq(test, "/") { current := parent[name] if current == nil { current = trie{} diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml index b5311bae7..5cec06838 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -139,6 +139,7 @@ backends: - backend: "compress" remote: "TestCompressDrive:" fastlist: false + extratime: 2.0 - backend: "compress" remote: "TestCompressS3:" fastlist: false @@ -610,6 +611,7 @@ backends: - backend: "zoho" remote: "TestZoho:" fastlist: false + extratime: 2.0 tests: - backend - backend: "hdfs" diff --git a/fstest/testserver/init.d/TestSwiftAIO b/fstest/testserver/init.d/TestSwiftAIO index 7a04d63ff..7e20bd67c 100755 --- a/fstest/testserver/init.d/TestSwiftAIO +++ b/fstest/testserver/init.d/TestSwiftAIO @@ -8,10 +8,12 @@ PORT=28628 . $(dirname "$0")/docker.bash start() { + # We need to replace the remakerings in the container to create Policy-1. 
docker run --rm -d --name ${NAME} \ -p 127.0.0.1:${PORT}:8080 \ - bouncestorage/swift-aio - + -v $(dirname "$0")/TestSwiftAIO.d/remakerings:/etc/swift/remakerings:ro \ + openstackswift/saio + echo type=swift echo env_auth=false echo user=test:tester diff --git a/fstest/testserver/init.d/TestSwiftAIO.d/remakerings b/fstest/testserver/init.d/TestSwiftAIO.d/remakerings new file mode 100755 index 000000000..27c49b17f --- /dev/null +++ b/fstest/testserver/init.d/TestSwiftAIO.d/remakerings @@ -0,0 +1,46 @@ +#!/bin/sh + +if ! grep -q "^\[storage-policy:1\]" swift.conf; then +    cat <<EOF >> swift.conf + +[storage-policy:1] +name = Policy-1 +EOF +fi + +rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + +swift-ring-builder object.builder create 10 1 1 +swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d0 1 +swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d1 1 +swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d2 1 +swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d3 1 +swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d4 1 +swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d5 1 +swift-ring-builder object.builder rebalance +swift-ring-builder container.builder create 10 1 1 +swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d0 1 +swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d1 1 +swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d2 1 +swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d3 1 +swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d4 1 +swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d5 1 +swift-ring-builder container.builder rebalance +swift-ring-builder account.builder create 10 1 1 +swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d0 1 +swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d1 1 +swift-ring-builder account.builder add 
r1z1-127.0.0.1:6202/swift-d2 1 +swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d3 1 +swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d4 1 +swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d5 1 +swift-ring-builder account.builder rebalance + +# For Policy-1: +swift-ring-builder object-1.builder create 10 1 1 +swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d0 1 +swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d1 1 +swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d2 1 +swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d3 1 +swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d4 1 +swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d5 1 +swift-ring-builder object-1.builder rebalance diff --git a/fstest/testserver/init.d/TestSwiftAIOsegments b/fstest/testserver/init.d/TestSwiftAIOsegments index 197487bcc..db02630ee 100755 --- a/fstest/testserver/init.d/TestSwiftAIOsegments +++ b/fstest/testserver/init.d/TestSwiftAIOsegments @@ -8,9 +8,11 @@ PORT=28632 . $(dirname "$0")/docker.bash start() { + # We need to replace the remakerings in the container to create Policy-1. 
docker run --rm -d --name ${NAME} \ -p 127.0.0.1:${PORT}:8080 \ - bouncestorage/swift-aio + -v $(dirname "$0")/TestSwiftAIO.d/remakerings:/etc/swift/remakerings:ro \ + openstackswift/saio echo type=swift echo env_auth=false diff --git a/fstest/testserver/testserver.go b/fstest/testserver/testserver.go index 2d7fe44bd..18e7eabfe 100644 --- a/fstest/testserver/testserver.go +++ b/fstest/testserver/testserver.go @@ -82,7 +82,7 @@ func start(name string) error { // parse the output and set environment vars from it var connect string var connectDelay time.Duration - for _, line := range bytes.Split(out, []byte("\n")) { + for line := range bytes.SplitSeq(out, []byte("\n")) { line = bytes.TrimSpace(line) part := matchLine.FindSubmatch(line) if part != nil { @@ -110,7 +110,7 @@ func start(name string) error { return nil } // If we got a _connect value then try to connect to it - const maxTries = 30 + const maxTries = 100 var rdBuf = make([]byte, 1) for i := 1; i <= maxTries; i++ { if i != 0 { @@ -175,7 +175,16 @@ func Start(remoteName string) (fn func(), err error) { if running[name] <= 0 { // if server isn't running check to see if this server has // been started already but not by us and stop it if so - if os.Getenv(envKey(name, "type")) == "" && isRunning(name) { + const maxTries = 10 + for i := 1; i <= maxTries; i++ { + if os.Getenv(envKey(name, "type")) == "" && !isRunning(name) { + fs.Logf(name, "Stopped server") + break + } + if i != 1 { + time.Sleep(time.Second) + fs.Logf(name, "Attempting to stop %s try %d/%d", name, i, maxTries) + } stop(name) } if !isRunning(name) { @@ -211,6 +220,6 @@ func stop(name string) { fs.Errorf(name, "Failed to stop server: %v", err) } running[name] = 0 - fs.Logf(name, "Stopped server") + fs.Logf(name, "Stopping server") } } diff --git a/go.mod b/go.mod index 8cb3066b9..a7f6ea9b7 100644 --- a/go.mod +++ b/go.mod @@ -4,36 +4,36 @@ go 1.24.0 require ( bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5 - 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2 github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 - github.com/Files-com/files-sdk-go/v3 v3.2.218 + github.com/Files-com/files-sdk-go/v3 v3.2.242 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 github.com/abbot/go-http-auth v0.4.0 github.com/anacrolix/dms v1.7.2 - github.com/anacrolix/log v0.16.0 + github.com/anacrolix/log v0.17.0 github.com/atotto/clipboard v0.1.4 - github.com/aws/aws-sdk-go-v2 v1.38.0 - github.com/aws/aws-sdk-go-v2/config v1.31.0 - github.com/aws/aws-sdk-go-v2/credentials v1.18.4 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 - github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0 - github.com/aws/smithy-go v1.22.5 + github.com/aws/aws-sdk-go-v2 v1.39.1 + github.com/aws/aws-sdk-go-v2/config v1.31.10 + github.com/aws/aws-sdk-go-v2/credentials v1.18.14 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.8 + github.com/aws/aws-sdk-go-v2/service/s3 v1.88.2 + github.com/aws/smithy-go v1.23.0 github.com/buengese/sgzip v0.1.1 - github.com/cloudinary/cloudinary-go/v2 v2.12.0 + github.com/cloudinary/cloudinary-go/v2 v2.13.0 github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc github.com/colinmarc/hdfs/v2 v2.4.0 github.com/coreos/go-semver v0.3.1 - github.com/coreos/go-systemd/v22 v22.5.0 + github.com/coreos/go-systemd/v22 v22.6.0 github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00 github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 - github.com/gabriel-vasile/mimetype v1.4.9 - github.com/gdamore/tcell/v2 v2.8.1 - github.com/go-chi/chi/v5 v5.2.2 + 
github.com/gabriel-vasile/mimetype v1.4.10 + github.com/gdamore/tcell/v2 v2.9.0 + github.com/go-chi/chi/v5 v5.2.3 github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 github.com/go-git/go-billy/v5 v5.6.2 github.com/google/uuid v1.6.0 @@ -47,49 +47,49 @@ require ( github.com/klauspost/compress v1.18.0 github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 - github.com/lanrat/extsort v1.4.0 + github.com/lanrat/extsort v1.4.2 github.com/mattn/go-colorable v0.1.14 - github.com/mattn/go-runewidth v0.0.16 + github.com/mattn/go-runewidth v0.0.17 github.com/minio/minio-go/v7 v7.0.95 github.com/mitchellh/go-homedir v1.1.0 github.com/moby/sys/mountinfo v0.7.2 github.com/ncw/swift/v2 v2.0.4 - github.com/oracle/oci-go-sdk/v65 v65.98.0 + github.com/oracle/oci-go-sdk/v65 v65.101.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/peterh/liner v1.2.2 github.com/pkg/sftp v1.13.9 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 - github.com/prometheus/client_golang v1.23.0 + github.com/prometheus/client_golang v1.23.2 github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 - github.com/quasilyte/go-ruleguard/dsl v0.3.22 + github.com/quasilyte/go-ruleguard/dsl v0.3.23 github.com/rclone/gofakes3 v0.0.4 github.com/rfjakob/eme v1.1.2 github.com/rivo/uniseg v0.4.7 github.com/rogpeppe/go-internal v1.14.1 - github.com/shirou/gopsutil/v4 v4.25.7 + github.com/shirou/gopsutil/v4 v4.25.8 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 - github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.7 - github.com/stretchr/testify v1.10.0 - github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 + github.com/spf13/cobra v1.10.1 + github.com/spf13/pflag v1.0.10 + github.com/stretchr/testify v1.11.1 + github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c github.com/unknwon/goconfig v1.0.0 github.com/willscott/go-nfs v0.0.3 - 
github.com/winfsp/cgofuse v1.6.0 + github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471 github.com/xanzy/ssh-agent v0.3.3 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 github.com/yunify/qingstor-sdk-go/v3 v3.2.0 github.com/zeebo/blake3 v0.2.4 github.com/zeebo/xxh3 v1.0.2 - go.etcd.io/bbolt v1.4.2 - goftp.io/server/v2 v2.0.1 - golang.org/x/crypto v0.41.0 - golang.org/x/net v0.43.0 - golang.org/x/oauth2 v0.30.0 - golang.org/x/sync v0.16.0 - golang.org/x/sys v0.35.0 - golang.org/x/text v0.28.0 - golang.org/x/time v0.12.0 - google.golang.org/api v0.247.0 + go.etcd.io/bbolt v1.4.3 + goftp.io/server/v2 v2.0.2 + golang.org/x/crypto v0.42.0 + golang.org/x/net v0.44.0 + golang.org/x/oauth2 v0.31.0 + golang.org/x/sync v0.17.0 + golang.org/x/sys v0.36.0 + golang.org/x/text v0.29.0 + golang.org/x/time v0.13.0 + google.golang.org/api v0.250.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/validator.v2 v2.0.1 gopkg.in/yaml.v3 v3.0.1 @@ -97,11 +97,11 @@ require ( ) require ( - cloud.google.com/go/auth v0.16.4 // indirect + cloud.google.com/go/auth v0.16.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/compute/metadata v0.8.0 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect @@ -109,24 +109,25 @@ require ( github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect github.com/PuerkitoBio/goquery v1.10.3 // indirect github.com/akavel/rsrc v0.10.2 // indirect - github.com/anacrolix/generics v0.0.3 // indirect + github.com/anacrolix/generics v0.1.0 // 
indirect github.com/andybalholm/cascadia v1.3.3 // indirect github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 
// indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/bradenaw/juniper v0.15.3 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/calebcase/tmpfile v1.0.3 // indirect @@ -139,7 +140,7 @@ require ( github.com/cronokirby/saferith v0.33.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/ebitengine/purego v0.8.4 // indirect + github.com/ebitengine/purego v0.9.0 // indirect github.com/emersion/go-message v0.18.2 // indirect github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -163,7 +164,6 @@ require ( github.com/google/s2a-go v0.1.9 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect - github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect github.com/gorilla/schema v1.4.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -183,8 +183,8 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lpar/date v1.0.0 // indirect - github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect + github.com/lucasb-eyer/go-colorful v1.3.0 // indirect + github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/minio/crc64nvme v1.1.1 // indirect github.com/minio/md5-simd v1.1.2 // indirect @@ -199,10 +199,10 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.2 // indirect - 
github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.17.0 // indirect github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect - github.com/relvacode/iso8601 v1.6.0 // indirect + github.com/relvacode/iso8601 v1.7.0 // indirect github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect @@ -210,30 +210,31 @@ require ( github.com/samber/lo v1.51.0 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect github.com/sirupsen/logrus v1.9.3 // indirect + github.com/smartystreets/goconvey v1.8.1 // indirect github.com/sony/gobreaker v1.0.0 // indirect github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect - github.com/tinylib/msgp v1.3.0 // indirect + github.com/tinylib/msgp v1.4.0 // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect github.com/tklauser/numcpus v0.10.0 // indirect github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/errs v1.4.0 // indirect go.mongodb.org/mongo-driver v1.17.4 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect - go.opentelemetry.io/otel v1.37.0 // indirect - go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/trace v1.37.0 // indirect - go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 // indirect - golang.org/x/tools v0.36.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect - google.golang.org/grpc v1.74.2 // indirect - google.golang.org/protobuf v1.36.7 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // 
indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect + golang.org/x/tools v0.37.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.9 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect moul.io/http2curl/v2 v2.3.0 // indirect sigs.k8s.io/yaml v1.6.0 // indirect - storj.io/common v0.0.0-20250808122759-804533d519c1 // indirect + storj.io/common v0.0.0-20250918032746-784a656bec7e // indirect storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect storj.io/infectious v0.0.2 // indirect @@ -246,6 +247,7 @@ require ( github.com/ProtonMail/go-crypto v1.3.0 github.com/golang-jwt/jwt/v4 v4.5.2 github.com/pkg/xattr v0.4.12 - golang.org/x/mobile v0.0.0-20250808145247-395d808d53cd - golang.org/x/term v0.34.0 + github.com/pquerna/otp v1.5.0 + golang.org/x/mobile v0.0.0-20250911085028-6912353760cf + golang.org/x/term v0.35.0 ) diff --git a/go.sum b/go.sum index 409c60c5a..f3a86c7dc 100644 --- a/go.sum +++ b/go.sum @@ -15,8 +15,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/auth v0.16.4 h1:fXOAIQmkApVvcIn7Pc2+5J8QTMVbUGLscnSVNl11su8= -cloud.google.com/go/auth v0.16.4/go.mod h1:j10ncYwjX/g3cdX7GpEzsdM+d+ZNsXAbb6qXA7p1Y5M= +cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI= +cloud.google.com/go/auth v0.16.5/go.mod 
h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -25,8 +25,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA= -cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -39,10 +39,10 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 h1:Hr5FTipp7SL07o2FvoVOX9HRiRH3CR3Mj8pxqCcdD5A= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE= 
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= @@ -57,12 +57,12 @@ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= -github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Files-com/files-sdk-go/v3 v3.2.218 h1:tIvcbHXNY/bq+Sno6vajOJOxhe5XbU59Fa1ohOybK+s= -github.com/Files-com/files-sdk-go/v3 v3.2.218/go.mod h1:E0BaGQbcMUcql+AfubCR/iasWKBxX5UZPivnQGC2z0M= +github.com/Files-com/files-sdk-go/v3 v3.2.242 h1:mE2LHt6hpwacgntXIATo0JJ6MW2Hcthd3V4+GHrdlg4= +github.com/Files-com/files-sdk-go/v3 v3.2.242/go.mod h1:9nNJzlafE8PnMYGb8zbEKzWsVxfgx/LV2faJgP9HIZ0= github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk= github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= @@ -98,10 +98,10 @@ github.com/akavel/rsrc v0.10.2 h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw= github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/anacrolix/dms v1.7.2 h1:JAAJJIlXp+jT2yEah1EbR1AFpGALHL238uSKFXec2qw= github.com/anacrolix/dms v1.7.2/go.mod h1:excFJW5MKBhn5yt5ZMyeE9iFVqnO6tEGQl7YG/2tUoQ= -github.com/anacrolix/generics v0.0.3 h1:wMkQgQzq0obSy1tMkxDu7Ife7PsegOBWHDRaSW31EnM= -github.com/anacrolix/generics v0.0.3/go.mod h1:MN3ve08Z3zSV/rTuX/ouI4lNdlfTxgdafQJiLzyNRB8= -github.com/anacrolix/log v0.16.0 h1:DSuyb5kAJwl3Y0X1TRcStVrTS9ST9b0BHW+7neE4Xho= -github.com/anacrolix/log v0.16.0/go.mod h1:m0poRtlr41mriZlXBQ9SOVZ8yZBkLjOkDhd5Li5pITA= +github.com/anacrolix/generics v0.1.0 h1:r6OgogjCdml3K5A8ixUG0X9DM4jrQiMfIkZiBOGvIfg= +github.com/anacrolix/generics v0.1.0/go.mod h1:MN3ve08Z3zSV/rTuX/ouI4lNdlfTxgdafQJiLzyNRB8= +github.com/anacrolix/log v0.17.0 h1:cZvEGRPCbIg+WK+qAxWj/ap2Gj8cx1haOCSVxNZQpK4= +github.com/anacrolix/log v0.17.0/go.mod h1:m0poRtlr41mriZlXBQ9SOVZ8yZBkLjOkDhd5Li5pITA= github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= github.com/andybalholm/cascadia 
v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU= @@ -110,46 +110,48 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= -github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU= -github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg= -github.com/aws/aws-sdk-go-v2/config v1.31.0 h1:9yH0xiY5fUnVNLRWO0AtayqwU1ndriZdN78LlhruJR4= -github.com/aws/aws-sdk-go-v2/config v1.31.0/go.mod h1:VeV3K72nXnhbe4EuxxhzsDc/ByrCSlZwUnWH52Nde/I= -github.com/aws/aws-sdk-go-v2/credentials v1.18.4 h1:IPd0Algf1b+Qy9BcDp0sCUcIWdCQPSzDoMK3a8pcbUM= -github.com/aws/aws-sdk-go-v2/credentials v1.18.4/go.mod h1:nwg78FjH2qvsRM1EVZlX9WuGUJOL5od+0qvm0adEzHk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 h1:GicIdnekoJsjq9wqnvyi2elW6CGMSYKhdozE7/Svh78= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3/go.mod h1:R7BIi6WNC5mc1kfRM7XM/VHC3uRWkjc396sfabq4iOo= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 h1:o9RnO+YZ4X+kt5Z7Nvcishlz0nksIt2PIzDglLMP0vA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3/go.mod 
h1:+6aLJzOG1fvMOyzIySYjOFjcguGvVRL68R+uoRencN4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 h1:joyyUFhiTQQmVK6ImzNU9TQSNRNeD9kOklqTzyk5v6s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3/go.mod h1:+vNIyZQP3b3B1tSLI0lxvrU9cfM7gpdRXMFfm67ZcPc= +github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE= +github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= +github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4= +github.com/aws/aws-sdk-go-v2/config v1.31.10/go.mod h1:Ge6gzXPjqu4v0oHvgAwvGzYcK921GU0hQM25WF/Kl+8= +github.com/aws/aws-sdk-go-v2/credentials v1.18.14 h1:TxkI7QI+sFkTItN/6cJuMZEIVMFXeu2dI1ZffkXngKI= +github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQUYE0Hj+0I2b8AS+75z9AY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.8 h1:QcAh/TNGM3MWe95ilMWwnieXWXsyM33Mb/RuTGlWLm4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.8/go.mod h1:72m/ZCCgYpXJzsgI8uJFYMnXEjtZ4kkaolL9NRXLSnU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 
h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 h1:ZV2XK2L3HBq9sCKQiQ/MdhZJppH/rH0vddEAamsHUIs= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3/go.mod h1:b9F9tk2HdHpbf3xbN7rUZcfmJI26N6NcJu/8OsBFI/0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 h1:3ZKmesYBaFX33czDl6mbrcHb6jeheg6LqjJhQdefhsY= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3/go.mod h1:7ryVb78GLCnjq7cw45N6oUb9REl7/vNUwjvIqC5UgdY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 h1:ieRzyHXypu5ByllM7Sp4hC5f/1Fy5wqxqY0yB85hC7s= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3/go.mod h1:O5ROz8jHiOAKAwx179v+7sHMhfobFVi6nZt8DEyiYoM= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 h1:SE/e52dq9a05RuxzLcjT+S5ZpQobj3ie3UTaSf2NnZc= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3/go.mod h1:zkpvBTsR020VVr8TOrwK2TrUW9pOir28sH5ECHpnAfo= -github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0 h1:egoDf+Geuuntmw79Mz6mk9gGmELCPzg5PFEABOHB+6Y= -github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0/go.mod h1:t9MDi29H+HDbkolTSQtbI0HP9DemAWQzUjmWC7LGMnE= -github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 h1:Mc/MKBf2m4VynyJkABoVEN+QzkfLqGj0aiJuEe7cMeM= -github.com/aws/aws-sdk-go-v2/service/sso v1.28.0/go.mod h1:iS5OmxEcN4QIPXARGhavH7S8kETNL11kym6jhoS7IUQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 h1:6csaS/aJmqZQbKhi1EyEMM7yBW653Wy/B9hnBofW+sw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0/go.mod h1:59qHWaY5B+Rs7HGTuVGaC32m0rdpQ68N8QCN3khYiqs= -github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 
h1:MG9VFW43M4A8BYeAfaJJZWrroinxeTi2r3+SnmLQfSA= -github.com/aws/aws-sdk-go-v2/service/sts v1.37.0/go.mod h1:JdeBDPgpJfuS6rU/hNglmOigKhyEZtBmbraLE4GK1J8= -github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= -github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.8 h1:1/bT9kDdLQzfZ1e6J6hpW+SfNDd6xrV8F3M2CuGyUz8= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.8/go.mod h1:RbdwTONAIi59ej/+1H+QzZORt5bcyAtbrS7FQb2pvz0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.8 h1:tIN8MFT1z5STK5kTdOT1TCfMN/bn5fSEnlKsTL8qBOU= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.8/go.mod h1:VKS56txtNWjKI8FqD/hliL0BcshyF4ZaLBa1rm2Y+5s= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 h1:M6JI2aGFEzYxsF6CXIuRBnkge9Wf9a2xU39rNeXgu10= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.8 h1:AgYCo1Rb8XChJXA871BXHDNxNWOTAr6V5YdsRIBbgv0= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.8/go.mod h1:Au9dvIGm1Hbqnt29d3VakOCQuN9l0WrkDDTRq8biWS4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.88.2 h1:T7b3qniouutV5Wwa9B1q7gW+Y8s1B3g9RE9qa7zLBIM= +github.com/aws/aws-sdk-go-v2/service/s3 v1.88.2/go.mod h1:tW9TsLb6t1eaTdBE6LITyJW1m/+DjQPU78Q/jT2FJu8= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 h1:FTdEN9dtWPB0EOURNtDPmwGp6GGvMqRJCAihkSl/1No= +github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJKm7Fkpl5Mwq79Z+rZqU= 
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4= +github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo= github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= @@ -175,8 +177,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= -github.com/cloudinary/cloudinary-go/v2 v2.12.0 h1:uveBJeNpJztKDwFW/B+Wuklq584hQmQXlo+hGTSOGZ8= -github.com/cloudinary/cloudinary-go/v2 v2.12.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo= +github.com/cloudinary/cloudinary-go/v2 v2.13.0 h1:ugiQwb7DwpWQnete2AZkTh94MonZKmxD7hDGy1qTzDs= +github.com/cloudinary/cloudinary-go/v2 v2.13.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo= github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc 
h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg= github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc/go.mod h1:CgWpFCFWzzEA5hVkhAc6DZZzGd3czx+BblvOzjmg6KA= github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc h1:0xCWmFKBmarCqqqLeM7jFBSw/Or81UEElFqO8MY+GDs= @@ -188,8 +190,8 @@ github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9 github.com/colinmarc/hdfs/v2 v2.4.0/go.mod h1:0NAO+/3knbMx6+5pCv+Hcbaz4xn/Zzbn9+WIib2rKVI= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= +github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo= github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -209,11 +211,10 @@ github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXI github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M= github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI= github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/ebitengine/purego v0.8.4 
h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= -github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k= +github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg= github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA= github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg= @@ -232,20 +233,20 @@ github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0X github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= -github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= +github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0= +github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uhw= github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo= -github.com/gdamore/tcell/v2 v2.8.1 h1:KPNxyqclpWpWQlPLx6Xui1pMk8S+7+R37h3g07997NU= -github.com/gdamore/tcell/v2 v2.8.1/go.mod h1:bj8ori1BG3OYMjmb3IklZVWfZUJ1UBQt9JXrOCOhGWw= +github.com/gdamore/tcell/v2 v2.9.0 h1:N6t+eqK7/xwtRPwxzs1PXeRWnm0H9l02CrgJ7DLn1ys= +github.com/gdamore/tcell/v2 v2.9.0/go.mod h1:8/ZoqM9rxzYphT9tH/9LnunhV9oPBqwS8WHGYm5nrmo= github.com/geoffgarside/ber v1.2.0 h1:/loowoRcs/MWLYmGX9QtIAbA+V/FrnVLsMMPhwiRm64= github.com/geoffgarside/ber 
v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc= github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E= github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= -github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618= -github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= +github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE= +github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops= github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68= github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348/go.mod h1:Czxo/d1g948LtrALAZdL04TL/HnkopquAjxYUuI02bo= github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= @@ -281,7 +282,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= @@ -351,9 +351,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.15.0 
h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= @@ -399,7 +398,6 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY= github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 h1:ZxO6Qr2GOXPdcW80Mcn3nemvilMPvpWqxrNfK2ZnNNs= github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3/go.mod h1:dvLUr/8Fs9a2OBrEnCC5duphbkz/k/mSy5OkXg3PAgI= github.com/josephspurrier/goversioninfo v1.5.0 h1:9TJtORoyf4YMoWSOo/cXFN9A/lB3PniJ91OxIH6e7Zg= @@ -423,7 +421,6 @@ github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYW github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.3.0 
h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 h1:CjEMN21Xkr9+zwPmZPaJJw+apzVbjGL5uK/6g9Q2jGU= github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988/go.mod h1:/agobYum3uo/8V6yPVnq+R82pyVGCeuWW5arT4Txn8A= github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 h1:FHVoZMOVRA+6/y4yRlbiR3WvsrOcKBd/f64H7YiWR2U= @@ -440,31 +437,29 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lanrat/extsort v1.4.0 h1:jysS/Tjnp7mBwJ6NG8SY+XYFi8HF3LujGbqY9jOWjco= -github.com/lanrat/extsort v1.4.0/go.mod h1:hceP6kxKPKebjN1RVrDBXMXXECbaI41Y94tt6MDazc4= +github.com/lanrat/extsort v1.4.2 h1:akbLIdo4PhNZtvjpaWnbXtGMmLtnGzXplkzfgl+XTTY= +github.com/lanrat/extsort v1.4.2/go.mod h1:hceP6kxKPKebjN1RVrDBXMXXECbaI41Y94tt6MDazc4= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lpar/date v1.0.0 h1:bq/zVqFTUmsxvd/CylidY4Udqpr9BOFrParoP6p0x/I= github.com/lpar/date v1.0.0/go.mod h1:KjYe0dDyMQTgpqcUz4LEIeM5VZwhggjVx/V2dtc8NSo= -github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= -github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc= -github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod 
h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= +github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag= +github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 h1:mFWunSatvkQQDhpdyuFAYwyAan3hzCuma+Pz8sqvOfg= +github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= -github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ= +github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI= github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU= github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/xxml 
v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU= github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -491,8 +486,8 @@ github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= -github.com/oracle/oci-go-sdk/v65 v65.98.0 h1:ZKsy97KezSiYSN1Fml4hcwjpO+wq01rjBkPqIiUejVc= -github.com/oracle/oci-go-sdk/v65 v65.98.0/go.mod h1:RGiXfpDDmRRlLtqlStTzeBjjdUNXyqm3KXKyLCm3A/Q= +github.com/oracle/oci-go-sdk/v65 v65.101.0 h1:EErMOuw98JXi0P7DgPg5zjouCA5s61iWD5tFWNCVLHk= +github.com/oracle/oci-go-sdk/v65 v65.101.0/go.mod h1:RGiXfpDDmRRlLtqlStTzeBjjdUNXyqm3KXKyLCm3A/Q= github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg= github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -519,31 +514,32 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= -github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs= +github.com/pquerna/otp 
v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= -github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= -github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8= github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU= -github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= -github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.23 h1:lxjt5B6ZCiBeeNO8/oQsegE6fLeCzuMRoVWSkXC4uvY= +github.com/quasilyte/go-ruleguard/dsl v0.3.23/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quic-go/quic-go v0.53.0 h1:QHX46sISpG2S03dPeZBgVIZp8dGagIaiu2FiVYvpCZI= github.com/quic-go/quic-go v0.53.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 
h1:UVArwN/wkKjMVhh2EQGC0tEc1+FqiLlvYXY5mQ2f8Wg= github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= github.com/rclone/gofakes3 v0.0.4 h1:LswpC49VY/UJ1zucoL5ktnOEX6lq3qK7e1aFIAfqCbk= github.com/rclone/gofakes3 v0.0.4/go.mod h1:j/UoS+2/Mr7xAlfKhyVC58YyFQmh9uoQA5YZQXQUqmg= -github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU= -github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= +github.com/relvacode/iso8601 v1.7.0 h1:BXy+V60stMP6cpswc+a93Mq3e65PfXCgDFfhvNNGrdo= +github.com/relvacode/iso8601 v1.7.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4= github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -562,18 +558,17 @@ github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRo github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc= -github.com/shirou/gopsutil/v4 v4.25.7 h1:bNb2JuqKuAu3tRlPv5piSmBZyMfecwQ+t/ILq+1JqVM= -github.com/shirou/gopsutil/v4 v4.25.7/go.mod h1:XV/egmwJtd3ZQjBpJVY5kndsiOO4IRqy9TQnmm6VP7U= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 
+github.com/shirou/gopsutil/v4 v4.25.8 h1:NnAsw9lN7587WHxjJA9ryDnqhJpFH6A+wagYWTOH970= +github.com/shirou/gopsutil/v4 v4.25.8/go.mod h1:q9QdMmfAOVIw7a+eF86P7ISEU6ka+NLgkUxlopV4RwI= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= github.com/snabb/httpreaderat v1.0.1 h1:whlb+vuZmyjqVop8x1EKOg05l2NE4z9lsMMXjmSUCnY= github.com/snabb/httpreaderat v1.0.1/go.mod h1:lpbGrKDWF37yvRbtRvQsbesS6Ty5c83t8ztannPoMsA= github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= @@ -581,13 +576,12 @@ github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ= github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= 
github.com/spacemonkeygo/monkit/v3 v3.0.24 h1:cKixJ+evHnfJhWNyIZjBy5hoW8LTWmrJXPo18tzLNrk= github.com/spacemonkeygo/monkit/v3 v3.0.24/go.mod h1:XkZYGzknZwkD0AKUnZaSXhRiVTLCkq7CWVa3IsE72gA= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= @@ -602,13 +596,15 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 h1:Sa+sR8aaAMFwxhXWENEnE6ZpqhZ9d7u1RT2722Rw6hc= github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5/go.mod h1:UdZiFUFu6e2WjjtjxivwXWcwc1N/8zgbkBR9QNucUOY= +github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c h1:BLopNCyqewbE8+BtlIp/Juzu8AJGxz0gHdGADnsblVc= +github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c/go.mod h1:ykucQyiE9Q2qx1wLlEtZkkNn1IURib/2O+Mvd25i1Fo= github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= -github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww= -github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.4.0 h1:SYOeDRiydzOw9kSiwdYp9UcBgPFtLU2WDHaJXyHruf8= +github.com/tinylib/msgp v1.4.0/go.mod h1:cvjFkb4RiC8qSBOPMGPSzSAx47nAsfhLVTCZZNuHv5o= github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= @@ -625,8 +621,8 @@ github.com/willscott/go-nfs v0.0.3 h1:Z5fHVxMsppgEucdkKBN26Vou19MtEM875NmRwj156R github.com/willscott/go-nfs v0.0.3/go.mod h1:VhNccO67Oug787VNXcyx9JDI3ZoSpqoKMT/lWMhUIDg= github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 h1:U0DnHRZFzoIV1oFEZczg5XyPut9yxk9jjtax/9Bxr/o= github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00/go.mod h1:Tq++Lr/FgiS3X48q5FETemXiSLGuYMQT2sPjYNPJSwA= -github.com/winfsp/cgofuse v1.6.0 h1:re3W+HTd0hj4fISPBqfsrwyvPFpzqhDu8doJ9nOPDB0= -github.com/winfsp/cgofuse v1.6.0/go.mod 
h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= +github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471 h1:aSOo0k+aLWdhUQiUxzv4cZ7cUp3OLP+Qx7cjs6OUxME= +github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM= @@ -650,8 +646,8 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I= -go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM= +go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo= +go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E= go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw= go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -659,35 +655,33 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 
h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= -go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= -go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= -goftp.io/server/v2 v2.0.1 h1:H+9UbCX2N206ePDSVNCjBftOKOgil6kQ5RAQNx5hJwE= -goftp.io/server/v2 v2.0.1/go.mod h1:7+H/EIq7tXdfo1Muu5p+l3oQ6rYkDZ8lY7IM5d5kVdQ= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +goftp.io/server/v2 v2.0.2 h1:tkZpqyXys+vC15W5yGMi8Kzmbv1QSgeKr8qJXBnJbm8= +goftp.io/server/v2 v2.0.2/go.mod h1:Fl1WdcV7fx1pjOWx7jEHb7tsJ8VwE7+xHu6bVJ6r2qg= golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4= golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -701,8 +695,8 @@ golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= +golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -713,8 +707,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 
h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4= -golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg= +golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU= +golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -729,8 +723,8 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20250808145247-395d808d53cd h1:Qd7qm8Xr8riwtdI4F+SWrlnKK/7tLDyTQ5YNv42tvtU= -golang.org/x/mobile v0.0.0-20250808145247-395d808d53cd/go.mod h1:Rg5Br31eIKqfc+43CRdWRfPfFqV9DjN92usHvW9563E= +golang.org/x/mobile v0.0.0-20250911085028-6912353760cf h1:2HVicFltkNthxuudLg8n5TzyNVUESF91+X7+/fxEjSM= +golang.org/x/mobile v0.0.0-20250911085028-6912353760cf/go.mod h1:tfwPrSLpQwNZm2LZ6L4ol2VGzxz+xdyj0fN+n4A50OQ= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -743,8 +737,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -753,7 +747,6 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -784,16 +777,16 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.33.0/go.mod 
h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= +golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= -golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= +golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -810,13 +803,12 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -862,10 +854,9 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -877,10 +868,9 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= +golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -896,20 +886,19 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= 
+golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= +golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -952,12 +941,14 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc 
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -974,8 +965,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc= -google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM= +google.golang.org/api 
v0.250.0 h1:qvkwrf/raASj82UegU2RSDGWi/89WkLckn4LuO4lVXM= +google.golang.org/api v0.250.0/go.mod h1:Y9Uup8bDLJJtMzJyQnu+rLRJLA0wn+wTtc6vTlOvfXo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1013,10 +1004,10 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4= google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY= -google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a h1:tPE/Kp+x9dMSwUm/uM0JKK0IfdiJkwAbSMSeZBXXJXc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU= +google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 h1:V1jCN2HBa8sySkR5vLcCSqJSTMv093Rw9EJefhQGP7M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1029,8 +1020,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4= -google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1041,14 +1032,13 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1075,8 +1065,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= -storj.io/common v0.0.0-20250808122759-804533d519c1 h1:z7ZjU+TlPZ2Lq2S12hT6+Fr7jFsBxPMrPBH4zZpZuUA= -storj.io/common v0.0.0-20250808122759-804533d519c1/go.mod h1:YNr7/ty6CmtpG5C9lEPtPXK3hOymZpueCb9QCNuPMUY= +storj.io/common v0.0.0-20250918032746-784a656bec7e h1:wBeNT7CA1Qwnm8jGP+mKp/IW12vhytCGjVSCKeEF6xM= +storj.io/common v0.0.0-20250918032746-784a656bec7e/go.mod h1:YNr7/ty6CmtpG5C9lEPtPXK3hOymZpueCb9QCNuPMUY= storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro= storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55/go.mod h1:Y9LZaa8esL1PW2IDMqJE7CFSNq7d5bQ3RI7mGPtmKMg= storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 h1:5MZ0CyMbG6Pi0rRzUWVG6dvpXjbBYEX2oyXuj+tT+sk= diff --git a/lib/encoder/encoder.go b/lib/encoder/encoder.go index c8f41d76c..faefdfb3c 100644 --- 
a/lib/encoder/encoder.go +++ b/lib/encoder/encoder.go @@ -184,8 +184,8 @@ func (mask MultiEncoder) String() string { // Set converts a string into a MultiEncoder func (mask *MultiEncoder) Set(in string) error { var out MultiEncoder - parts := strings.Split(in, ",") - for _, part := range parts { + parts := strings.SplitSeq(in, ",") + for part := range parts { part = strings.TrimSpace(part) if bits, ok := nameToEncoding[part]; ok { out |= bits diff --git a/lib/http/auth.go b/lib/http/auth.go index 6fead3827..babd91163 100644 --- a/lib/http/auth.go +++ b/lib/http/auth.go @@ -20,7 +20,7 @@ You can either use an htpasswd file which can take lots of users, or set a single username and password with the ` + "`--{{ .Prefix }}user` and `--{{ .Prefix }}pass`" + ` flags. Alternatively, you can have the reverse proxy manage authentication and use the -username provided in the configured header with ` + "`--user-from-header`" + ` (e.g., ` + "`--{{ .Prefix }}--user-from-header=x-remote-user`" + `). +username provided in the configured header with ` + "`--user-from-header`" + ` (e.g., ` + "`--{{ .Prefix }}user-from-header=x-remote-user`" + `). Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access. 
diff --git a/lib/http/serve/dir.go b/lib/http/serve/dir.go index ae1ead6a0..fd53256b3 100644 --- a/lib/http/serve/dir.go +++ b/lib/http/serve/dir.go @@ -21,6 +21,7 @@ import ( type DirEntry struct { remote string URL string + ZipURL string Leaf string IsDir bool Size int64 @@ -32,6 +33,8 @@ type Directory struct { DirRemote string Title string Name string + ZipURL string + DisableZip bool Entries []DirEntry Query string HTMLTemplate *template.Template @@ -70,6 +73,7 @@ func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory DirRemote: dirRemote, Title: fmt.Sprintf("Directory listing of /%s", dirRemote), Name: fmt.Sprintf("/%s", dirRemote), + ZipURL: "?download=zip", HTMLTemplate: htmlTemplate, Breadcrumb: breadcrumb, } @@ -99,11 +103,15 @@ func (d *Directory) AddHTMLEntry(remote string, isDir bool, size int64, modTime d.Entries = append(d.Entries, DirEntry{ remote: remote, URL: rest.URLPathEscape(urlRemote) + d.Query, + ZipURL: "", Leaf: leaf, IsDir: isDir, Size: size, ModTime: modTime, }) + if isDir { + d.Entries[len(d.Entries)-1].ZipURL = rest.URLPathEscape(urlRemote) + "?download=zip" + } } // AddEntry adds an entry to that directory diff --git a/lib/http/serve/dir_test.go b/lib/http/serve/dir_test.go index 19ab22f57..3fd146724 100644 --- a/lib/http/serve/dir_test.go +++ b/lib/http/serve/dir_test.go @@ -46,11 +46,11 @@ func TestAddHTMLEntry(t *testing.T) { d.AddHTMLEntry("a/b/c/colon:colon.txt", false, 64, modtime) d.AddHTMLEntry("\"quotes\".txt", false, 64, modtime) assert.Equal(t, []DirEntry{ - {remote: "", URL: "/", Leaf: "/", IsDir: true, Size: 0, ModTime: modtime}, - {remote: "dir", URL: "dir/", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime}, - {remote: "a/b/c/d.txt", URL: "d.txt", Leaf: "d.txt", IsDir: false, Size: 64, ModTime: modtime}, - {remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", Leaf: "colon:colon.txt", IsDir: false, Size: 64, ModTime: modtime}, - {remote: "\"quotes\".txt", URL: "%22quotes%22.txt", Leaf: 
"\"quotes\".txt", Size: 64, IsDir: false, ModTime: modtime}, + {remote: "", URL: "/", ZipURL: "/?download=zip", Leaf: "/", IsDir: true, Size: 0, ModTime: modtime}, + {remote: "dir", URL: "dir/", ZipURL: "dir/?download=zip", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime}, + {remote: "a/b/c/d.txt", URL: "d.txt", ZipURL: "", Leaf: "d.txt", IsDir: false, Size: 64, ModTime: modtime}, + {remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", ZipURL: "", Leaf: "colon:colon.txt", IsDir: false, Size: 64, ModTime: modtime}, + {remote: "\"quotes\".txt", URL: "%22quotes%22.txt", ZipURL: "", Leaf: "\"quotes\".txt", Size: 64, IsDir: false, ModTime: modtime}, }, d.Entries) // Now test with a query parameter @@ -58,8 +58,8 @@ func TestAddHTMLEntry(t *testing.T) { d.AddHTMLEntry("file", false, 64, modtime) d.AddHTMLEntry("dir", true, 0, modtime) assert.Equal(t, []DirEntry{ - {remote: "file", URL: "file?potato=42", Leaf: "file", IsDir: false, Size: 64, ModTime: modtime}, - {remote: "dir", URL: "dir/?potato=42", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime}, + {remote: "file", URL: "file?potato=42", ZipURL: "", Leaf: "file", IsDir: false, Size: 64, ModTime: modtime}, + {remote: "dir", URL: "dir/?potato=42", ZipURL: "dir/?download=zip", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime}, }, d.Entries) } diff --git a/lib/http/server.go b/lib/http/server.go index 46531351d..83d0162dd 100644 --- a/lib/http/server.go +++ b/lib/http/server.go @@ -59,6 +59,8 @@ inserts leading and trailing "/" on ` + "`--{{ .Prefix }}baseurl`" + `, so ` + " ` + "`--{{ .Prefix }}baseurl \"/rclone\"` and `--{{ .Prefix }}baseurl \"/rclone/\"`" + ` are all treated identically. +` + "`--{{ .Prefix }}disable-zip`" + ` may be set to disable the zipping download option. + #### TLS (SSL) By default this will serve over http. 
If you want you can serve over @@ -523,8 +525,6 @@ func (s *Server) initTLS() error { func (s *Server) Serve() { s.wg.Add(len(s.instances)) for _, ii := range s.instances { - // TODO: decide how/when to log listening url - // log.Printf("listening on %s", ii.url) go ii.serve(&s.wg) } // Install an atexit handler to shutdown gracefully diff --git a/lib/http/templates/index.html b/lib/http/templates/index.html index 348050c02..cec58f1d7 100644 --- a/lib/http/templates/index.html +++ b/lib/http/templates/index.html @@ -21,7 +21,7 @@ - @@ -206,6 +219,9 @@ + + + @@ -233,6 +249,15 @@

{{range $i, $crumb := .Breadcrumb}}{{html $crumb.Text}}{{if ne $i 0}}/{{end}}{{end}} + + {{- if not .DisableZip}} + + + + + + {{- end}} +

@@ -283,6 +308,13 @@

{{- end}} {{html .Leaf}} + {{- if and .IsDir (not $.DisableZip)}} + + + + + + {{- end}} {{- if .IsDir}} — diff --git a/lib/mmap/mmap_test.go b/lib/mmap/mmap_test.go index 0fb0b797f..318a8a2ec 100644 --- a/lib/mmap/mmap_test.go +++ b/lib/mmap/mmap_test.go @@ -31,7 +31,7 @@ func BenchmarkAllocFree(b *testing.B) { for _, dirty := range []bool{false, true} { for size := 4096; size <= 32*1024*1024; size *= 2 { b.Run(fmt.Sprintf("%dk,dirty=%v", size>>10, dirty), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { mem := MustAlloc(size) if dirty { mem[0] ^= 0xFF @@ -62,7 +62,7 @@ func BenchmarkAllocFreeWithLotsOfAllocations(b *testing.B) { for preAllocs := 1; preAllocs <= maxAllocs; preAllocs *= 2 { allocs := alloc(preAllocs) b.Run(fmt.Sprintf("%d", preAllocs), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { mem := MustAlloc(size) mem[0] ^= 0xFF MustFree(mem) @@ -90,7 +90,7 @@ func BenchmarkAllocFreeForLotsOfAllocations(b *testing.B) { } for preAllocs := 1; preAllocs <= maxAllocs; preAllocs *= 2 { b.Run(fmt.Sprintf("%d", preAllocs), func(b *testing.B) { - for i := 0; i < b.N; i++ { + for b.Loop() { allocs := alloc(preAllocs) free(allocs) } diff --git a/lib/oauthutil/oauthutil.go b/lib/oauthutil/oauthutil.go index a44ae8028..d0a8edaae 100644 --- a/lib/oauthutil/oauthutil.go +++ b/lib/oauthutil/oauthutil.go @@ -250,9 +250,7 @@ func (ts *TokenSource) reReadToken() (changed bool) { return false } - if !newToken.Valid() { - fs.Debugf(ts.name, "Loaded invalid token from config file - ignoring") - } else { + if newToken.Valid() { fs.Debugf(ts.name, "Loaded fresh token from config file") changed = true } @@ -264,6 +262,8 @@ func (ts *TokenSource) reReadToken() (changed bool) { if changed { ts.token = newToken ts.tokenSource = nil // invalidate since we changed the token + } else { + fs.Debugf(ts.name, "No updated token found in the config file") } return changed } @@ -319,6 +319,8 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) { return 
ts.token, nil } + fs.Debug(ts.name, "Token expired") + // Try getting the token a few times for i := 1; i <= maxTries; i++ { // Try reading the token from the config file in case it has @@ -344,6 +346,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) { token, err = ts.tokenSource.Token() if err == nil { + fs.Debug(ts.name, "Token refresh successful") break } if newErr := maybeWrapOAuthError(err, ts.name); newErr != err { diff --git a/lib/oauthutil/renew.go b/lib/oauthutil/renew.go index 40eab7843..71357ebf5 100644 --- a/lib/oauthutil/renew.go +++ b/lib/oauthutil/renew.go @@ -47,16 +47,14 @@ func (r *Renew) renewOnExpiry() { } uploads := r.uploads.Load() if uploads != 0 { - fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads) + fs.Debugf(r.name, "Background refresher detected expired token - %d uploads in progress - refreshing", uploads) // Do a transaction err := r.run() - if err == nil { - fs.Debugf(r.name, "Token refresh successful") - } else { - fs.Errorf(r.name, "Token refresh failed: %v", err) + if err != nil { + fs.Errorf(r.name, "Background token refresher failed: %v", err) } } else { - fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing") + fs.Debugf(r.name, "Background refresher detected expired token but no uploads in progress - doing nothing") } } } diff --git a/lib/pacer/pacer.go b/lib/pacer/pacer.go index 1c7cc51e5..005317821 100644 --- a/lib/pacer/pacer.go +++ b/lib/pacer/pacer.go @@ -4,6 +4,8 @@ package pacer import ( "errors" "fmt" + "runtime" + "strings" "sync" "time" @@ -153,13 +155,13 @@ func (p *Pacer) ModifyCalculator(f func(Calculator)) { // This must be called as a pair with endCall. // // This waits for the pacer token -func (p *Pacer) beginCall() { +func (p *Pacer) beginCall(limitConnections bool) { // pacer starts with a token in and whenever we take one out // XXX ms later we put another in. 
We could do this with a // Ticker more accurately, but then we'd have to work out how // not to run it when it wasn't needed <-p.pacer - if p.maxConnections > 0 { + if limitConnections { <-p.connTokens } @@ -176,8 +178,8 @@ func (p *Pacer) beginCall() { // // This should calculate a new sleepTime. It takes a boolean as to // whether the operation should be retried or not. -func (p *Pacer) endCall(retry bool, err error) { - if p.maxConnections > 0 { +func (p *Pacer) endCall(retry bool, err error, limitConnections bool) { + if limitConnections { p.connTokens <- struct{}{} } p.mu.Lock() @@ -191,13 +193,44 @@ func (p *Pacer) endCall(retry bool, err error) { p.mu.Unlock() } +// Detect the pacer being called reentrantly. +// +// This looks for Pacer.call in the call stack and returns true if it +// is found. +// +// Ideally we would do this by passing a context about but there are +// an awful lot of Pacer calls! +// +// This is only needed when p.maxConnections > 0 which isn't a common +// configuration so adding a bit of extra slowdown here is not a +// problem. 
+func pacerReentered() bool { + var pcs [48]uintptr + n := runtime.Callers(3, pcs[:]) // skip runtime.Callers, pacerReentered and call + frames := runtime.CallersFrames(pcs[:n]) + for { + f, more := frames.Next() + if strings.HasSuffix(f.Function, "(*Pacer).call") { + return true + } + if !more { + break + } + } + return false +} + // call implements Call but with settable retries func (p *Pacer) call(fn Paced, retries int) (err error) { var retry bool + limitConnections := false + if p.maxConnections > 0 && !pacerReentered() { + limitConnections = true + } for i := 1; i <= retries; i++ { - p.beginCall() + p.beginCall(limitConnections) retry, err = p.invoker(i, retries, fn) - p.endCall(retry, err) + p.endCall(retry, err, limitConnections) if !retry { break } diff --git a/lib/pacer/pacer_test.go b/lib/pacer/pacer_test.go index 3ac9c3741..76ef4b071 100644 --- a/lib/pacer/pacer_test.go +++ b/lib/pacer/pacer_test.go @@ -108,7 +108,7 @@ func waitForPace(p *Pacer, duration time.Duration) (when time.Time) { func TestBeginCall(t *testing.T) { p := New(MaxConnectionsOption(10), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond)))) emptyTokens(p) - go p.beginCall() + go p.beginCall(true) if !waitForPace(p, 10*time.Millisecond).IsZero() { t.Errorf("beginSleep fired too early #1") } @@ -131,7 +131,7 @@ func TestBeginCall(t *testing.T) { func TestBeginCallZeroConnections(t *testing.T) { p := New(MaxConnectionsOption(0), CalculatorOption(NewDefault(MinSleep(1*time.Millisecond)))) emptyTokens(p) - go p.beginCall() + go p.beginCall(false) if !waitForPace(p, 10*time.Millisecond).IsZero() { t.Errorf("beginSleep fired too early #1") } @@ -257,7 +257,7 @@ func TestEndCall(t *testing.T) { p := New(MaxConnectionsOption(5)) emptyTokens(p) p.state.ConsecutiveRetries = 1 - p.endCall(true, nil) + p.endCall(true, nil, true) assert.Equal(t, 1, len(p.connTokens)) assert.Equal(t, 2, p.state.ConsecutiveRetries) } @@ -266,7 +266,7 @@ func TestEndCallZeroConnections(t *testing.T) { p := 
New(MaxConnectionsOption(0)) emptyTokens(p) p.state.ConsecutiveRetries = 1 - p.endCall(false, nil) + p.endCall(false, nil, false) assert.Equal(t, 0, len(p.connTokens)) assert.Equal(t, 0, p.state.ConsecutiveRetries) } @@ -353,6 +353,41 @@ func TestCallParallel(t *testing.T) { wait.Broadcast() } +func BenchmarkPacerReentered(b *testing.B) { + for b.Loop() { + _ = pacerReentered() + } +} + +func BenchmarkPacerReentered100(b *testing.B) { + var fn func(level int) + fn = func(level int) { + if level > 0 { + fn(level - 1) + return + } + for b.Loop() { + _ = pacerReentered() + } + + } + fn(100) +} + +func TestCallMaxConnectionsRecursiveDeadlock(t *testing.T) { + p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond)))) + p.SetMaxConnections(1) + dp := &dummyPaced{retry: false} + err := p.Call(func() (bool, error) { + // check we have taken the connection token + // no tokens left means deadlock on the recursive call + assert.Equal(t, 0, len(p.connTokens)) + return false, p.Call(dp.fn) + }) + assert.Equal(t, 1, dp.called) + assert.Equal(t, errFoo, err) +} + func TestRetryAfterError_NonNilErr(t *testing.T) { orig := errors.New("test failure") dur := 2 * time.Second diff --git a/lib/pool/pool_test.go b/lib/pool/pool_test.go index 0be3e2400..5c0434e82 100644 --- a/lib/pool/pool_test.go +++ b/lib/pool/pool_test.go @@ -16,23 +16,21 @@ import ( // makes the allocations be unreliable func makeUnreliable(bp *Pool) { - const maxFailsInARow = 10 - var allocFails int + var allocCount int + tests := rand.Intn(4) + 1 bp.alloc = func(size int) ([]byte, error) { - if rand.Intn(3) != 0 && allocFails < maxFailsInARow { - allocFails++ + allocCount++ + if allocCount%tests != 0 { return nil, errors.New("failed to allocate memory") } - allocFails = 0 return make([]byte, size), nil } - var freeFails int + var freeCount int bp.free = func(b []byte) error { - if rand.Intn(3) != 0 && freeFails < maxFailsInARow { - freeFails++ + freeCount++ + if 
freeCount%tests != 0 { return errors.New("failed to free memory") } - freeFails = 0 return nil } } @@ -290,22 +288,24 @@ func TestPoolMaxBufferMemory(t *testing.T) { } } ) - for i := 0; i < 20; i++ { + const trials = 50 + for i := range trials { wg.Add(1) go func() { defer wg.Done() - if i < 4 { - buf := bp.GetN(i + 1) - countBuf(i + 1) - time.Sleep(100 * time.Millisecond) + if i < trials/2 { + n := i%4 + 1 + buf := bp.GetN(n) + countBuf(n) + time.Sleep(1 * time.Millisecond) + countBuf(-n) bp.PutN(buf) - countBuf(-(i + 1)) } else { buf := bp.Get() countBuf(1) - time.Sleep(100 * time.Millisecond) - bp.Put(buf) + time.Sleep(1 * time.Millisecond) countBuf(-1) + bp.Put(buf) } }() } diff --git a/vfs/vfstest/fs.go b/vfs/vfstest/fs.go index c9db4fa19..0391f855c 100644 --- a/vfs/vfstest/fs.go +++ b/vfs/vfstest/fs.go @@ -212,7 +212,7 @@ type dirMap map[string]struct{} // Create a dirMap from a string func newDirMap(dirString string) (dm dirMap) { dm = make(dirMap) - for _, entry := range strings.Split(dirString, "|") { + for entry := range strings.SplitSeq(dirString, "|") { if entry != "" { dm[entry] = struct{}{} } diff --git a/vfs/zip.go b/vfs/zip.go new file mode 100644 index 000000000..287777bed --- /dev/null +++ b/vfs/zip.go @@ -0,0 +1,73 @@ +package vfs + +import ( + "archive/zip" + "context" + "fmt" + "io" + "os" + + "github.com/rclone/rclone/fs" +) + +// CreateZip creates a zip file from a vfs.Dir writing it to w +func CreateZip(ctx context.Context, dir *Dir, w io.Writer) (err error) { + zipWriter := zip.NewWriter(w) + defer fs.CheckClose(zipWriter, &err) + var walk func(dir *Dir, root string) error + walk = func(dir *Dir, root string) error { + nodes, err := dir.ReadDirAll() + if err != nil { + return fmt.Errorf("create zip directory read: %w", err) + } + for _, node := range nodes { + switch e := node.(type) { + case *File: + in, err := e.Open(os.O_RDONLY) + if err != nil { + return fmt.Errorf("create zip open file: %w", err) + } + header := &zip.FileHeader{ + 
Name: root + e.Name(), + Method: zip.Deflate, + Modified: e.ModTime(), + } + fileWriter, err := zipWriter.CreateHeader(header) + if err != nil { + fs.CheckClose(in, &err) + return fmt.Errorf("create zip file header: %w", err) + } + _, err = io.Copy(fileWriter, in) + if err != nil { + fs.CheckClose(in, &err) + return fmt.Errorf("create zip copy: %w", err) + } + fs.CheckClose(in, &err) + case *Dir: + name := root + e.Path() + if name != "" && name[len(name)-1] != '/' { + name += "/" + } + header := &zip.FileHeader{ + Name: name, + Method: zip.Store, + Modified: e.ModTime(), + } + _, err := zipWriter.CreateHeader(header) + if err != nil { + return fmt.Errorf("create zip directory header: %w", err) + } + err = walk(e, name) + if err != nil { + return err + } + } + } + return nil + } + err = walk(dir, "") + if err != nil { + return err + } + return nil +} diff --git a/vfs/zip_test.go b/vfs/zip_test.go new file mode 100644 index 000000000..d15be2483 --- /dev/null +++ b/vfs/zip_test.go @@ -0,0 +1,160 @@ +package vfs + +import ( + "archive/zip" + "bytes" + "context" + "crypto/sha256" + "fmt" + "io" + "strings" + "testing" + + "github.com/rclone/rclone/fstest" + "github.com/rclone/rclone/lib/random" + "github.com/stretchr/testify/require" +) + +func readZip(t *testing.T, buf *bytes.Buffer) *zip.Reader { + t.Helper() + r, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + require.NoError(t, err) + return r +} + +func mustCreateZip(t *testing.T, d *Dir) *bytes.Buffer { + t.Helper() + var buf bytes.Buffer + require.NoError(t, CreateZip(context.Background(), d, &buf)) + return &buf +} + +func zipReadFile(t *testing.T, zr *zip.Reader, match func(name string) bool) ([]byte, string) { + t.Helper() + for _, f := range zr.File { + if strings.HasSuffix(f.Name, "/") { + continue + } + if match(f.Name) { + rc, err := f.Open() + require.NoError(t, err) + defer func() { require.NoError(t, rc.Close()) }() + b, err := io.ReadAll(rc) + require.NoError(t, err) + return b, 
f.Name + } + } + t.Fatalf("zip entry matching predicate not found") + return nil, "" +} + +func TestZipManyFiles(t *testing.T) { + r, vfs := newTestVFS(t) + + const N = 5 + want := make(map[string]string, N) + items := make([]fstest.Item, 0, N) + + for i := range N { + name := fmt.Sprintf("flat/f%03d.txt", i) + data := strings.Repeat(fmt.Sprintf("line-%d\n", i), (i%5)+1) + it := r.WriteObject(context.Background(), name, data, t1) + items = append(items, it) + want[name[strings.LastIndex(name, "/")+1:]] = data + } + r.CheckRemoteItems(t, items...) + + node, err := vfs.Stat("flat") + require.NoError(t, err) + dir := node.(*Dir) + + buf := mustCreateZip(t, dir) + zr := readZip(t, buf) + + // count only file entries (skip dir entries with trailing "/") + files := 0 + for _, f := range zr.File { + if !strings.HasSuffix(f.Name, "/") { + files++ + } + } + require.Equal(t, N, files) + + // validate contents by base name + for base, data := range want { + got, _ := zipReadFile(t, zr, func(name string) bool { return name == base }) + require.Equal(t, data, string(got), "mismatch for %s", base) + } +} + +func TestZipManySubDirs(t *testing.T) { + r, vfs := newTestVFS(t) + + r.WriteObject(context.Background(), "a/top.txt", "top", t1) + r.WriteObject(context.Background(), "a/b/mid.txt", "mid", t1) + r.WriteObject(context.Background(), "a/b/c/deep.txt", "deep", t1) + + node, err := vfs.Stat("a") + require.NoError(t, err) + dir := node.(*Dir) + + buf := mustCreateZip(t, dir) + zr := readZip(t, buf) + + // paths may include directory prefixes; assert by suffix + got, name := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/top.txt") || n == "top.txt" }) + require.Equal(t, "top", string(got), "bad content for %s", name) + + got, name = zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/mid.txt") || n == "mid.txt" }) + require.Equal(t, "mid", string(got), "bad content for %s", name) + + got, name = zipReadFile(t, zr, func(n string) bool { 
return strings.HasSuffix(n, "/deep.txt") || n == "deep.txt" }) + require.Equal(t, "deep", string(got), "bad content for %s", name) +} + +func TestZipLargeFiles(t *testing.T) { + r, vfs := newTestVFS(t) + + if strings.HasPrefix(r.Fremote.Name(), "TestChunker") { + t.Skip("skipping test as chunker too slow") + } + + data := random.String(5 * 1024 * 1024) + sum := sha256.Sum256([]byte(data)) + + r.WriteObject(context.Background(), "bigdir/big.bin", data, t1) + + node, err := vfs.Stat("bigdir") + require.NoError(t, err) + dir := node.(*Dir) + + buf := mustCreateZip(t, dir) + zr := readZip(t, buf) + + got, _ := zipReadFile(t, zr, func(n string) bool { return n == "big.bin" || strings.HasSuffix(n, "/big.bin") }) + require.Equal(t, sum, sha256.Sum256(got)) +} + +func TestZipDirsInRoot(t *testing.T) { + r, vfs := newTestVFS(t) + + r.WriteObject(context.Background(), "dir1/a.txt", "x", t1) + r.WriteObject(context.Background(), "dir2/b.txt", "y", t1) + r.WriteObject(context.Background(), "dir3/c.txt", "z", t1) + + root, err := vfs.Root() + require.NoError(t, err) + + buf := mustCreateZip(t, root) + zr := readZip(t, buf) + + // Check each file exists (ignore exact directory-entry names) + gx, _ := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/a.txt") }) + require.Equal(t, "x", string(gx)) + + gy, _ := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/b.txt") }) + require.Equal(t, "y", string(gy)) + + gz, _ := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/c.txt") }) + require.Equal(t, "z", string(gz)) +} From 3bb44a8ac3a0874dd530c3b3577322db867edbd1 Mon Sep 17 00:00:00 2001 From: divyam234 <47589864+divyam234@users.noreply.github.com> Date: Mon, 28 Apr 2025 19:47:34 +0200 Subject: [PATCH 2/9] feat: added teldrive remote --- fs/rc/jobs/job.go | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/fs/rc/jobs/job.go b/fs/rc/jobs/job.go index 0df3cb7b1..2c83b8c69 100644 
--- a/fs/rc/jobs/job.go +++ b/fs/rc/jobs/job.go @@ -191,6 +191,19 @@ func (jobs *Jobs) Get(ID int64) *Job { return jobs.jobs[ID] } +func getJobID(in rc.Params) (int64, bool, error) { + jobID, err := in.GetInt64("_jobid") + if err != nil { + if rc.IsErrParamNotFound(err) { + return 0, false, nil + } else { + return 0, false, err + } + } + delete(in, "_jobid") + return jobID, true, nil +} + // Check to see if the group is set func getGroup(ctx context.Context, in rc.Params, id int64) (context.Context, string, error) { group, err := in.GetString("_group") @@ -261,9 +274,13 @@ var jobKey = jobKeyType{} // NewJob creates a Job and executes it, possibly in the background if _async is set func (jobs *Jobs) NewJob(ctx context.Context, fn rc.Func, in rc.Params) (job *Job, out rc.Params, err error) { - id := jobID.Add(1) in = in.Copy() // copy input so we can change it + customID, hasCustomID, err := getJobID(in) + if err != nil { + return nil, nil, err + } + ctx, isAsync, err := getAsync(ctx, in) if err != nil { return nil, nil, err @@ -279,6 +296,16 @@ func (jobs *Jobs) NewJob(ctx context.Context, fn rc.Func, in rc.Params) (job *Jo return nil, nil, err } + var id int64 + if hasCustomID { + if _, ok := jobs.jobs[customID]; ok { + return nil, nil, fmt.Errorf("job ID %d already in use", customID) + } + id = customID + } else { + id = jobID.Add(1) + } + ctx, group, err := getGroup(ctx, in, id) if err != nil { return nil, nil, err From 251b4c09b6e81de2064d542afdbdbe6609549c40 Mon Sep 17 00:00:00 2001 From: divyam234 <47589864+divyam234@users.noreply.github.com> Date: Sat, 24 Jan 2026 19:48:42 +0530 Subject: [PATCH 3/9] feat: support hashing, unknown sizes, event streams and multithread resume copyurl - Renamed 'http_proxy' to 'proxy' in global configuration options. - Implemented native SOCKS5/SOCKS5h support in the HTTP transport by wrapping the dialer with golang.org/x/net/proxy. - Maintained backward compatibility logic for standard HTTP/HTTPS proxies. 
- Improved error reporting for invalid proxy configurations. --- backend/all/all.go | Bin 6122 -> 6960 bytes backend/local/local.go | 9 +- backend/teldrive/api/types.go | 7 +- backend/teldrive/tdhash/tdhash.go | 123 ++++++++++ backend/teldrive/teldrive.go | 390 +++++++++++++++++++++--------- backend/teldrive/upload.go | 340 ++++++++++++++++++++------ fs/config.go | 12 +- fs/fshttp/http.go | 6 +- fs/object/object.go | 175 ++++++++++++++ fs/operations/multithread.go | 388 ++++++++++++++++++++++++++++- fs/operations/operations.go | 42 ++++ 11 files changed, 1293 insertions(+), 199 deletions(-) create mode 100644 backend/teldrive/tdhash/tdhash.go diff --git a/backend/all/all.go b/backend/all/all.go index 4cdcdabdc257316f06c10a017aa63082f98d99b4..64be00614a0d5f9a459b20e5ba0d75baa18efb7a 100644 GIT binary patch delta 139 zcmaE*zrk!n93y)ULncEpL&@ZejDdoQ3^_oU!jQ_4#83njPXUQej^d7+ypNGDnF0luU0MHyNj}R7y%{74j}*l diff --git a/backend/local/local.go b/backend/local/local.go index b4462d8ed..e45222b22 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -1542,7 +1542,14 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr return nil, errors.New("can't open a symlink for random writing") } - out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + ci := fs.GetConfig(ctx) + + flags := os.O_WRONLY | os.O_CREATE + if !ci.MultiThreadResume { + flags |= os.O_TRUNC + } + + out, err := file.OpenFile(o.path, flags, 0666) if err != nil { return nil, err } diff --git a/backend/teldrive/api/types.go b/backend/teldrive/api/types.go index cbe2f611a..705f2269a 100644 --- a/backend/teldrive/api/types.go +++ b/backend/teldrive/api/types.go @@ -33,6 +33,7 @@ type FileInfo struct { ParentId string `json:"parentId"` Type string `json:"type"` ModTime time.Time `json:"updatedAt"` + Hash string `json:"hash"` } type Meta struct { @@ -46,7 +47,6 @@ type ReadMetadataResponse struct { Meta Meta `json:"meta"` } -// MetadataRequestOptions 
represents all the options when listing folder contents type MetadataRequestOptions struct { Page int64 Limit int64 @@ -82,7 +82,8 @@ type CreateFileRequest struct { Encrypted bool `json:"encrypted,omitempty"` Parts []FilePart `json:"parts,omitempty"` ParentId string `json:"parentId,omitempty"` - ModTime time.Time `json:"updatedAt,omitempty"` + ModTime time.Time `json:"updatedAt"` + UploadId string `json:"uploadId"` } type MoveFileRequest struct { @@ -113,7 +114,7 @@ type RemoveFileRequest struct { type CopyFile struct { Newname string `json:"newName"` Destination string `json:"destination"` - ModTime time.Time `json:"updatedAt,omitempty"` + ModTime time.Time `json:"updatedAt"` } type Session struct { diff --git a/backend/teldrive/tdhash/tdhash.go b/backend/teldrive/tdhash/tdhash.go new file mode 100644 index 000000000..f9087e09b --- /dev/null +++ b/backend/teldrive/tdhash/tdhash.go @@ -0,0 +1,123 @@ +// Package tdhash implements the Teldrive hashing algorithm. +// Files are split into 16MB blocks (fixed size). +// Each block is hashed with BLAKE3. +// Block hashes are concatenated and hashed together to produce the final tree hash. +package tdhash + +import ( + "encoding/hex" + "hash" + + "github.com/zeebo/blake3" +) + +const ( + // BlockSize is the fixed block size for tree hashing (16MB). + BlockSize = 16 * 1024 * 1024 + // Size is the expected hex-encoded string length (64 chars for 32-byte blake3 hash). + // Note: Sum() returns 32 raw bytes, rclone will hex-encode for display. + Size = 64 + // rawSize is the actual number of bytes returned by Sum(). 
+ rawSize = 32 +) + +type digest struct { + chunkHash hash.Hash // Hash for current block + totalHash hash.Hash // Tree hash (hash of block hashes) + n int64 // bytes written into current block + sumCalled bool + writtenMore bool +} + +// New creates a new tree hash (uses BLAKE3 with fixed 16MB blocks) +func New() hash.Hash { + d := &digest{} + d.Reset() + return d +} + +// Reset resets the hash to its initial state. +func (d *digest) Reset() { + d.n = 0 + d.sumCalled = false + d.writtenMore = false + + // Always use BLAKE3 + d.chunkHash = blake3.New() + d.totalHash = blake3.New() +} + +// writeChunkHash writes the current chunk hash into the total hash +func (d *digest) writeChunkHash() { + chunkHashBytes := d.chunkHash.Sum(nil) + d.totalHash.Write(chunkHashBytes) + // Reset chunk hasher for next chunk + d.n = 0 + d.chunkHash.Reset() +} + +// Write adds more data to the running hash. +// It processes data in chunks and accumulates chunk hashes. +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + d.writtenMore = true + + for len(p) > 0 { + // Calculate how much we can write to current block + remainingInBlock := BlockSize - d.n + toWrite := min(int64(len(p)), remainingInBlock) + + // Write to current block hash + d.chunkHash.Write(p[:toWrite]) + d.n += toWrite + p = p[toWrite:] + + // If block is full, finalize it and start new block + if d.n >= BlockSize { + d.writeChunkHash() + } + } + + return n, nil +} + +// Sum returns the current hash as raw bytes (32 bytes for BLAKE3). +// Note: rclone's hash system will hex-encode this for display/comparison. 
+func (d *digest) Sum(b []byte) []byte { + if d.sumCalled && d.writtenMore { + // If Sum was already called and we wrote more data, we need to + // finalize the last chunk if it has data + if d.n > 0 { + d.writeChunkHash() + } + } + + // Finalize last chunk if it has data + if d.n > 0 { + d.writeChunkHash() + } + + d.sumCalled = true + d.writtenMore = false + + // Return raw tree hash bytes (not hex-encoded) + treeHashBytes := d.totalHash.Sum(nil) + return append(b, treeHashBytes...) +} + +// Size returns the number of bytes Sum will return (32 for BLAKE3). +func (d *digest) Size() int { + return rawSize +} + +// BlockSize returns the hash's underlying block size. +func (d *digest) BlockSize() int { + return BlockSize +} + +// SumString computes the tree hash of the entire file and returns it as a hex string +func SumString(data []byte) string { + d := New() + d.Write(data) + return hex.EncodeToString(d.Sum(nil)) +} diff --git a/backend/teldrive/teldrive.go b/backend/teldrive/teldrive.go index d27f2101a..76b52ff53 100644 --- a/backend/teldrive/teldrive.go +++ b/backend/teldrive/teldrive.go @@ -4,6 +4,7 @@ package teldrive import ( "bufio" "context" + "encoding/json" "errors" "fmt" "io" @@ -16,6 +17,7 @@ import ( "time" "github.com/rclone/rclone/backend/teldrive/api" + "github.com/rclone/rclone/backend/teldrive/tdhash" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/configmap" @@ -23,6 +25,7 @@ import ( "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" @@ -32,15 +35,13 @@ import ( const ( timeFormat = time.RFC3339 - maxChunkSize = 2000 * fs.Mebi - defaultChunkSize = 500 * fs.Mebi - minChunkSize = 100 * fs.Mebi + maxChunkSize = 2000 * fs.Mebi // 125 × 16MB (Telegram limit) + defaultChunkSize = 512 * 
fs.Mebi // 32 × 16MB for optimal BLAKE3 tree hashing + minChunkSize = 64 * fs.Mebi // 4 × 16MB authCookieName = "access_token" ) -var ( - errCanNotUploadFileWithUnknownSize = errors.New("teldrive can't upload files with unknown size") -) +var telDriveHash hash.Type func init() { fs.Register(&fs.RegInfo{ @@ -93,14 +94,21 @@ func init() { Name: "encrypt_files", Default: false, Help: "Enable Native Teldrive Encryption", - }, { - + }, + { + Name: "hash_enabled", + Default: true, + Help: "Enable Blake3 Tree Hashing", + }, + { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, Default: encoder.Standard | encoder.EncodeInvalidUtf8, }}, }) + + telDriveHash = hash.RegisterHash("teldrive", "TelDriveHash", tdhash.Size, tdhash.New) } // Options defines the configuration for this backend @@ -115,7 +123,7 @@ type Options struct { ChannelID int64 `config:"channel_id"` EncryptFiles bool `config:"encrypt_files"` PageSize int64 `config:"page_size"` - ThreadedStreams bool `config:"threaded_streams"` + HashEnabled bool `config:"hash_enabled"` Enc encoder.MultiEncoder `config:"encoding"` } @@ -127,6 +135,7 @@ type Fs struct { features *fs.Features srv *rest.Client pacer *fs.Pacer + ssePacer *fs.Pacer // Dedicated pacer for SSE connection retries userId int64 dirCache *dircache.DirCache rootFolderID string @@ -142,6 +151,7 @@ type Object struct { name string modTime time.Time mimeType string + hash string // BLAKE3 tree hash from server } // Name of the remote (as passed into NewFs) @@ -165,8 +175,13 @@ func (f *Fs) Precision() time.Duration { } // Hashes returns the supported hash types of the filesystem +// TelDrive uses BLAKE3 tree hashing only (16MB fixed blocks) func (f *Fs) Hashes() hash.Set { - return hash.Set(hash.None) + if f.opt.HashEnabled { + return hash.Set(telDriveHash) + } + return hash.NewHashSet(hash.None) + } // Features returns the optional features of this Fs @@ -193,14 +208,16 @@ func shouldRetry(ctx context.Context, resp *http.Response, 
err error) (bool, err return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } -func checkUploadChunkSize(cs fs.SizeSuffix) error { - if cs < minChunkSize { - return fmt.Errorf("ChunkSize: %s is less than %s", cs, minChunkSize) - } - if cs > maxChunkSize { - return fmt.Errorf("ChunkSize: %s is greater than %s", cs, maxChunkSize) - } - return nil +// alignChunkSize rounds the chunk size to the nearest 16MB multiple +// and clamps it to min/max bounds +func alignChunkSize(cs fs.SizeSuffix) fs.SizeSuffix { + blockSize := int64(16 * 1024 * 1024) // 16MB + chunkSizeBytes := min(max(int64(cs), int64(minChunkSize)), int64(maxChunkSize)) + // Round to nearest 16MB multiple + // Ensure we don't exceed max after rounding + alignedSize := min(((chunkSizeBytes+blockSize/2)/blockSize)*blockSize, int64(maxChunkSize)) + + return fs.SizeSuffix(alignedSize) } func Ptr[T any](t T) *T { @@ -223,14 +240,18 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe return nil, err } - err = checkUploadChunkSize(opt.ChunkSize) - if err != nil { - return nil, err - } + // Align chunk size to 16MB multiple for optimal BLAKE3 tree hashing + opt.ChunkSize = alignChunkSize(opt.ChunkSize) if opt.ChannelID < 0 { - channnelId := strconv.FormatInt(opt.ChannelID, 10) - opt.ChannelID, _ = strconv.ParseInt(strings.TrimPrefix(channnelId, "-100"), 10, 64) + channelIDStr := strconv.FormatInt(opt.ChannelID, 10) + // teldrive API expects channel ID without the -100 prefix for supergroups/channels + trimmedIDStr := strings.TrimPrefix(channelIDStr, "-100") + newID, err := strconv.ParseInt(trimmedIDStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid channel_id: %w", err) + } + opt.ChannelID = newID } f := &Fs{ @@ -238,6 +259,12 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe root: root, opt: *opt, pacer: fs.NewPacer(ctx, pacer.NewDefault()), + // Dedicated SSE pacer with optimized settings for 
connection retries + ssePacer: fs.NewPacer(ctx, pacer.NewDefault( + pacer.MinSleep(1*time.Second), + pacer.MaxSleep(30*time.Second), + pacer.DecayConstant(2), + )), } f.root = strings.Trim(root, "/") @@ -498,6 +525,7 @@ func (f *Fs) newObjectWithInfo(_ context.Context, remote string, info *api.FileI name: info.Name, modTime: info.ModTime, mimeType: info.MimeType, + hash: info.Hash, } if info.Type == "folder" { return o, fs.ErrorIsDir @@ -601,14 +629,28 @@ func (f *Fs) updateFileInformation(ctx context.Context, update *api.UpdateFileIn } func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, _ ...fs.OpenOption) error { - o := &Object{ fs: f, } - uploadInfo, err := o.uploadMultipart(ctx, bufio.NewReader(in), src) - if err != nil { - return err + var uploadInfo *uploadInfo + var err error + size := src.Size() + + if size < 0 { + // Unknown size - buffer to memory/temp file first + fs.Debugf(f, "putUnchecked: unknown size, buffering to memory (threshold: %d bytes)", memoryBufferThreshold) + uploadInfo, size, err = o.uploadWithBuffering(ctx, in, src) + if err != nil { + return err + } + // Create new src with known size for createFile + src = object.NewStaticObjectInfo(src.Remote(), src.ModTime(ctx), size, false, nil, f) + } else if size > 0 { + uploadInfo, err = o.uploadMultipart(ctx, in, src) + if err != nil { + return err + } } return o.createFile(ctx, src, uploadInfo) @@ -639,9 +681,6 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin // will return the object and the error, otherwise will return // nil and the error func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - if src.Size() < 0 { - return nil, errCanNotUploadFileWithUnknownSize - } existingObj, err := f.NewObject(ctx, src.Remote()) switch err { case nil: @@ -672,13 +711,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, // // The new object may have 
been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { - - if src.Size() < 0 { - return errCanNotUploadFileWithUnknownSize - } - remote := o.Remote() - modTime := src.ModTime(ctx) leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true) @@ -687,9 +720,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op } var uploadInfo *uploadInfo + size := src.Size() - if src.Size() > 0 { - uploadInfo, err = o.uploadMultipart(ctx, bufio.NewReader(in), src) + if size < 0 { + // Unknown size - buffer to memory/temp file first + fs.Debugf(o, "Update: unknown size, buffering to memory (threshold: %d bytes)", memoryBufferThreshold) + uploadInfo, size, err = o.uploadWithBuffering(ctx, in, src) + if err != nil { + return err + } + } else if size > 0 { + uploadInfo, err = o.uploadMultipart(ctx, in, src) if err != nil { return err } @@ -697,21 +738,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op payload := &api.UpdateFileInformation{ ModTime: Ptr(modTime.UTC()), - Size: src.Size(), + Size: size, ParentID: directoryID, Name: leaf, } if uploadInfo != nil { - payload.Parts = uploadInfo.fileChunks payload.UploadId = uploadInfo.uploadID - payload.ChannelID = o.fs.opt.ChannelID + payload.ChannelID = uploadInfo.channelID payload.Encrypted = uploadInfo.encryptFile } opts := rest.Opts{ - Method: "PUT", - Path: "/api/files/" + o.id + "/parts", + Method: "PATCH", + Path: "/api/files/" + o.id, NoResponse: true, } @@ -725,105 +765,196 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op } o.modTime = modTime - - o.size = src.Size() + o.size = size return nil } // ChangeNotify calls the passed function with a path that has had changes. -// If the implementation uses polling, it should adhere to the given interval. -// -// Automatically restarts itself in case of unexpected behavior of the remote. 
-// -// Close the returned channel to stop being notified. func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { go func() { - processedEventIDs := make(map[string]time.Time) - var ticker *time.Ticker - var tickerC <-chan time.Time for { select { + case <-ctx.Done(): + return case pollInterval, ok := <-pollIntervalChan: if !ok { - if ticker != nil { - ticker.Stop() - } + fs.Debugf(f, "ChangeNotify: channel closed, stopping") return } - if ticker != nil { - ticker.Stop() - ticker, tickerC = nil, nil - } - if pollInterval != 0 { - ticker = time.NewTicker(pollInterval) - tickerC = ticker.C + if pollInterval > 0 { + fs.Debugf(f, "ChangeNotify: poll interval set but SSE is active, ignoring") } - case <-tickerC: - fs.Debugf(f, "Checking for changes on remote") - for eventID, timestamp := range processedEventIDs { - if time.Since(timestamp) > 5*time.Minute { - delete(processedEventIDs, eventID) - } - } - err := f.changeNotifyRunner(ctx, notifyFunc, processedEventIDs) + default: + fs.Debugf(f, "Starting SSE event stream") + err := f.changeNotifySSE(ctx, notifyFunc) if err != nil { - fs.Infof(f, "Change notify listener failure: %s", err) + fs.Infof(f, "SSE connection failed permanently: %s", err) + return } } } }() } -func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), processedEventIDs map[string]time.Time) error { +func isFatalError(err error) bool { + if err == nil { + return false + } + errStr := err.Error() + return strings.Contains(errStr, "401") || + strings.Contains(errStr, "403") || + strings.Contains(errStr, "404") +} + +func (f *Fs) changeNotifySSE(ctx context.Context, notifyFunc func(string, fs.EntryType)) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } - var changes []api.Event + var connErr error + err := f.ssePacer.Call(func() (bool, error) { + connErr = f.connectAndProcessSSE(ctx, notifyFunc) + if connErr == nil { 
+ return false, nil + } + if fserrors.ContextError(ctx, &connErr) { + return false, connErr + } + if isFatalError(connErr) { + return false, connErr + } + return true, connErr + }) - opts := rest.Opts{ - Method: "GET", - Path: "/api/events", + if err != nil { + return err + } + + fs.Debugf(f, "SSE connection ended, will retry") } +} - err := f.pacer.Call(func() (bool, error) { - resp, err := f.srv.CallJSON(ctx, &opts, nil, &changes) - return shouldRetry(ctx, resp, err) - }) +func (f *Fs) connectAndProcessSSE(ctx context.Context, notifyFunc func(string, fs.EntryType)) error { + opts := rest.Opts{ + Method: "GET", + Path: "/api/events/stream", + ContentType: "text/event-stream", + ExtraHeaders: map[string]string{ + "Accept": "text/event-stream", + "Cache-Control": "no-cache", + }, + } + resp, err := f.srv.Call(ctx, &opts) if err != nil { - return err + return fmt.Errorf("failed to connect to SSE endpoint: %w", err) + } + if resp == nil || resp.Body == nil { + return fmt.Errorf("no response from SSE endpoint") } + defer resp.Body.Close() - var pathsToClear []string - for _, change := range changes { - if _, ok := processedEventIDs[change.ID]; ok { - continue + contentType := resp.Header.Get("Content-Type") + if !strings.Contains(contentType, "text/event-stream") { + return fmt.Errorf("unexpected content type: %s", contentType) + } + + fs.Debugf(f, "SSE connection established") + reader := bufio.NewReader(resp.Body) + var eventData strings.Builder + + for { + + select { + case <-ctx.Done(): + return nil + default: } - addPathToClear := func(parentID string) { - if path, ok := f.dirCache.GetInv(parentID); ok { - pathsToClear = append(pathsToClear, path) + + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF { + return fmt.Errorf("SSE stream closed by server") } + return fmt.Errorf("error reading SSE stream: %w", err) } - // Check original parent location - addPathToClear(change.Source.ParentId) + line = strings.TrimRight(line, "\r\n") + + if line 
== "" { + if eventData.Len() > 0 { + data := eventData.String() + eventData.Reset() + + if err := f.processSSEEvent(data, notifyFunc); err != nil { + fs.Debugf(f, "Failed to process SSE event: %s", err) + } + } + continue + } - // Check destination parent location if file was moved - if change.Source.DestParentId != "" { - addPathToClear(change.Source.DestParentId) + if strings.HasPrefix(line, "data: ") { + eventData.WriteString(line[6:]) } - processedEventIDs[change.ID] = time.Now() } - notifiedPaths := make(map[string]bool) - for _, path := range pathsToClear { - if _, ok := notifiedPaths[path]; ok { - continue +} + +func (f *Fs) processSSEEvent(data string, notifyFunc func(string, fs.EntryType)) error { + var event api.Event + if err := json.Unmarshal([]byte(data), &event); err != nil { + return fmt.Errorf("failed to unmarshal event: %w", err) + } + + // Get parent path from cache + parentPath, ok := f.dirCache.GetInv(event.Source.ParentId) + if !ok { + fs.Debugf(f, "SSE: skipping event for uncached parent %s", event.Source.ParentId) + return nil + } + + fullPath := path.Join(parentPath, event.Source.Name) + + // Filter: only notify for paths within root + if !strings.HasPrefix(fullPath, f.root) { + return nil + } + + var entryType fs.EntryType + switch event.Source.Type { + case "folder": + entryType = fs.EntryDirectory + case "file": + entryType = fs.EntryObject + default: + entryType = fs.EntryObject + } + + // Handle move events - notify both old and new locations + if event.Type == "file_move" && event.Source.DestParentId != "" { + if oldParentPath, ok := f.dirCache.GetInv(event.Source.DestParentId); ok { + oldPath := path.Join(oldParentPath, event.Source.Name) + if strings.HasPrefix(oldPath, f.root) { + fs.Debugf(f, "SSE move event: old path %s", oldPath) + notifyFunc(oldPath, entryType) + } } - notifiedPaths[path] = true - notifyFunc(path, fs.EntryDirectory) } + + fs.Debugf(f, "SSE event: %s (%v, type=%s)", fullPath, entryType, event.Type) + 
notifyFunc(fullPath, entryType) + return nil } +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) +} + // OpenChunkWriter returns the chunk size and a ChunkWriter // // Pass in the remote and the src object @@ -834,15 +965,23 @@ func (f *Fs) OpenChunkWriter( src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) { - if src.Size() <= 0 { - return info, nil, errCanNotUploadFileWithUnknownSize - } - o := &Object{ fs: f, remote: remote, } + // If size is unknown, use bufferingChunkWriter that supports out-of-order chunks + if src.Size() <= 0 { + fs.Debugf(f, "OpenChunkWriter: unknown size, using buffering chunk writer") + return fs.ChunkWriterInfo{}, &bufferingChunkWriter{ + f: f, + o: o, + src: src, + remote: remote, + chunks: make(map[int]*chunkFile), + }, nil + } + uploadInfo, err := o.prepareUpload(ctx, src) if err != nil { @@ -1096,8 +1235,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { var resp *http.Response - http := o.fs.srv - fs.FixRangeOption(options, o.size) opts := rest.Opts{ @@ -1105,14 +1242,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read Path: fmt.Sprintf("/api/files/%s/%s", o.id, url.QueryEscape(o.name)), Options: options, } - if !o.fs.opt.ThreadedStreams { - opts.Parameters = url.Values{ - "download": []string{"1"}, - } - } err = o.fs.pacer.Call(func() (bool, error) { - resp, err = http.Call(ctx, &opts) + resp, err = o.fs.srv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) @@ -1228,8 +1360,36 @@ func (o *Object) Size() int64 { return o.size } -// Hash returns the Md5sum of an object returning a lowercase hex string func (o 
*Object) Hash(ctx context.Context, t hash.Type) (string, error) { + if t != telDriveHash { + return "", hash.ErrUnsupported + } + + if o.hash != "" { + return o.hash, nil + } + + // Fetch from server if not cached + var file api.FileInfo + opts := rest.Opts{ + Method: "GET", + Path: "/api/files/" + o.id, + } + + err := o.fs.pacer.Call(func() (bool, error) { + resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &file) + return shouldRetry(ctx, resp, err) + }) + + if err != nil { + return "", fmt.Errorf("failed to get file hash: %w", err) + } + + if file.Hash != "" { + o.hash = file.Hash + return o.hash, nil + } + return "", hash.ErrUnsupported } diff --git a/backend/teldrive/upload.go b/backend/teldrive/upload.go index c5ab79846..25782d468 100644 --- a/backend/teldrive/upload.go +++ b/backend/teldrive/upload.go @@ -1,6 +1,7 @@ package teldrive import ( + "bytes" "context" "crypto/md5" "encoding/hex" @@ -8,12 +9,12 @@ import ( "fmt" "io" "net/url" - "sort" + "os" "strconv" - "sync" "github.com/google/uuid" "github.com/rclone/rclone/backend/teldrive/api" + "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/lib/pool" "github.com/rclone/rclone/lib/rest" @@ -21,6 +22,10 @@ import ( "github.com/rclone/rclone/fs" ) +// memoryBufferThreshold is the size limit for memory buffering +// Files smaller than this will be buffered in memory, larger files use temp file +const memoryBufferThreshold = 10 * 1024 * 1024 // 10MB + type uploadInfo struct { existingChunks map[int]api.PartFile uploadID string @@ -28,19 +33,16 @@ type uploadInfo struct { encryptFile bool chunkSize int64 totalChunks int64 - fileChunks []api.FilePart fileName string dir string } type objectChunkWriter struct { - size int64 - f *Fs - src fs.ObjectInfo - partsToCommitMu sync.Mutex - partsToCommit []api.PartFile - o *Object - uploadInfo *uploadInfo + size int64 + f *Fs + src fs.ObjectInfo + o *Object + uploadInfo *uploadInfo } func getMD5Hash(text string) string { 
@@ -65,14 +67,10 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea r.Account(int(existing.Size)) default: } - w.addCompletedPart(existing) return existing.Size, nil } - var ( - response api.PartFile - partName string - ) + var partName string err = w.f.pacer.Call(func() (bool, error) { @@ -101,6 +99,7 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea Method: "POST", Body: reader, ContentLength: &size, + NoResponse: true, ContentType: "application/octet-stream", Parameters: url.Values{ "partName": []string{partName}, @@ -111,6 +110,10 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea }, } + if w.f.opt.HashEnabled { + opts.Parameters.Set("hashing", "true") + } + if w.f.opt.UploadHost != "" { opts.RootURL = w.f.opt.UploadHost + "/api/uploads/" + w.uploadInfo.uploadID @@ -118,16 +121,13 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea opts.Path = "/api/uploads/" + w.uploadInfo.uploadID } - resp, err := w.f.srv.CallJSON(ctx, &opts, nil, &response) + resp, err := w.f.srv.Call(ctx, &opts) retry, err := shouldRetry(ctx, resp, err) if err != nil { fs.Debugf(w.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err) } - if response.PartId == 0 { - return true, fmt.Errorf("error sending chunk %d", chunkNumber) - } return retry, err @@ -137,45 +137,269 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea return 0, fmt.Errorf("error sending chunk %d: %v", chunkNumber, err) } - w.addCompletedPart(response) fs.Debugf(w.o, "Done sending chunk %d", chunkNumber) return size, err } -// add a part number and etag to the completed parts -func (w *objectChunkWriter) addCompletedPart(part api.PartFile) { - w.partsToCommitMu.Lock() - defer w.partsToCommitMu.Unlock() - w.partsToCommit = append(w.partsToCommit, part) +func (w *objectChunkWriter) Close(ctx context.Context) error { + + return 
w.o.createFile(ctx, w.src, w.uploadInfo) } -func (w *objectChunkWriter) Close(ctx context.Context) error { +func (*objectChunkWriter) Abort(ctx context.Context) error { + return nil +} - if w.uploadInfo.totalChunks != int64(len(w.partsToCommit)) { - return fmt.Errorf("uploaded failed") +// chunkFile stores a single chunk's temp file path and size +type chunkFile struct { + tempPath string + size int64 +} + +// bufferingChunkWriter handles uploads with unknown size by buffering chunks to temp files +// Used by OpenChunkWriter for streaming uploads +// Supports out-of-order chunks - stores each chunk in separate temp file and reassembles in order at Close() +type bufferingChunkWriter struct { + f *Fs + o *Object + src fs.ObjectInfo + remote string + chunks map[int]*chunkFile // Store temp file paths by chunk number + totalSize int64 + maxChunk int // Track highest chunk number seen +} + +// WriteChunk stores a chunk to a temp file by its chunk number (supports out-of-order writes) +func (w *bufferingChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) { + // Create temp file for this chunk + tempFile, err := os.CreateTemp("", fmt.Sprintf("rclone-teldrive-chunk-%d-*", chunkNumber)) + if err != nil { + return 0, fmt.Errorf("failed to create temp file for chunk %d: %w", chunkNumber, err) } - sort.Slice(w.partsToCommit, func(i, j int) bool { - return w.partsToCommit[i].PartNo < w.partsToCommit[j].PartNo - }) + tempPath := tempFile.Name() - fileChunks := []api.FilePart{} + // Copy data to temp file + size, err = io.Copy(tempFile, reader) + if err != nil { + tempFile.Close() + _ = os.Remove(tempPath) + return 0, fmt.Errorf("failed to write chunk %d to temp file: %w", chunkNumber, err) + } - for _, part := range w.partsToCommit { - fileChunks = append(fileChunks, api.FilePart{ID: part.PartId, Salt: part.Salt}) + // Close temp file (we'll reopen for reading in Close) + if err := tempFile.Close(); err != nil { + _ = 
os.Remove(tempPath) + return 0, fmt.Errorf("failed to close temp file for chunk %d: %w", chunkNumber, err) } - w.uploadInfo.fileChunks = fileChunks + // Store chunk info (file will be deleted in Close or Abort) + w.chunks[chunkNumber] = &chunkFile{ + tempPath: tempPath, + size: size, + } + w.totalSize += size - return w.o.createFile(ctx, w.src, w.uploadInfo) + // Track highest chunk number + if chunkNumber > w.maxChunk { + w.maxChunk = chunkNumber + } + + fs.Debugf(w.f, "Buffered chunk %d: %d bytes to temp file %s", chunkNumber, size, tempPath) + return size, nil } -func (*objectChunkWriter) Abort(ctx context.Context) error { +// Close reassembles all chunks in order and uploads to TelDrive +func (w *bufferingChunkWriter) Close(ctx context.Context) error { + fs.Debugf(w.f, "Closing bufferingChunkWriter: %d chunks, total size %d bytes", len(w.chunks), w.totalSize) + + // Create a reader that yields chunks in order (0, 1, 2, ...) + chunkReader := &orderedChunkReader{ + chunks: w.chunks, + maxChunk: w.maxChunk, + current: 0, + } + + // Create source info with known size + src := object.NewStaticObjectInfo(w.remote, w.src.ModTime(ctx), w.totalSize, false, nil, w.f) + + // Upload using the ordered chunk reader + uploadInfo, err := w.o.uploadMultipart(ctx, chunkReader, src) + if err != nil { + return fmt.Errorf("failed to upload buffered chunks: %w", err) + } + + // Clean up temp files + chunkReader.cleanup() + w.chunks = nil + + return w.o.createFile(ctx, src, uploadInfo) +} + +// Abort cleans up temp files +func (w *bufferingChunkWriter) Abort(ctx context.Context) error { + for _, chunk := range w.chunks { + if chunk.tempPath != "" { + _ = os.Remove(chunk.tempPath) + } + } + w.chunks = nil return nil } +// orderedChunkReader reads chunks in order (0, 1, 2, ...) 
from temp files +type orderedChunkReader struct { + chunks map[int]*chunkFile + maxChunk int + current int + file *os.File + remaining int64 +} + +func (r *orderedChunkReader) Read(p []byte) (n int, err error) { + for r.current <= r.maxChunk { + // Open next chunk file if needed + if r.file == nil { + chunk, ok := r.chunks[r.current] + if !ok { + // Skip missing chunks (shouldn't happen in normal operation) + r.current++ + continue + } + + file, err := os.Open(chunk.tempPath) + if err != nil { + return 0, fmt.Errorf("failed to open chunk %d temp file: %w", r.current, err) + } + r.file = file + r.remaining = chunk.size + } + + // Read from current chunk file + n, err = r.file.Read(p) + if n > 0 { + r.remaining -= int64(n) + if r.remaining <= 0 { + // Finished this chunk, close and move to next + r.file.Close() + r.file = nil + r.current++ + } + return n, nil + } + if err != nil && err != io.EOF { + return 0, err + } + + // EOF on this chunk, close and move to next + r.file.Close() + r.file = nil + r.current++ + } + + return 0, io.EOF +} + +func (r *orderedChunkReader) cleanup() { + if r.file != nil { + r.file.Close() + } + for _, chunk := range r.chunks { + if chunk.tempPath != "" { + _ = os.Remove(chunk.tempPath) + } + } +} + +// uploadWithBuffering buffers data to memory or temp file for unknown-sized uploads +// Returns the uploadInfo, final size, and any error +func (o *Object) uploadWithBuffering(ctx context.Context, in io.Reader, src fs.ObjectInfo) (*uploadInfo, int64, error) { + var buffer bytes.Buffer + var tempFile *os.File + var written int64 + + // Read data in chunks + buf := make([]byte, 64*1024) // 64KB read buffer + for { + n, err := in.Read(buf) + if n > 0 { + // Check if we need to switch to temp file + if tempFile == nil && written+int64(n) > memoryBufferThreshold { + fs.Debugf(o, "Buffering: switching to temp file at %d bytes", written+int64(n)) + tempFile, err = os.CreateTemp("", "rclone-teldrive-*") + if err != nil { + return nil, 0, 
fmt.Errorf("failed to create temp file: %w", err) + } + _ = os.Remove(tempFile.Name()) // Delete immediately (Unix trick) + + // Copy existing buffer to temp file + if buffer.Len() > 0 { + _, err = tempFile.Write(buffer.Bytes()) + if err != nil { + tempFile.Close() + return nil, 0, fmt.Errorf("failed to copy buffer to temp file: %w", err) + } + buffer = bytes.Buffer{} // Free memory + } + } + + // Write to appropriate target + if tempFile != nil { + _, err = tempFile.Write(buf[:n]) + if err != nil { + tempFile.Close() + return nil, 0, fmt.Errorf("failed to write to temp file: %w", err) + } + } else { + buffer.Write(buf[:n]) + } + written += int64(n) + } + if err == io.EOF { + break + } + if err != nil { + if tempFile != nil { + tempFile.Close() + } + return nil, 0, fmt.Errorf("failed to read input: %w", err) + } + } + + // Now upload with known size + var uploadInfo *uploadInfo + var err error + + if tempFile != nil { + // Upload from temp file + defer func() { + _ = tempFile.Close() + }() + + _, err = tempFile.Seek(0, io.SeekStart) + if err != nil { + return nil, 0, fmt.Errorf("failed to seek temp file: %w", err) + } + + fs.Debugf(o, "Uploading %d bytes from temp file", written) + srcWithSize := object.NewStaticObjectInfo(src.Remote(), src.ModTime(ctx), written, false, nil, o.fs) + uploadInfo, err = o.uploadMultipart(ctx, tempFile, srcWithSize) + } else { + // Upload from memory buffer + fs.Debugf(o, "Uploading %d bytes from memory buffer", written) + srcWithSize := object.NewStaticObjectInfo(src.Remote(), src.ModTime(ctx), written, false, nil, o.fs) + uploadInfo, err = o.uploadMultipart(ctx, bytes.NewReader(buffer.Bytes()), srcWithSize) + } + + if err != nil { + return nil, 0, err + } + + return uploadInfo, written, nil +} + func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo) (*uploadInfo, error) { leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, src.Remote(), true) @@ -291,6 +515,7 @@ func (o *Object) uploadMultipart(ctx context.Context, 
in io.Reader, src fs.Objec opts := rest.Opts{ Method: "POST", Body: partReader, + NoResponse: true, ContentLength: &n, ContentType: "application/octet-stream", Parameters: url.Values{ @@ -302,39 +527,24 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec }, } + if o.fs.opt.HashEnabled { + opts.Parameters.Set("hashing", "true") + } + if o.fs.opt.UploadHost != "" { opts.RootURL = o.fs.opt.UploadHost + "/api/uploads/" + uploadInfo.uploadID } else { opts.Path = "/api/uploads/" + uploadInfo.uploadID } - - var partInfo api.PartFile - - _, err := o.fs.srv.CallJSON(ctx, &opts, nil, &partInfo) - + _, err := o.fs.srv.Call(ctx, &opts) if err != nil { return nil, err } - uploadedSize += n - - partsToCommit = append(partsToCommit, partInfo) } - sort.Slice(partsToCommit, func(i, j int) bool { - return partsToCommit[i].PartNo < partsToCommit[j].PartNo - }) - - fileChunks := []api.FilePart{} - - for _, part := range partsToCommit { - fileChunks = append(fileChunks, api.FilePart{ID: part.PartId, Salt: part.Salt}) - } - - uploadInfo.fileChunks = fileChunks } - return uploadInfo, nil } @@ -353,7 +563,7 @@ func (o *Object) createFile(ctx context.Context, src fs.ObjectInfo, uploadInfo * ParentId: uploadInfo.dir, MimeType: fs.MimeType(ctx, src), Size: src.Size(), - Parts: uploadInfo.fileChunks, + UploadId: uploadInfo.uploadID, ChannelID: uploadInfo.channelID, Encrypted: uploadInfo.encryptFile, ModTime: src.ModTime(ctx).UTC(), @@ -367,20 +577,6 @@ func (o *Object) createFile(ctx context.Context, src fs.ObjectInfo, uploadInfo * if err != nil { return err } - if src.Size() > 0 { - opts = rest.Opts{ - Method: "DELETE", - Path: "/api/uploads/" + uploadInfo.uploadID, - NoResponse: true, - } - err = o.fs.pacer.Call(func() (bool, error) { - resp, err := o.fs.srv.Call(ctx, &opts) - return shouldRetry(ctx, resp, err) - }) - if err != nil { - return err - } - } return nil } diff --git a/fs/config.go b/fs/config.go index ce0a1df9e..aa3791175 100644 --- a/fs/config.go 
+++ b/fs/config.go @@ -464,6 +464,11 @@ var ConfigOptionsInfo = Options{{ Default: SizeSuffix(64 * 1024 * 1024), Help: "Chunk size for multi-thread downloads / uploads, if not set by filesystem", Groups: "Copy", +}, { + Name: "multi_thread_resume", + Default: false, + Help: "Enable resume support for multi-thread downloads on local filesystem (uses state files to track progress)", + Groups: "Copy", }, { Name: "use_json_log", Default: false, @@ -562,9 +567,9 @@ var ConfigOptionsInfo = Options{{ Help: "Transform paths during the copy process.", Groups: "Copy", }, { - Name: "http_proxy", + Name: "proxy", Default: "", - Help: "HTTP proxy URL.", + Help: "Proxy URL, e.g. http://127.0.0.1:8080 or socks5://127.0.0.1:1080", Groups: "Networking", }} @@ -656,6 +661,7 @@ type ConfigInfo struct { MultiThreadSet bool `config:"multi_thread_set"` // whether MultiThreadStreams was set (set in fs/config/configflags) MultiThreadChunkSize SizeSuffix `config:"multi_thread_chunk_size"` // Chunk size for multi-thread downloads / uploads, if not set by filesystem MultiThreadWriteBufferSize SizeSuffix `config:"multi_thread_write_buffer_size"` + MultiThreadResume bool `config:"multi_thread_resume"` OrderBy string `config:"order_by"` // instructions on how to order the transfer UploadHeaders []*HTTPOption `config:"upload_headers"` DownloadHeaders []*HTTPOption `config:"download_headers"` @@ -679,7 +685,7 @@ type ConfigInfo struct { MetadataMapper SpaceSepList `config:"metadata_mapper"` MaxConnections int `config:"max_connections"` NameTransform []string `config:"name_transform"` - HTTPProxy string `config:"http_proxy"` + Proxy string `config:"proxy"` } func init() { diff --git a/fs/fshttp/http.go b/fs/fshttp/http.go index 65684f3db..ece264971 100644 --- a/fs/fshttp/http.go +++ b/fs/fshttp/http.go @@ -211,11 +211,11 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) *T // This also means we get new stuff when it gets added to go t := new(http.Transport) 
structs.SetDefaults(t, http.DefaultTransport.(*http.Transport)) - if ci.HTTPProxy != "" { - proxyURL, err := url.Parse(ci.HTTPProxy) + if ci.Proxy != "" { + proxyURL, err := url.Parse(ci.Proxy) if err != nil { t.Proxy = func(*http.Request) (*url.URL, error) { - return nil, fmt.Errorf("failed to set --http-proxy from %q: %w", ci.HTTPProxy, err) + return nil, fmt.Errorf("failed to set --proxy from %q: %w", ci.Proxy, err) } } else { t.Proxy = http.ProxyURL(proxyURL) diff --git a/fs/object/object.go b/fs/object/object.go index 9b4e25afe..9013912f6 100644 --- a/fs/object/object.go +++ b/fs/object/object.go @@ -5,11 +5,21 @@ import ( "bytes" "context" "errors" + "fmt" "io" + "mime" + "net/http" + "net/textproto" + "path" + "strings" "time" "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/rest" ) // StaticObjectInfo is an ObjectInfo which can be constructed from scratch @@ -334,3 +344,168 @@ var ( _ fs.Object = (*MemoryObject)(nil) _ fs.Metadataer = (*MemoryObject)(nil) ) + +var retryErrorCodes = []int{429, 500, 502, 503, 504} + +func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +func (o *HTTPObject) fetch(ctx context.Context) error { + + headOpts := rest.Opts{ + Method: "HEAD", + RootURL: o.url, + } + + resp, err := o.client.Call(ctx, &headOpts) + if err == nil && resp.StatusCode >= 200 && resp.StatusCode < 300 { + defer resp.Body.Close() + return o.parseMetadataResponse(resp) + } + if resp != nil { + resp.Body.Close() + } + + getOpts := rest.Opts{ + Method: "GET", + RootURL: o.url, + ExtraHeaders: map[string]string{ + "Range": "bytes=0-0", + }, + } + + resp, err = o.client.Call(ctx, &getOpts) + if err != nil { + 
return err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("metadata fetch failed: %s", resp.Status) + } + + return o.parseMetadataResponse(resp) +} + +func (o *HTTPObject) parseMetadataResponse(resp *http.Response) error { + var filename string + + if o.dstFileNameFromHeader { + if cd := resp.Header.Get("Content-Disposition"); cd != "" { + if _, params, err := mime.ParseMediaType(cd); err == nil { + if val, ok := params["filename"]; ok { + filename = textproto.TrimString(path.Base(strings.ReplaceAll(val, "\\", "/"))) + } + } + } + } else { + filename = path.Base(resp.Request.URL.Path) + } + o.size = rest.ParseSizeFromHeaders(resp.Header) + + if lm := resp.Header.Get("Last-Modified"); lm != "" { + if t, err := http.ParseTime(lm); err == nil { + o.modTime = t + } + } + o.remote = filename + return nil + +} + +var HTTPFs httpFs + +type httpFs struct{} + +func (h httpFs) Features() *fs.Features { return &fs.Features{} } +func (h httpFs) Hashes() hash.Set { return hash.Set(hash.None) } +func (h httpFs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + return nil, nil +} +func (h httpFs) Mkdir(ctx context.Context, dir string) error { return nil } +func (h httpFs) Name() string { return "http" } +func (h httpFs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return nil, fs.ErrorObjectNotFound +} +func (h httpFs) Precision() time.Duration { return time.Nanosecond } +func (h httpFs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return nil, nil +} +func (h httpFs) Rmdir(ctx context.Context, dir string) error { return nil } +func (h httpFs) Root() string { return "" } +func (h httpFs) String() string { return "http" } + +var _ fs.Fs = HTTPFs + +type HTTPObject struct { + p *fs.Pacer + client *rest.Client + url string + remote string + size int64 + modTime time.Time + dstFileNameFromHeader bool +} + +func 
NewHTTPObject(ctx context.Context, url string, dstFileNameFromHeader bool) (*HTTPObject, error) { + ci := fs.GetConfig(ctx) + o := &HTTPObject{url: url, client: rest.NewClient(fshttp.NewClient(ctx)), dstFileNameFromHeader: dstFileNameFromHeader} + o.p = fs.NewPacer(ctx, pacer.NewDefault()) + o.p.SetRetries(ci.LowLevelRetries) + err := o.fetch(ctx) + if err != nil { + return nil, err + } + return o, nil +} + +func (o *HTTPObject) String() string { + if o == nil { + return "" + } + return o.remote +} + +func (o *HTTPObject) Remote() string { return o.remote } +func (o *HTTPObject) ModTime(ctx context.Context) time.Time { return o.modTime } +func (o *HTTPObject) Size() int64 { return o.size } +func (o *HTTPObject) Fs() fs.Info { return HTTPFs } +func (o *HTTPObject) Storable() bool { return true } +func (o *HTTPObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { + var ( + err error + res *http.Response + ) + + fs.FixRangeOption(options, o.size) + err = o.p.Call(func() (bool, error) { + opts := rest.Opts{ + Method: "GET", + RootURL: o.url, + Options: options, + } + res, err = o.client.Call(ctx, &opts) + return shouldRetry(ctx, res, err) + }) + + if err != nil { + return nil, fmt.Errorf("Open failed: %w", err) + } + return res.Body, nil +} +func (o *HTTPObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + return nil +} +func (o *HTTPObject) Remove(ctx context.Context) error { return nil } +func (o *HTTPObject) SetModTime(ctx context.Context, modTime time.Time) error { return nil } +func (o *HTTPObject) Hash(ctx context.Context, r hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +var ( + _ fs.Object = (*HTTPObject)(nil) +) diff --git a/fs/operations/multithread.go b/fs/operations/multithread.go index 9abd5f743..4d99c9b59 100644 --- a/fs/operations/multithread.go +++ b/fs/operations/multithread.go @@ -3,13 +3,18 @@ package operations import ( "bufio" "context" + 
"encoding/binary" "errors" "fmt" "io" + "os" + "path/filepath" + "sync" "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/multipart" "github.com/rclone/rclone/lib/pool" @@ -18,8 +23,286 @@ import ( const ( multithreadChunkSize = 64 << 10 + resumeStateSuffix = ".rclone" + resumeFileVersion = 1 ) +// resumeState holds the resume state using a bitmap +type resumeState struct { + Source string + Destination string + Size int64 + ChunkSize int64 + NumChunks int + ETag string // ETag from source (if available) + ModTime time.Time // Modification time from source + // Bitmap: 1 bit per chunk, 1=complete, 0=missing + Bitmap []byte +} + +// loadResumeState loads resume state from binary file +func loadResumeState(filePath string) (*resumeState, error) { + statePath := filePath + resumeStateSuffix + + f, err := os.Open(statePath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + defer f.Close() + + state := &resumeState{} + + // Read version (1 byte) + var version uint8 + if err := binary.Read(f, binary.BigEndian, &version); err != nil { + return nil, fmt.Errorf("failed to read version: %w", err) + } + if version != resumeFileVersion { + return nil, fmt.Errorf("unsupported resume file version: %d", version) + } + + // Read source length and source string + var sourceLen uint32 + if err := binary.Read(f, binary.BigEndian, &sourceLen); err != nil { + return nil, fmt.Errorf("failed to read source length: %w", err) + } + sourceBytes := make([]byte, sourceLen) + if _, err := io.ReadFull(f, sourceBytes); err != nil { + return nil, fmt.Errorf("failed to read source: %w", err) + } + state.Source = string(sourceBytes) + + // Read destination length and destination string + var destLen uint32 + if err := binary.Read(f, binary.BigEndian, &destLen); err != nil { + return nil, fmt.Errorf("failed to read destination 
length: %w", err) + } + destBytes := make([]byte, destLen) + if _, err := io.ReadFull(f, destBytes); err != nil { + return nil, fmt.Errorf("failed to read destination: %w", err) + } + state.Destination = string(destBytes) + + // Read size + if err := binary.Read(f, binary.BigEndian, &state.Size); err != nil { + return nil, fmt.Errorf("failed to read size: %w", err) + } + + // Read chunk size + if err := binary.Read(f, binary.BigEndian, &state.ChunkSize); err != nil { + return nil, fmt.Errorf("failed to read chunk size: %w", err) + } + + // Read num chunks (read as int32 then convert to int) + var numChunks int32 + if err := binary.Read(f, binary.BigEndian, &numChunks); err != nil { + return nil, fmt.Errorf("failed to read num chunks: %w", err) + } + state.NumChunks = int(numChunks) + + // Read ETag length and ETag string + var etagLen uint32 + if err := binary.Read(f, binary.BigEndian, &etagLen); err != nil { + return nil, fmt.Errorf("failed to read etag length: %w", err) + } + if etagLen > 0 { + etagBytes := make([]byte, etagLen) + if _, err := io.ReadFull(f, etagBytes); err != nil { + return nil, fmt.Errorf("failed to read etag: %w", err) + } + state.ETag = string(etagBytes) + } + + // Read ModTime (Unix timestamp in seconds) + var modTimeSec int64 + if err := binary.Read(f, binary.BigEndian, &modTimeSec); err != nil { + return nil, fmt.Errorf("failed to read modtime: %w", err) + } + state.ModTime = time.Unix(modTimeSec, 0) + + // Read bitmap + bitmapLen := (state.NumChunks + 7) / 8 + state.Bitmap = make([]byte, bitmapLen) + if _, err := io.ReadFull(f, state.Bitmap); err != nil { + return nil, fmt.Errorf("failed to read bitmap: %w", err) + } + + return state, nil +} + +// save saves the resume state to binary file +func (rs *resumeState) save(filePath string) error { + statePath := filePath + resumeStateSuffix + tempPath := statePath + ".tmp" + + f, err := os.Create(tempPath) + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + + // 
Write version + if err := binary.Write(f, binary.BigEndian, uint8(resumeFileVersion)); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write version: %w", err) + } + + // Write source + if err := binary.Write(f, binary.BigEndian, uint32(len(rs.Source))); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write source length: %w", err) + } + if _, err := f.WriteString(rs.Source); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write source: %w", err) + } + + // Write destination + if err := binary.Write(f, binary.BigEndian, uint32(len(rs.Destination))); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write destination length: %w", err) + } + if _, err := f.WriteString(rs.Destination); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write destination: %w", err) + } + + // Write size + if err := binary.Write(f, binary.BigEndian, rs.Size); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write size: %w", err) + } + + // Write chunk size + if err := binary.Write(f, binary.BigEndian, rs.ChunkSize); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write chunk size: %w", err) + } + + // Write num chunks (cast to int32 for fixed size) + if err := binary.Write(f, binary.BigEndian, int32(rs.NumChunks)); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write num chunks: %w", err) + } + + // Write ETag + if err := binary.Write(f, binary.BigEndian, uint32(len(rs.ETag))); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write etag length: %w", err) + } + if _, err := f.WriteString(rs.ETag); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write etag: %w", err) + } + + // Write ModTime (Unix timestamp) + if err := binary.Write(f, binary.BigEndian, rs.ModTime.Unix()); err != nil { + f.Close() + 
os.Remove(tempPath) + return fmt.Errorf("failed to write modtime: %w", err) + } + + // Write bitmap + if _, err := f.Write(rs.Bitmap); err != nil { + f.Close() + os.Remove(tempPath) + return fmt.Errorf("failed to write bitmap: %w", err) + } + + if err := f.Close(); err != nil { + os.Remove(tempPath) + return fmt.Errorf("failed to close file: %w", err) + } + + // Atomic rename + if err := os.Rename(tempPath, statePath); err != nil { + os.Remove(tempPath) + return fmt.Errorf("failed to rename file: %w", err) + } + + return nil +} + +// deleteResumeState deletes the resume state file +func deleteResumeState(filePath string) error { + statePath := filePath + resumeStateSuffix + if err := os.Remove(statePath); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to delete resume state: %w", err) + } + return nil +} + +// isChunkComplete checks if a chunk is marked as complete +func (rs *resumeState) isChunkComplete(chunkIdx int) bool { + if chunkIdx < 0 || chunkIdx >= rs.NumChunks { + return false + } + byteIdx := chunkIdx / 8 + bitIdx := uint(chunkIdx % 8) + return (rs.Bitmap[byteIdx] & (1 << (7 - bitIdx))) != 0 +} + +// markChunkComplete marks a chunk as complete +func (rs *resumeState) markChunkComplete(chunkIdx int) { + if chunkIdx < 0 || chunkIdx >= rs.NumChunks { + return + } + byteIdx := chunkIdx / 8 + bitIdx := uint(chunkIdx % 8) + rs.Bitmap[byteIdx] |= 1 << (7 - bitIdx) +} + +// countComplete returns the number of complete chunks +func (rs *resumeState) countComplete() int { + count := 0 + for i := 0; i < rs.NumChunks; i++ { + if rs.isChunkComplete(i) { + count++ + } + } + return count +} + +// validate checks if the resume state is valid for the given parameters +func (rs *resumeState) validate(source string, size int64, chunkSize int64) error { + if rs.Source != source { + return fmt.Errorf("source mismatch: expected %s, got %s", rs.Source, source) + } + if rs.Size != size { + return fmt.Errorf("size mismatch: expected %d, got %d", rs.Size, size) + 
} + if rs.ChunkSize != chunkSize { + return fmt.Errorf("chunk size mismatch: expected %d, got %d", rs.ChunkSize, chunkSize) + } + return nil +} + +// newResumeState creates a new resume state with empty bitmap +func newResumeState(source, destination string, size, chunkSize int64) *resumeState { + numChunks := calculateNumChunks(size, chunkSize) + bitmapLen := (numChunks + 7) / 8 + return &resumeState{ + Source: source, + Destination: destination, + Size: size, + ChunkSize: chunkSize, + NumChunks: numChunks, + Bitmap: make([]byte, bitmapLen), + } +} + // Return a boolean as to whether we should use multi thread copy for // this transfer func doMultiThreadCopy(ctx context.Context, f fs.Fs, src fs.Object) bool { @@ -93,6 +376,7 @@ func (mc *multiThreadCopyState) copyChunk(ctx context.Context, chunk int, writer defer fs.CheckClose(rc, &err) var rs io.ReadSeeker + if mc.noBuffering { // Read directly if we are sure we aren't going to seek // and account with accounting @@ -134,6 +418,13 @@ func calculateNumChunks(size int64, chunkSize int64) int { func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, concurrency int, tr *accounting.Transfer, options ...fs.OpenOption) (newDst fs.Object, err error) { openChunkWriter := f.Features().OpenChunkWriter ci := fs.GetConfig(ctx) + + // Check if resume is enabled and destination supports it + resumeEnabled := ci.MultiThreadResume && f.Features().IsLocal + + var state *resumeState + var localPath string + noBuffering := false usingOpenWriterAt := false if openChunkWriter == nil { @@ -174,7 +465,7 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, uploadedOK := false defer atexit.OnError(&err, func() { cancel() - if info.LeavePartsOnError || uploadedOK { + if info.LeavePartsOnError || uploadedOK || resumeEnabled { return } fs.Debugf(src, "multi-thread copy: cancelling transfer on exit") @@ -205,6 +496,52 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, 
src fs.Object, concurrency = 1 } + // Initialize or validate resume state + if resumeEnabled { + localPath = filepath.Join(f.Root(), remote) + + // Try to load existing resume state + state, err = loadResumeState(localPath) + if err != nil { + fs.Debugf(remote, "Failed to load resume state, starting fresh: %v", err) + state = nil + } else if state != nil { + // Validate state against current file + if err := state.validate(src.Remote(), src.Size(), info.ChunkSize); err != nil { + fs.Debugf(remote, "Resume state invalid (%v), starting fresh", err) + state = nil + } else { + // Validate ETag if available + currentETag, err := src.Hash(ctx, hash.MD5) + if err == nil && currentETag != "" && state.ETag != "" && currentETag != state.ETag { + fs.Logf(remote, "ETag mismatch (source changed), starting fresh download") + state = nil + } else if !state.ModTime.IsZero() { + currentModTime := src.ModTime(ctx) + if !currentModTime.Equal(state.ModTime) { + fs.Logf(remote, "Modification time mismatch (source changed), starting fresh download") + state = nil + } + } + } + } + + if state == nil { + state = newResumeState(src.Remote(), localPath, src.Size(), info.ChunkSize) + // Store ETag and ModTime + etag, err := src.Hash(ctx, hash.MD5) + if err == nil { + state.ETag = etag + } + state.ModTime = src.ModTime(ctx) + } + + completed := state.countComplete() + if completed > 0 { + fs.Logf(remote, "Resuming download: %d/%d chunks already complete", completed, state.NumChunks) + } + } + g, gCtx := errgroup.WithContext(uploadCtx) g.SetLimit(concurrency) @@ -220,15 +557,55 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, // Make accounting mc.acc = tr.Account(gCtx, nil) + // Account for already-downloaded bytes to show correct progress position + if resumeEnabled && state != nil { + var completedBytes int64 + for chunkIdx := range numChunks { + if state.isChunkComplete(chunkIdx) { + chunkStart := int64(chunkIdx) * info.ChunkSize + chunkEnd := 
min(chunkStart+info.ChunkSize, src.Size()) + completedBytes += chunkEnd - chunkStart + } + } + if completedBytes > 0 { + mc.acc.AccountReadN(completedBytes) + } + } + fs.Debugf(src, "Starting multi-thread copy with %d chunks of size %v with %v parallel streams", mc.numChunks, fs.SizeSuffix(mc.partSize), concurrency) + + // Track chunks for state saving + var stateMu sync.Mutex + for chunk := range mc.numChunks { + // Skip completed chunks if resuming + if resumeEnabled && state != nil && state.isChunkComplete(chunk) { + continue + } + // Fail fast, in case an errgroup managed function returns an error if gCtx.Err() != nil { break } chunk := chunk g.Go(func() error { - return mc.copyChunk(gCtx, chunk, chunkWriter) + err := mc.copyChunk(gCtx, chunk, chunkWriter) + if err != nil { + return err + } + + // Mark as complete and save state (only after successful write) + if resumeEnabled && state != nil { + stateMu.Lock() + state.markChunkComplete(chunk) + if localPath != "" { + if saveErr := state.save(localPath); saveErr != nil { + fs.Debugf(remote, "Failed to save resume state: %v", saveErr) + } + } + stateMu.Unlock() + } + return nil }) } @@ -242,6 +619,13 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, } uploadedOK = true // file is definitely uploaded OK so no need to abort + // Delete state file on successful completion + if resumeEnabled && localPath != "" { + if err := deleteResumeState(localPath); err != nil { + fs.Debugf(remote, "Failed to delete resume state: %v", err) + } + } + obj, err := f.NewObject(ctx, remote) if err != nil { return nil, fmt.Errorf("multi-thread copy: failed to find object after copy: %w", err) diff --git a/fs/operations/operations.go b/fs/operations/operations.go index a10ad41b2..5031e6f7d 100644 --- a/fs/operations/operations.go +++ b/fs/operations/operations.go @@ -1873,8 +1873,50 @@ func copyURLFn(ctx context.Context, dstFileName string, url string, autoFilename return fn(ctx, dstFileName, 
resp.Body, resp.ContentLength, modTime) } +func CopyURLMulti(ctx context.Context, fdst fs.Fs, dstFileName string, srcObj fs.Object, overwrite bool) (dst fs.Object, err error) { + ci := fs.GetConfig(ctx) + + destObj, err := fdst.NewObject(ctx, dstFileName) + + if err != nil && !errors.Is(err, fs.ErrorObjectNotFound) { + return nil, err + } + + needsCopy := overwrite || errors.Is(err, fs.ErrorObjectNotFound) || (destObj != nil && NeedTransfer(ctx, destObj, srcObj)) + + if needsCopy { + tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, srcObj.Size(), nil, fdst) + defer func() { + tr.Done(ctx, err) + }() + return multiThreadCopy(ctx, fdst, dstFileName, srcObj, ci.MultiThreadStreams, tr) + } + return destObj, nil +} + // CopyURL copies the data from the url to (fdst, dstFileName) func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string, autoFilename, dstFileNameFromHeader bool, noClobber bool) (dst fs.Object, err error) { + ci := fs.GetConfig(ctx) + if ci.MultiThreadStreams > 0 { + filename := dstFileName + srcObj, err := object.NewHTTPObject(ctx, url, dstFileNameFromHeader) + if err != nil { + return nil, err + } + if autoFilename { + filename = srcObj.Remote() + if filename == "." 
|| filename == "/" { + return nil, fmt.Errorf("CopyURL failed: file name wasn't found in url") + } + } + if noClobber { + _, err = fdst.NewObject(ctx, filename) + if err == nil { + return nil, errors.New("CopyURL failed: file already exist") + } + } + return CopyURLMulti(ctx, fdst, filename, srcObj, false) + } err = copyURLFn(ctx, dstFileName, url, autoFilename, dstFileNameFromHeader, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) { if noClobber { _, err = fdst.NewObject(ctx, dstFileName) From 8771492bf07eeb4fae9e922c02f3723e5ca5c9c5 Mon Sep 17 00:00:00 2001 From: Benji Date: Wed, 4 Feb 2026 16:29:27 +0100 Subject: [PATCH 4/9] fix: remove NUL characters to fix build --- backend/all/all.go | Bin 6960 -> 6906 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/backend/all/all.go b/backend/all/all.go index 64be00614a0d5f9a459b20e5ba0d75baa18efb7a..0018a9643b4a96a9680c087d542e3634e9714665 100644 GIT binary patch delta 15 WcmdmB_RDla$7By?kIhVsT2cTtDFsdd delta 59 zcmexmy1{HhhqgY0K7#^-BSSJn2}34B8AB?Nmj-0zfLX;1l?=r|nN)^chT@Hll~OFc H3|tHVtho&f From d0fbe35f7b92d2125b9837980e3fa13755a4dd8a Mon Sep 17 00:00:00 2001 From: Benji Date: Wed, 4 Feb 2026 16:38:04 +0100 Subject: [PATCH 5/9] chore: convert all Go files to UTF-8 encoding - Converted 1059 .go files from various encodings to UTF-8 - Ensures consistent file encoding across the repository - Prevents encoding-related issues in CI/CD pipelines --- backend/alias/alias.go | 2 +- backend/alias/alias_internal_test.go | 2 +- backend/alist/alist.go | 2 +- backend/all/all.go | Bin 6906 -> 6913 bytes backend/alldebrid/alldebrid.go | 2 +- backend/alldebrid/alldebrid_test.go | 2 +- backend/alldebrid/api/types.go | 2 +- backend/azureblob/azureblob.go | 2 +- backend/azureblob/azureblob_internal_test.go | 2 +- backend/azureblob/azureblob_test.go | 2 +- backend/azureblob/azureblob_unsupported.go | 2 +- backend/azurefiles/azurefiles.go | 2 +- 
.../azurefiles/azurefiles_internal_test.go | 2 +- backend/azurefiles/azurefiles_test.go | 2 +- backend/azurefiles/azurefiles_unsupported.go | 2 +- backend/b2/api/types.go | 2 +- backend/b2/api/types_test.go | 2 +- backend/b2/b2.go | 2 +- backend/b2/b2_internal_test.go | 2 +- backend/b2/b2_test.go | 2 +- backend/b2/upload.go | 2 +- backend/box/api/types.go | 2 +- backend/box/box.go | 2 +- backend/box/box_test.go | 2 +- backend/box/upload.go | 2 +- backend/cache/cache.go | 2 +- backend/cache/cache_internal_test.go | 2 +- backend/cache/cache_test.go | 2 +- backend/cache/cache_unsupported.go | 2 +- backend/cache/cache_upload_test.go | 2 +- backend/cache/directory.go | 2 +- backend/cache/handle.go | 2 +- backend/cache/object.go | 2 +- backend/cache/plex.go | 2 +- backend/cache/storage_memory.go | 2 +- backend/cache/storage_persistent.go | 2 +- backend/cache/utils_test.go | 2 +- backend/chunker/chunker.go | 2 +- backend/chunker/chunker_internal_test.go | 2 +- backend/chunker/chunker_test.go | 2 +- backend/cloudinary/api/types.go | 2 +- backend/cloudinary/cloudinary.go | 2 +- backend/cloudinary/cloudinary_test.go | 2 +- backend/combine/combine.go | 2 +- backend/combine/combine_internal_test.go | 2 +- backend/combine/combine_test.go | 2 +- backend/compress/compress.go | 2 +- backend/compress/compress_test.go | 2 +- backend/crypt/cipher.go | 2 +- backend/crypt/cipher_test.go | 2 +- backend/crypt/crypt.go | 2 +- backend/crypt/crypt_internal_test.go | 2 +- backend/crypt/crypt_test.go | 2 +- backend/crypt/pkcs7/pkcs7.go | 2 +- backend/crypt/pkcs7/pkcs7_test.go | 2 +- backend/doi/api/dataversetypes.go | 2 +- backend/doi/api/inveniotypes.go | 2 +- backend/doi/api/types.go | 2 +- backend/doi/dataverse.go | 2 +- backend/doi/doi.go | 2 +- backend/doi/doi_internal_test.go | 2 +- backend/doi/doi_test.go | 2 +- backend/doi/invenio.go | 2 +- backend/doi/link_header.go | 2 +- backend/doi/link_header_internal_test.go | 2 +- backend/doi/zenodo.go | 2 +- backend/drive/drive.go | 2 +- 
backend/drive/drive_internal_test.go | 2 +- backend/drive/drive_test.go | 2 +- backend/drive/metadata.go | 2 +- backend/drive/upload.go | 2 +- backend/dropbox/batcher.go | 2 +- backend/dropbox/dbhash/dbhash.go | 2 +- backend/dropbox/dbhash/dbhash_test.go | 2 +- backend/dropbox/dropbox.go | 2 +- backend/dropbox/dropbox_internal_test.go | 2 +- backend/dropbox/dropbox_test.go | 2 +- backend/fichier/api.go | 2 +- backend/fichier/fichier.go | 2 +- backend/fichier/fichier_test.go | 2 +- backend/fichier/object.go | 2 +- backend/fichier/structs.go | 2 +- backend/filefabric/api/types.go | 2 +- backend/filefabric/filefabric.go | 2 +- backend/filefabric/filefabric_test.go | 2 +- backend/filelu/api/types.go | 2 +- backend/filelu/filelu.go | 2 +- backend/filelu/filelu_client.go | 2 +- backend/filelu/filelu_file_uploader.go | 2 +- backend/filelu/filelu_helper.go | 2 +- backend/filelu/filelu_object.go | 2 +- backend/filelu/filelu_test.go | 2 +- backend/filelu/utils.go | 2 +- backend/filescom/filescom.go | 2 +- backend/filescom/filescom_test.go | 2 +- backend/ftp/ftp.go | 2 +- backend/ftp/ftp_internal_test.go | 2 +- backend/ftp/ftp_test.go | 2 +- backend/gofile/api/types.go | 2 +- backend/gofile/gofile.go | 2 +- backend/gofile/gofile_test.go | 2 +- .../googlecloudstorage/googlecloudstorage.go | 2 +- .../googlecloudstorage_test.go | 2 +- backend/googlephotos/albums.go | 2 +- backend/googlephotos/albums_test.go | 2 +- backend/googlephotos/api/types.go | 2 +- backend/googlephotos/googlephotos.go | 2 +- backend/googlephotos/googlephotos_test.go | 2 +- backend/googlephotos/pattern.go | 2 +- backend/googlephotos/pattern_test.go | 2 +- backend/hasher/commands.go | 2 +- backend/hasher/hasher.go | 2 +- backend/hasher/hasher_internal_test.go | 2 +- backend/hasher/hasher_test.go | 2 +- backend/hasher/kv.go | 2 +- backend/hasher/object.go | 2 +- backend/hdfs/fs.go | 2 +- backend/hdfs/hdfs.go | 2 +- backend/hdfs/hdfs_test.go | 2 +- backend/hdfs/hdfs_unsupported.go | 2 +- backend/hdfs/object.go 
| 2 +- backend/hidrive/api/queries.go | 2 +- backend/hidrive/api/types.go | 2 +- backend/hidrive/helpers.go | 2 +- backend/hidrive/hidrive.go | 2 +- backend/hidrive/hidrive_test.go | 2 +- backend/hidrive/hidrivehash/hidrivehash.go | 2 +- .../hidrive/hidrivehash/hidrivehash_test.go | 2 +- .../hidrive/hidrivehash/internal/internal.go | 2 +- backend/http/http.go | 2 +- backend/http/http_internal_test.go | 2 +- backend/iclouddrive/api/client.go | 2 +- backend/iclouddrive/api/drive.go | 2 +- backend/iclouddrive/api/photos.go | 2 +- backend/iclouddrive/api/session.go | 2 +- backend/iclouddrive/icloud.go | 2 +- backend/iclouddrive/iclouddrive.go | 2 +- backend/iclouddrive/iclouddrive_test.go | 2 +- .../iclouddrive/iclouddrive_unsupported.go | 2 +- backend/iclouddrive/icloudphotos.go | 2 +- backend/imagekit/client/client.go | 2 +- backend/imagekit/client/media.go | 2 +- backend/imagekit/client/upload.go | 2 +- backend/imagekit/client/url.go | 2 +- backend/imagekit/imagekit.go | 2 +- backend/imagekit/imagekit_test.go | 2 +- backend/imagekit/util.go | 2 +- backend/internetarchive/internetarchive.go | 2 +- .../internetarchive/internetarchive_test.go | 2 +- backend/jottacloud/api/types.go | 2 +- backend/jottacloud/api/types_test.go | 2 +- backend/jottacloud/jottacloud.go | 2 +- .../jottacloud/jottacloud_internal_test.go | 2 +- backend/jottacloud/jottacloud_test.go | 2 +- backend/koofr/koofr.go | 2 +- backend/koofr/koofr_test.go | 2 +- backend/linkbox/linkbox.go | 2 +- backend/linkbox/linkbox_test.go | 2 +- backend/local/about_unix.go | 2 +- backend/local/about_windows.go | 2 +- backend/local/clone_darwin.go | 2 +- backend/local/fadvise_other.go | 2 +- backend/local/fadvise_unix.go | 2 +- backend/local/lchmod.go | 2 +- backend/local/lchmod_unix.go | 2 +- backend/local/lchtimes.go | 2 +- backend/local/lchtimes_unix.go | 2 +- backend/local/lchtimes_windows.go | 2 +- backend/local/local.go | 2 +- backend/local/local_internal_test.go | 2 +- 
backend/local/local_internal_windows_test.go | 2 +- backend/local/local_test.go | 2 +- backend/local/metadata.go | 2 +- backend/local/metadata_bsd.go | 2 +- backend/local/metadata_linux.go | 2 +- backend/local/metadata_other.go | 2 +- backend/local/metadata_unix.go | 2 +- backend/local/metadata_windows.go | 2 +- backend/local/read_device_other.go | 2 +- backend/local/read_device_unix.go | 2 +- backend/local/remove_other.go | 2 +- backend/local/remove_test.go | 2 +- backend/local/remove_windows.go | 2 +- backend/local/setbtime.go | 2 +- backend/local/setbtime_windows.go | 2 +- backend/local/symlink.go | 2 +- backend/local/symlink_other.go | 2 +- backend/local/tests_test.go | 2 +- backend/local/xattr.go | 2 +- backend/local/xattr_unsupported.go | 2 +- backend/mailru/api/bin.go | 2 +- backend/mailru/api/helpers.go | 2 +- backend/mailru/api/m1.go | 2 +- backend/mailru/mailru.go | 2 +- backend/mailru/mailru_test.go | 2 +- backend/mailru/mrhash/mrhash.go | 2 +- backend/mailru/mrhash/mrhash_test.go | 2 +- backend/mega/mega.go | 2 +- backend/mega/mega_test.go | 2 +- backend/memory/memory.go | 2 +- backend/memory/memory_internal_test.go | 2 +- backend/memory/memory_test.go | 2 +- backend/netstorage/netstorage.go | 2 +- backend/netstorage/netstorage_test.go | 2 +- backend/onedrive/api/types.go | 2 +- backend/onedrive/metadata.go | 2 +- backend/onedrive/metadata_test.go | 2 +- backend/onedrive/onedrive.go | 2 +- backend/onedrive/onedrive_internal_test.go | 2 +- backend/onedrive/onedrive_test.go | 2 +- backend/onedrive/quickxorhash/quickxorhash.go | 2 +- .../quickxorhash/quickxorhash_test.go | 2 +- backend/opendrive/opendrive.go | 2 +- backend/opendrive/opendrive_test.go | 2 +- backend/opendrive/types.go | 2 +- backend/oracleobjectstorage/byok.go | 2 +- backend/oracleobjectstorage/client.go | 2 +- backend/oracleobjectstorage/command.go | 2 +- backend/oracleobjectstorage/copy.go | 2 +- backend/oracleobjectstorage/multipart.go | 2 +- backend/oracleobjectstorage/object.go | 2 +- 
backend/oracleobjectstorage/options.go | 2 +- .../oracleobjectstorage.go | 2 +- .../oracleobjectstorage_test.go | 2 +- .../oracleobjectstorage_unsupported.go | 2 +- backend/oracleobjectstorage/waiter.go | 2 +- backend/pcloud/api/types.go | 2 +- backend/pcloud/pcloud.go | 2 +- backend/pcloud/pcloud_test.go | 2 +- backend/pcloud/writer_at.go | 2 +- backend/pikpak/api/types.go | 2 +- backend/pikpak/api/types_test.go | 2 +- backend/pikpak/helper.go | 2 +- backend/pikpak/multipart.go | 2 +- backend/pikpak/pikpak.go | 2 +- backend/pikpak/pikpak_test.go | 2 +- backend/pixeldrain/api_client.go | 2 +- backend/pixeldrain/pixeldrain.go | 2 +- backend/pixeldrain/pixeldrain_test.go | 2 +- backend/premiumizeme/api/types.go | 2 +- backend/premiumizeme/premiumizeme.go | 2 +- backend/premiumizeme/premiumizeme_test.go | 2 +- backend/protondrive/protondrive.go | 2 +- backend/protondrive/protondrive_test.go | 2 +- backend/putio/error.go | 2 +- backend/putio/fs.go | 2 +- backend/putio/object.go | 2 +- backend/putio/putio.go | 2 +- backend/putio/putio_test.go | 2 +- backend/qingstor/qingstor.go | 2 +- backend/qingstor/qingstor_test.go | 2 +- backend/qingstor/qingstor_unsupported.go | 2 +- backend/qingstor/upload.go | 2 +- backend/quatrix/api/types.go | 2 +- backend/quatrix/quatrix.go | 2 +- backend/quatrix/quatrix_test.go | 2 +- backend/quatrix/upload_memory.go | 2 +- backend/s3/gen_setfrom.go | 2 +- backend/s3/ibm_signer.go | 2 +- backend/s3/ibm_signer_test.go | 2 +- backend/s3/s3.go | 2 +- backend/s3/s3_internal_test.go | 2 +- backend/s3/s3_test.go | 2 +- backend/s3/setfrom.go | 2 +- backend/s3/v2sign.go | 2 +- backend/seafile/api/types.go | 2 +- backend/seafile/object.go | 2 +- backend/seafile/pacer.go | 2 +- backend/seafile/renew.go | 2 +- backend/seafile/renew_test.go | 2 +- backend/seafile/seafile.go | 2 +- backend/seafile/seafile_internal_test.go | 2 +- backend/seafile/seafile_test.go | 2 +- backend/seafile/webapi.go | 2 +- backend/sftp/sftp.go | 2 +- 
backend/sftp/sftp_internal_test.go | 2 +- backend/sftp/sftp_test.go | 2 +- backend/sftp/sftp_unsupported.go | 2 +- backend/sftp/ssh.go | 2 +- backend/sftp/ssh_external.go | 2 +- backend/sftp/ssh_internal.go | 2 +- backend/sftp/stringlock.go | 2 +- backend/sftp/stringlock_test.go | 2 +- backend/sharefile/api/types.go | 2 +- backend/sharefile/generate_tzdata.go | 2 +- backend/sharefile/sharefile.go | 2 +- backend/sharefile/sharefile_test.go | 2 +- backend/sharefile/tzdata_vfsdata.go | 2 +- backend/sharefile/upload.go | 2 +- backend/sia/api/types.go | 2 +- backend/sia/sia.go | 2 +- backend/sia/sia_test.go | 2 +- backend/smb/connpool.go | 2 +- backend/smb/filepool.go | 2 +- backend/smb/filepool_test.go | 2 +- backend/smb/kerberos.go | 2 +- backend/smb/kerberos_test.go | 2 +- backend/smb/smb.go | 2 +- backend/smb/smb_internal_test.go | 2 +- backend/smb/smb_test.go | 2 +- backend/storj/fs.go | 2 +- backend/storj/object.go | 2 +- backend/storj/storj_test.go | 2 +- backend/storj/storj_unsupported.go | 2 +- backend/sugarsync/api/types.go | 2 +- backend/sugarsync/sugarsync.go | 2 +- backend/sugarsync/sugarsync_internal_test.go | 2 +- backend/sugarsync/sugarsync_test.go | 2 +- backend/swift/auth.go | 2 +- backend/swift/swift.go | 2 +- backend/swift/swift_internal_test.go | 2 +- backend/swift/swift_test.go | 2 +- backend/teldrive/api/types.go | 2 +- backend/teldrive/tdhash/tdhash.go | 2 +- backend/teldrive/teldrive.go | 2 +- backend/teldrive/teldrive_test.go | 2 +- backend/teldrive/upload.go | 2 +- backend/terabox/api.go | 2 +- backend/terabox/api/errors.go | 2 +- backend/terabox/api/types.go | 2 +- backend/terabox/terabox.go | 2 +- backend/terabox/terabox_test.go | 2 +- backend/terabox/util.go | 2 +- backend/ulozto/api/types.go | 2 +- backend/ulozto/ulozto.go | 2 +- backend/ulozto/ulozto_test.go | 2 +- backend/union/common/options.go | 2 +- backend/union/entry.go | 2 +- backend/union/errors.go | 2 +- backend/union/errors_test.go | 2 +- backend/union/policy/all.go | 2 +- 
backend/union/policy/epall.go | 2 +- backend/union/policy/epff.go | 2 +- backend/union/policy/eplfs.go | 2 +- backend/union/policy/eplno.go | 2 +- backend/union/policy/eplus.go | 2 +- backend/union/policy/epmfs.go | 2 +- backend/union/policy/eprand.go | 2 +- backend/union/policy/ff.go | 2 +- backend/union/policy/lfs.go | 2 +- backend/union/policy/lno.go | 2 +- backend/union/policy/lus.go | 2 +- backend/union/policy/mfs.go | 2 +- backend/union/policy/newest.go | 2 +- backend/union/policy/policy.go | 2 +- backend/union/policy/rand.go | 2 +- backend/union/union.go | 2 +- backend/union/union_internal_test.go | 2 +- backend/union/union_test.go | 2 +- backend/union/upstream/upstream.go | 2 +- backend/uptobox/api/types.go | 2 +- backend/uptobox/uptobox.go | 2 +- backend/uptobox/uptobox_test.go | 2 +- backend/webdav/api/types.go | 2 +- backend/webdav/chunking.go | 2 +- backend/webdav/odrvcookie/fetch.go | 2 +- backend/webdav/odrvcookie/renew.go | 2 +- backend/webdav/tus-errors.go | 2 +- backend/webdav/tus-upload.go | 2 +- backend/webdav/tus-uploader.go | 2 +- backend/webdav/tus.go | 2 +- backend/webdav/webdav.go | 2 +- backend/webdav/webdav_internal_test.go | 2 +- backend/webdav/webdav_test.go | 2 +- backend/yandex/api/types.go | 2 +- backend/yandex/yandex.go | 2 +- backend/yandex/yandex_test.go | 2 +- backend/zoho/api/types.go | 2 +- backend/zoho/zoho.go | 2 +- backend/zoho/zoho_test.go | 2 +- bin/check-merged.go | 2 +- bin/cross-compile.go | 2 +- bin/get-github-release.go | 2 +- bin/make_bisync_docs.go | 2 +- bin/not-in-stable.go | 2 +- bin/resource_windows.go | 2 +- bin/rules.go | 2 +- bin/test_independence.go | 2 +- cmd/about/about.go | 2 +- cmd/all/all.go | 2 +- cmd/authorize/authorize.go | 2 +- cmd/authorize/authorize_test.go | 2 +- cmd/backend/backend.go | 2 +- cmd/bisync/bilib/canonical.go | 2 +- cmd/bisync/bilib/files.go | 2 +- cmd/bisync/bilib/names.go | 2 +- cmd/bisync/bilib/output.go | 2 +- cmd/bisync/bisync_debug_test.go | 2 +- cmd/bisync/bisync_test.go | 2 +- 
cmd/bisync/checkfn.go | 2 +- cmd/bisync/cmd.go | 2 +- cmd/bisync/compare.go | 2 +- cmd/bisync/deltas.go | 2 +- cmd/bisync/help.go | 2 +- cmd/bisync/listing.go | 2 +- cmd/bisync/lockfile.go | 2 +- cmd/bisync/log.go | 2 +- cmd/bisync/march.go | 2 +- cmd/bisync/operations.go | 2 +- cmd/bisync/queue.go | 2 +- cmd/bisync/rc.go | 2 +- cmd/bisync/resolve.go | 2 +- cmd/bisync/resync.go | 2 +- cmd/cachestats/cachestats.go | 2 +- cmd/cachestats/cachestats_unsupported.go | 2 +- cmd/cat/cat.go | 2 +- cmd/check/check.go | 2 +- cmd/checksum/checksum.go | 2 +- cmd/cleanup/cleanup.go | 2 +- cmd/cmd.go | 2 +- cmd/cmount/arch.go | 2 +- cmd/cmount/fs.go | 2 +- cmd/cmount/mount.go | 2 +- cmd/cmount/mount_brew.go | 2 +- cmd/cmount/mount_test.go | 2 +- cmd/cmount/mount_unsupported.go | 2 +- cmd/cmount/mountpoint_other.go | 2 +- cmd/cmount/mountpoint_windows.go | 2 +- cmd/completion.go | 2 +- cmd/config/config.go | 2 +- cmd/config/config_test.go | 2 +- cmd/convmv/convmv.go | 2 +- cmd/convmv/convmv_test.go | 2 +- cmd/copy/copy.go | 2 +- cmd/copyto/copyto.go | 2 +- cmd/copyurl/copyurl.go | 2 +- cmd/copyurl/copyurl_test.go | 2 +- cmd/cryptcheck/cryptcheck.go | 2 +- cmd/cryptdecode/cryptdecode.go | 2 +- cmd/dedupe/dedupe.go | 2 +- cmd/delete/delete.go | 2 +- cmd/deletefile/deletefile.go | 2 +- cmd/genautocomplete/genautocomplete.go | 2 +- cmd/genautocomplete/genautocomplete_bash.go | 2 +- cmd/genautocomplete/genautocomplete_fish.go | 2 +- .../genautocomplete_powershell.go | 2 +- cmd/genautocomplete/genautocomplete_test.go | 2 +- cmd/genautocomplete/genautocomplete_zsh.go | 2 +- cmd/gendocs/gendocs.go | 2 +- cmd/gitannex/configparse.go | 2 +- cmd/gitannex/e2e_test.go | 2 +- cmd/gitannex/gitannex.go | 2 +- cmd/gitannex/gitannex_test.go | 2 +- cmd/gitannex/layout.go | 2 +- cmd/hashsum/hashsum.go | 2 +- cmd/help.go | 2 +- cmd/link/link.go | 2 +- cmd/listremotes/listremotes.go | 2 +- cmd/ls/ls.go | 2 +- cmd/ls/lshelp/lshelp.go | 2 +- cmd/lsd/lsd.go | 2 +- cmd/lsf/lsf.go | 2 +- cmd/lsf/lsf_test.go 
| 2 +- cmd/lsjson/lsjson.go | 2 +- cmd/lsl/lsl.go | 2 +- cmd/md5sum/md5sum.go | 2 +- cmd/mkdir/mkdir.go | 2 +- cmd/mount/dir.go | 2 +- cmd/mount/file.go | 2 +- cmd/mount/fs.go | 2 +- cmd/mount/handle.go | 2 +- cmd/mount/mount.go | 2 +- cmd/mount/mount_test.go | 2 +- cmd/mount/mount_unsupported.go | 2 +- cmd/mount/test/seek_speed.go | 2 +- cmd/mount/test/seeker.go | 2 +- cmd/mount/test/seekers.go | 2 +- cmd/mount2/file.go | 2 +- cmd/mount2/fs.go | 2 +- cmd/mount2/mount.go | 2 +- cmd/mount2/mount_test.go | 2 +- cmd/mount2/mount_unsupported.go | 2 +- cmd/mount2/node.go | 2 +- cmd/mountlib/check_linux.go | 2 +- cmd/mountlib/check_other.go | 2 +- cmd/mountlib/mount.go | 2 +- cmd/mountlib/rc.go | 2 +- cmd/mountlib/rc_test.go | 2 +- cmd/mountlib/utils.go | 2 +- cmd/move/move.go | 2 +- cmd/moveto/moveto.go | 2 +- cmd/ncdu/ncdu.go | 2 +- cmd/ncdu/ncdu_unsupported.go | 2 +- cmd/ncdu/scan/scan.go | 2 +- cmd/nfsmount/nfsmount.go | 2 +- cmd/nfsmount/nfsmount_test.go | 2 +- cmd/nfsmount/nfsmount_unsupported.go | 2 +- cmd/obscure/obscure.go | 2 +- cmd/progress.go | 2 +- cmd/purge/purge.go | 2 +- cmd/rc/rc.go | 2 +- cmd/rcat/rcat.go | 2 +- cmd/rcd/rcd.go | 2 +- cmd/reveal/reveal.go | 2 +- cmd/rmdir/rmdir.go | 2 +- cmd/rmdirs/rmdirs.go | 2 +- cmd/selfupdate/noselfupdate.go | 2 +- cmd/selfupdate/selfupdate.go | 2 +- cmd/selfupdate/selfupdate_test.go | 2 +- cmd/selfupdate/verify.go | 2 +- cmd/selfupdate/verify_test.go | 2 +- cmd/selfupdate/writable_unix.go | 2 +- cmd/selfupdate/writable_unsupported.go | 2 +- cmd/selfupdate/writable_windows.go | 2 +- cmd/selfupdate_disabled.go | 2 +- cmd/selfupdate_enabled.go | 2 +- cmd/serve/dlna/cds.go | 2 +- cmd/serve/dlna/cds_test.go | 2 +- cmd/serve/dlna/cms.go | 2 +- cmd/serve/dlna/data/assets_generate.go | 2 +- cmd/serve/dlna/data/assets_vfsdata.go | 2 +- cmd/serve/dlna/data/data.go | 2 +- cmd/serve/dlna/dlna.go | 2 +- cmd/serve/dlna/dlna_test.go | 2 +- cmd/serve/dlna/dlna_util.go | 2 +- cmd/serve/dlna/mrrs.go | 2 +- 
cmd/serve/dlna/upnpav/upnpav.go | 2 +- cmd/serve/docker/api.go | 2 +- cmd/serve/docker/docker.go | 2 +- cmd/serve/docker/docker_test.go | 2 +- cmd/serve/docker/driver.go | 2 +- cmd/serve/docker/options.go | 2 +- cmd/serve/docker/options_test.go | 2 +- cmd/serve/docker/serve.go | 2 +- cmd/serve/docker/systemd.go | 2 +- cmd/serve/docker/systemd_unsupported.go | 2 +- cmd/serve/docker/unix.go | 2 +- cmd/serve/docker/unix_unsupported.go | 2 +- cmd/serve/docker/volume.go | 2 +- cmd/serve/ftp/ftp.go | 2 +- cmd/serve/ftp/ftp_test.go | 2 +- cmd/serve/ftp/ftp_unsupported.go | 2 +- cmd/serve/http/http.go | 2 +- cmd/serve/http/http_test.go | 2 +- cmd/serve/nfs/cache.go | 2 +- cmd/serve/nfs/cache_test.go | 2 +- cmd/serve/nfs/filesystem.go | 2 +- cmd/serve/nfs/handler.go | 2 +- cmd/serve/nfs/nfs.go | 2 +- cmd/serve/nfs/nfs_test.go | 2 +- cmd/serve/nfs/nfs_unsupported.go | 2 +- cmd/serve/nfs/server.go | 2 +- cmd/serve/nfs/symlink_cache_linux.go | 2 +- cmd/serve/nfs/symlink_cache_other.go | 2 +- cmd/serve/proxy/proxy.go | 2 +- cmd/serve/proxy/proxy_code.go | 2 +- cmd/serve/proxy/proxy_test.go | 2 +- cmd/serve/proxy/proxyflags/proxyflags.go | 2 +- cmd/serve/rc.go | 2 +- cmd/serve/rc_test.go | 2 +- cmd/serve/restic/cache.go | 2 +- cmd/serve/restic/cache_test.go | 2 +- cmd/serve/restic/restic.go | 2 +- cmd/serve/restic/restic_appendonly_test.go | 2 +- cmd/serve/restic/restic_privaterepos_test.go | 2 +- cmd/serve/restic/restic_test.go | 2 +- cmd/serve/restic/restic_utils_test.go | 2 +- cmd/serve/restic/stdio_conn.go | 2 +- cmd/serve/s3/backend.go | 2 +- cmd/serve/s3/ioutils.go | 2 +- cmd/serve/s3/list.go | 2 +- cmd/serve/s3/logger.go | 2 +- cmd/serve/s3/pager.go | 2 +- cmd/serve/s3/s3.go | 2 +- cmd/serve/s3/s3_test.go | 2 +- cmd/serve/s3/server.go | 2 +- cmd/serve/s3/utils.go | 2 +- cmd/serve/serve.go | 2 +- cmd/serve/servetest/proxy_code.go | 2 +- cmd/serve/servetest/rc.go | 2 +- cmd/serve/servetest/servetest.go | 2 +- cmd/serve/sftp/connection.go | 2 +- 
cmd/serve/sftp/connection_test.go | 2 +- cmd/serve/sftp/handler.go | 2 +- cmd/serve/sftp/server.go | 2 +- cmd/serve/sftp/sftp.go | 2 +- cmd/serve/sftp/sftp_test.go | 2 +- cmd/serve/sftp/sftp_unsupported.go | 2 +- cmd/serve/webdav/webdav.go | 2 +- cmd/serve/webdav/webdav_test.go | 2 +- cmd/settier/settier.go | 2 +- cmd/sha1sum/sha1sum.go | 2 +- cmd/siginfo_bsd.go | 2 +- cmd/siginfo_others.go | 2 +- cmd/size/size.go | 2 +- cmd/sync/sync.go | 2 +- cmd/test/changenotify/changenotify.go | 2 +- cmd/test/histogram/histogram.go | 2 +- cmd/test/info/base32768.go | 2 +- cmd/test/info/info.go | 2 +- cmd/test/info/internal/build_csv/main.go | 2 +- cmd/test/info/internal/internal.go | 2 +- cmd/test/makefiles/makefiles.go | 2 +- cmd/test/makefiles/speed.go | 2 +- cmd/test/memory/memory.go | 2 +- cmd/test/test.go | 2 +- cmd/touch/touch.go | 2 +- cmd/touch/touch_test.go | 2 +- cmd/tree/tree.go | 2 +- cmd/tree/tree_test.go | 2 +- cmd/version/version.go | 2 +- cmd/version/version_test.go | 2 +- cmdtest/cmdtest.go | 2 +- cmdtest/cmdtest_test.go | 2 +- cmdtest/environment_test.go | 2 +- fs/accounting/accounting.go | 2 +- fs/accounting/accounting_other.go | 2 +- fs/accounting/accounting_test.go | 2 +- fs/accounting/accounting_unix.go | 2 +- fs/accounting/inprogress.go | 2 +- fs/accounting/prometheus.go | 2 +- fs/accounting/stats.go | 2 +- fs/accounting/stats_groups.go | 2 +- fs/accounting/stats_groups_test.go | 2 +- fs/accounting/stats_test.go | 2 +- fs/accounting/token_bucket.go | 2 +- fs/accounting/token_bucket_test.go | 2 +- fs/accounting/tpslimit.go | 2 +- fs/accounting/tpslimit_test.go | 2 +- fs/accounting/transfer.go | 2 +- fs/accounting/transfer_test.go | 2 +- fs/accounting/transfermap.go | 2 +- fs/asyncreader/asyncreader.go | 2 +- fs/asyncreader/asyncreader_test.go | 2 +- fs/backend_config.go | 2 +- fs/backend_config_test.go | 2 +- fs/bits.go | 2 +- fs/bits_test.go | 2 +- fs/bwtimetable.go | 2 +- fs/bwtimetable_test.go | 2 +- fs/cache/cache.go | 2 +- fs/cache/cache_test.go | 2 
+- fs/chunkedreader/chunkedreader.go | 2 +- fs/chunkedreader/chunkedreader_test.go | 2 +- fs/chunkedreader/parallel.go | 2 +- fs/chunkedreader/parallel_test.go | 2 +- fs/chunkedreader/sequential.go | 2 +- fs/chunkedreader/sequential_test.go | 2 +- fs/chunksize/chunksize.go | 2 +- fs/chunksize/chunksize_test.go | 2 +- fs/config.go | 2 +- fs/config/authorize.go | 2 +- fs/config/config.go | 2 +- fs/config/config_read_password.go | 2 +- fs/config/config_read_password_unsupported.go | 2 +- fs/config/config_test.go | 2 +- fs/config/configfile/configfile.go | 2 +- fs/config/configfile/configfile_other.go | 2 +- fs/config/configfile/configfile_test.go | 2 +- fs/config/configfile/configfile_unix.go | 2 +- fs/config/configflags/configflags.go | 2 +- fs/config/configmap/configmap.go | 2 +- .../configmap/configmap_external_test.go | 2 +- fs/config/configmap/configmap_test.go | 2 +- fs/config/configstruct/configstruct.go | 2 +- fs/config/configstruct/configstruct_test.go | 2 +- fs/config/configstruct/internal_test.go | 2 +- fs/config/crypt.go | 2 +- fs/config/crypt_internal_test.go | 2 +- fs/config/crypt_test.go | 2 +- fs/config/default_storage.go | 2 +- fs/config/default_storage_test.go | 2 +- fs/config/flags/flags.go | 2 +- fs/config/obscure/obscure.go | 2 +- fs/config/obscure/obscure_test.go | 2 +- fs/config/rc.go | 2 +- fs/config/rc_test.go | 2 +- fs/config/ui.go | 2 +- fs/config/ui_test.go | 2 +- fs/config_list.go | 2 +- fs/config_list_test.go | 2 +- fs/config_test.go | 2 +- fs/configmap.go | 2 +- fs/countsuffix.go | 2 +- fs/countsuffix_test.go | 2 +- fs/cutoffmode.go | 2 +- fs/cutoffmode_test.go | 2 +- fs/daemon_other.go | 2 +- fs/daemon_unix.go | 2 +- fs/deletemode.go | 2 +- fs/dir.go | 2 +- fs/dir_wrapper.go | 2 +- fs/direntries.go | 2 +- fs/direntries_test.go | 2 +- fs/dirtree/dirtree.go | 2 +- fs/dirtree/dirtree_test.go | 2 +- fs/driveletter/driveletter.go | 2 +- fs/driveletter/driveletter_windows.go | 2 +- fs/dump.go | 2 +- fs/dump_test.go | 2 +- fs/enum.go | 2 +- 
fs/enum_test.go | 2 +- fs/features.go | 2 +- fs/filter/filter.go | 2 +- fs/filter/filter_test.go | 2 +- fs/filter/filterflags/filterflags.go | 2 +- fs/filter/glob.go | 2 +- fs/filter/glob_test.go | 2 +- fs/filter/rules.go | 2 +- fs/fingerprint.go | 2 +- fs/fingerprint_test.go | 2 +- fs/fs.go | 2 +- fs/fs_test.go | 2 +- fs/fserrors/enospc_error.go | 2 +- fs/fserrors/enospc_error_notsupported.go | 2 +- fs/fserrors/error.go | 2 +- fs/fserrors/error_syscall_test.go | 2 +- fs/fserrors/error_test.go | 2 +- fs/fserrors/retriable_errors.go | 2 +- fs/fserrors/retriable_errors_windows.go | 2 +- fs/fshttp/dialer.go | 2 +- fs/fshttp/http.go | 2 +- fs/fshttp/http_test.go | 2 +- fs/fshttp/prometheus.go | 2 +- fs/fspath/fuzz.go | 2 +- fs/fspath/path.go | 2 +- fs/fspath/path_test.go | 2 +- fs/hash/hash.go | 2 +- fs/hash/hash_test.go | 2 +- fs/list/helpers.go | 2 +- fs/list/helpers_test.go | 2 +- fs/list/list.go | 2 +- fs/list/list_test.go | 2 +- fs/list/sorter.go | 2 +- fs/list/sorter_test.go | 2 +- fs/log.go | 2 +- fs/log/event_log.go | 2 +- fs/log/event_log_windows.go | 2 +- fs/log/log.go | 2 +- fs/log/logflags/logflags.go | 2 +- fs/log/redirect_stderr.go | 2 +- fs/log/redirect_stderr_unix.go | 2 +- fs/log/redirect_stderr_windows.go | 2 +- fs/log/slog.go | 2 +- fs/log/slog_test.go | 2 +- fs/log/syslog.go | 2 +- fs/log/syslog_unix.go | 2 +- fs/log/systemd.go | 2 +- fs/log/systemd_unix.go | 2 +- fs/log_test.go | 2 +- fs/logger/logger.go | 2 +- fs/logger/logger_test.go | 2 +- fs/march/march.go | 2 +- fs/march/march_test.go | 2 +- fs/metadata.go | 2 +- fs/metadata_mapper_code.go | 2 +- fs/metadata_test.go | 2 +- fs/mimetype.go | 2 +- fs/mount_helper.go | 2 +- fs/mount_helper_test.go | 2 +- fs/newfs.go | 2 +- fs/newfs_internal_test.go | 2 +- fs/newfs_test.go | 2 +- fs/object/object.go | 2 +- fs/object/object_test.go | 2 +- fs/open_options.go | 2 +- fs/open_options_test.go | 2 +- fs/operations/check.go | 2 +- fs/operations/check_test.go | 2 +- fs/operations/copy.go | 2 +- 
fs/operations/copy_test.go | 2 +- fs/operations/dedupe.go | 2 +- fs/operations/dedupe_test.go | 2 +- fs/operations/listdirsorted_test.go | 2 +- fs/operations/logger.go | 2 +- fs/operations/lsjson.go | 2 +- fs/operations/lsjson_test.go | 2 +- fs/operations/multithread.go | 2 +- fs/operations/multithread_test.go | 2 +- fs/operations/operations.go | 2 +- fs/operations/operations_internal_test.go | 2 +- fs/operations/operations_test.go | 2 +- .../operationsflags/operationsflags.go | 2 +- fs/operations/rc.go | 2 +- fs/operations/rc_test.go | 2 +- fs/operations/reopen.go | 2 +- fs/operations/reopen_test.go | 2 +- fs/override.go | 2 +- fs/override_dir.go | 2 +- fs/override_dir_test.go | 2 +- fs/override_test.go | 2 +- fs/pacer.go | 2 +- fs/parseduration.go | 2 +- fs/parseduration_test.go | 2 +- fs/parsetime.go | 2 +- fs/parsetime_test.go | 2 +- fs/rc/cache.go | 2 +- fs/rc/cache_test.go | 2 +- fs/rc/config.go | 2 +- fs/rc/config_test.go | 2 +- fs/rc/internal.go | 2 +- fs/rc/internal_test.go | 2 +- fs/rc/jobs/job.go | 2 +- fs/rc/jobs/job_test.go | 2 +- fs/rc/js/main.go | 2 +- fs/rc/js/serve.go | 2 +- fs/rc/params.go | 2 +- fs/rc/params_test.go | 2 +- fs/rc/rc.go | 2 +- fs/rc/rc_test.go | 2 +- fs/rc/rcflags/rcflags.go | 2 +- fs/rc/rcserver/metrics.go | 2 +- fs/rc/rcserver/metrics_test.go | 2 +- fs/rc/rcserver/rcserver.go | 2 +- fs/rc/rcserver/rcserver_test.go | 2 +- fs/rc/registry.go | 2 +- fs/rc/webgui/plugins.go | 2 +- fs/rc/webgui/rc.go | 2 +- fs/rc/webgui/rc_test.go | 2 +- fs/rc/webgui/webgui.go | 2 +- fs/registry.go | 2 +- fs/sizesuffix.go | 2 +- fs/sizesuffix_test.go | 2 +- fs/sync/pipe.go | 2 +- fs/sync/pipe_test.go | 2 +- fs/sync/rc.go | 2 +- fs/sync/rc_test.go | 2 +- fs/sync/sync.go | 2 +- fs/sync/sync_test.go | 2 +- fs/sync/sync_transform_test.go | 2 +- fs/terminalcolormode.go | 2 +- fs/terminalcolormode_test.go | 2 +- fs/tristate.go | 2 +- fs/tristate_test.go | 2 +- fs/types.go | 2 +- fs/version.go | 2 +- fs/versioncheck.go | 2 +- fs/versionsuffix.go | 2 +- 
fs/versiontag.go | 2 +- fs/walk/walk.go | 2 +- fs/walk/walk_test.go | 2 +- fstest/fstest.go | 2 +- fstest/fstests/fstests.go | 2 +- fstest/mockdir/dir.go | 2 +- fstest/mockfs/mockfs.go | 2 +- fstest/mockobject/mockobject.go | 2 +- fstest/run.go | 2 +- fstest/runs/config.go | 2 +- fstest/runs/report.go | 2 +- fstest/runs/run.go | 2 +- fstest/runs/run_test.go | 2 +- fstest/test_all/clean.go | 2 +- fstest/test_all/test_all.go | 2 +- fstest/testserver/testserver.go | 2 +- fstest/testy/testy.go | 2 +- lib/atexit/atexit.go | 2 +- lib/atexit/atexit_other.go | 2 +- lib/atexit/atexit_test.go | 2 +- lib/atexit/atexit_unix.go | 2 +- lib/batcher/batcher.go | 2 +- lib/batcher/batcher_test.go | 2 +- lib/batcher/options.go | 2 +- lib/bucket/bucket.go | 2 +- lib/bucket/bucket_test.go | 2 +- lib/buildinfo/arch.go | 2 +- lib/buildinfo/cgo.go | 2 +- lib/buildinfo/osversion.go | 2 +- lib/buildinfo/osversion_windows.go | 2 +- lib/buildinfo/snap.go | 2 +- lib/buildinfo/tags.go | 2 +- lib/cache/cache.go | 2 +- lib/cache/cache_test.go | 2 +- lib/daemonize/daemon_other.go | 2 +- lib/daemonize/daemon_unix.go | 2 +- lib/debug/common.go | 2 +- lib/dircache/dircache.go | 2 +- lib/diskusage/diskusage.go | 2 +- lib/diskusage/diskusage_netbsd.go | 2 +- lib/diskusage/diskusage_openbsd.go | 2 +- lib/diskusage/diskusage_test.go | 2 +- lib/diskusage/diskusage_unix.go | 2 +- lib/diskusage/diskusage_unsupported.go | 2 +- lib/diskusage/diskusage_windows.go | 2 +- lib/encoder/encoder.go | 2 +- lib/encoder/encoder_cases_test.go | 2 +- lib/encoder/encoder_test.go | 2 +- lib/encoder/filename/decode.go | 2 +- lib/encoder/filename/decode_test.go | 2 +- lib/encoder/filename/encode.go | 2 +- lib/encoder/filename/fuzz.go | 2 +- lib/encoder/filename/gentable.go | 2 +- lib/encoder/filename/init.go | 2 +- lib/encoder/internal/gen/main.go | 2 +- lib/encoder/os_darwin.go | 2 +- lib/encoder/os_other.go | 2 +- lib/encoder/os_windows.go | 2 +- lib/encoder/standard.go | 2 +- lib/env/env.go | 2 +- lib/env/env_test.go | 2 
+- lib/errcount/errcount.go | 2 +- lib/errcount/errcount_test.go | 2 +- lib/errors/errors.go | 2 +- lib/errors/errors_test.go | 2 +- lib/exitcode/exitcode.go | 2 +- lib/file/driveletter_other.go | 2 +- lib/file/driveletter_windows.go | 2 +- lib/file/file.go | 2 +- lib/file/file_other.go | 2 +- lib/file/file_test.go | 2 +- lib/file/file_windows.go | 2 +- lib/file/mkdir.go | 2 +- lib/file/preallocate.go | 2 +- lib/file/preallocate_other.go | 2 +- lib/file/preallocate_unix.go | 2 +- lib/file/preallocate_windows.go | 2 +- lib/file/unc.go | 2 +- lib/file/unc_test.go | 2 +- lib/file/unc_windows.go | 2 +- lib/http/auth.go | 2 +- lib/http/auth_test.go | 2 +- lib/http/context.go | 2 +- lib/http/middleware.go | 2 +- lib/http/middleware_test.go | 2 +- lib/http/serve/dir.go | 2 +- lib/http/serve/dir_test.go | 2 +- lib/http/serve/serve.go | 2 +- lib/http/serve/serve_test.go | 2 +- lib/http/server.go | 2 +- lib/http/server_test.go | 2 +- lib/http/template.go | 2 +- lib/http/template_test.go | 2 +- lib/israce/israce.go | 2 +- lib/israce/norace.go | 2 +- lib/jwtutil/jwtutil.go | 2 +- lib/kv/bolt.go | 2 +- lib/kv/internal_test.go | 2 +- lib/kv/types.go | 2 +- lib/kv/unsupported.go | 2 +- lib/mmap/mmap.go | 2 +- lib/mmap/mmap_test.go | 2 +- lib/mmap/mmap_unix.go | 2 +- lib/mmap/mmap_unsupported.go | 2 +- lib/mmap/mmap_windows.go | 2 +- lib/multipart/multipart.go | 2 +- lib/oauthutil/oauthutil.go | 2 +- lib/oauthutil/renew.go | 2 +- lib/pacer/pacer.go | 2 +- lib/pacer/pacer_test.go | 2 +- lib/pacer/pacers.go | 2 +- lib/pacer/tokens.go | 2 +- lib/pacer/tokens_test.go | 2 +- lib/plugin/package.go | 2 +- lib/plugin/plugin.go | 2 +- lib/pool/pool.go | 2 +- lib/pool/pool_test.go | 2 +- lib/pool/reader_writer.go | 2 +- lib/pool/reader_writer_test.go | 2 +- lib/proxy/http.go | 2 +- lib/proxy/socks.go | 2 +- lib/random/random.go | 2 +- lib/random/random_test.go | 2 +- lib/ranges/ranges.go | 2 +- lib/ranges/ranges_test.go | 2 +- lib/readers/context.go | 2 +- lib/readers/context_test.go | 2 +- 
lib/readers/counting_reader.go | 2 +- lib/readers/error.go | 2 +- lib/readers/error_test.go | 2 +- lib/readers/fakeseeker.go | 2 +- lib/readers/fakeseeker_test.go | 2 +- lib/readers/gzip.go | 2 +- lib/readers/gzip_test.go | 2 +- lib/readers/limited.go | 2 +- lib/readers/noclose.go | 2 +- lib/readers/noclose_test.go | 2 +- lib/readers/noseeker.go | 2 +- lib/readers/noseeker_test.go | 2 +- lib/readers/pattern_reader.go | 2 +- lib/readers/pattern_reader_test.go | 2 +- lib/readers/readfill.go | 2 +- lib/readers/readfill_test.go | 2 +- lib/readers/repeatable.go | 2 +- lib/readers/repeatable_test.go | 2 +- lib/rest/headers.go | 2 +- lib/rest/headers_test.go | 2 +- lib/rest/rest.go | 2 +- lib/rest/url.go | 2 +- lib/rest/url_test.go | 2 +- lib/sdactivation/sdactivation_stub.go | 2 +- lib/sdactivation/sdactivation_unix.go | 2 +- lib/structs/structs.go | 2 +- lib/structs/structs_test.go | 2 +- lib/systemd/doc.go | 2 +- lib/systemd/notify.go | 2 +- lib/terminal/hidden_other.go | 2 +- lib/terminal/hidden_windows.go | 2 +- lib/terminal/terminal.go | 2 +- lib/terminal/terminal_normal.go | 2 +- lib/terminal/terminal_unsupported.go | 2 +- lib/transform/cmap.go | 2 +- lib/transform/gen_help.go | 2 +- lib/transform/options.go | 2 +- lib/transform/transform.go | 2 +- lib/transform/transform_test.go | 2 +- lib/version/version.go | 2 +- lib/version/version_test.go | 2 +- librclone/gomobile/gomobile.go | 2 +- librclone/librclone.go | 2 +- librclone/librclone/librclone.go | 2 +- rclone.go | 2 +- vfs/dir.go | 2 +- vfs/dir_handle.go | 2 +- vfs/dir_handle_test.go | 2 +- vfs/dir_test.go | 2 +- vfs/errors.go | 2 +- vfs/errors_test.go | 2 +- vfs/file.go | 2 +- vfs/file_test.go | 2 +- vfs/make_open_tests.go | 2 +- vfs/open_test.go | 2 +- vfs/rc.go | 2 +- vfs/rc_test.go | 2 +- vfs/read.go | 2 +- vfs/read_test.go | 2 +- vfs/read_write.go | 2 +- vfs/read_write_test.go | 2 +- vfs/sighup.go | 2 +- vfs/sighup_unsupported.go | 2 +- vfs/test_vfs/test_vfs.go | 2 +- vfs/vfs.go | 2 +- vfs/vfs_case_test.go 
| 2 +- vfs/vfs_test.go | 2 +- vfs/vfscache/cache.go | 2 +- vfs/vfscache/cache_test.go | 2 +- vfs/vfscache/downloaders/downloaders.go | 2 +- vfs/vfscache/downloaders/downloaders_test.go | 2 +- vfs/vfscache/item.go | 2 +- vfs/vfscache/item_test.go | 2 +- vfs/vfscache/writeback/writeback.go | 2 +- vfs/vfscache/writeback/writeback_test.go | 2 +- vfs/vfscommon/cachemode.go | 2 +- vfs/vfscommon/cachemode_test.go | 2 +- vfs/vfscommon/filemode.go | 2 +- vfs/vfscommon/filemode_test.go | 2 +- vfs/vfscommon/options.go | 2 +- vfs/vfscommon/path.go | 2 +- vfs/vfscommon/vfsflags_non_unix.go | 2 +- vfs/vfscommon/vfsflags_unix.go | 2 +- vfs/vfsflags/vfsflags.go | 2 +- vfs/vfstest/dir.go | 2 +- vfs/vfstest/edge_cases.go | 2 +- vfs/vfstest/file.go | 2 +- vfs/vfstest/fs.go | 2 +- vfs/vfstest/os.go | 2 +- vfs/vfstest/read.go | 2 +- vfs/vfstest/read_non_unix.go | 2 +- vfs/vfstest/read_unix.go | 2 +- vfs/vfstest/submount.go | 2 +- vfs/vfstest/vfs.go | 2 +- vfs/vfstest/write.go | 2 +- vfs/vfstest/write_other.go | 2 +- vfs/vfstest/write_unix.go | 2 +- vfs/vfstest/write_windows.go | 2 +- vfs/vfstest_test.go | 2 +- vfs/vstate_string.go | 2 +- vfs/write.go | 2 +- vfs/write_test.go | 2 +- vfs/zip.go | 2 +- vfs/zip_test.go | 2 +- 1059 files changed, 1058 insertions(+), 1058 deletions(-) diff --git a/backend/alias/alias.go b/backend/alias/alias.go index 92dda17bf..ae4736163 100644 --- a/backend/alias/alias.go +++ b/backend/alias/alias.go @@ -1,4 +1,4 @@ -// Package alias implements a virtual provider to rename existing remotes. +// Package alias implements a virtual provider to rename existing remotes. 
package alias import ( diff --git a/backend/alias/alias_internal_test.go b/backend/alias/alias_internal_test.go index 18312eb02..9e3e0fe71 100644 --- a/backend/alias/alias_internal_test.go +++ b/backend/alias/alias_internal_test.go @@ -1,4 +1,4 @@ -package alias +package alias import ( "context" diff --git a/backend/alist/alist.go b/backend/alist/alist.go index 1b6bc9749..9b6ed6a40 100644 --- a/backend/alist/alist.go +++ b/backend/alist/alist.go @@ -1,4 +1,4 @@ -// Package alist implements an rclone backend for AList +// Package alist implements an rclone backend for AList package alist import ( diff --git a/backend/all/all.go b/backend/all/all.go index 0018a9643b4a96a9680c087d542e3634e9714665..4d6a6fc4ad9cca3ba4b673abcade0740e46e25f1 100644 GIT binary patch delta 18 Ycmexm+Gxi4e)s package main diff --git a/vfs/dir.go b/vfs/dir.go index 174b7d001..77ae24ce9 100644 --- a/vfs/dir.go +++ b/vfs/dir.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/dir_handle.go b/vfs/dir_handle.go index d2ef0f9e9..7cddeead5 100644 --- a/vfs/dir_handle.go +++ b/vfs/dir_handle.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "io" diff --git a/vfs/dir_handle_test.go b/vfs/dir_handle_test.go index cfb215c2f..f6300a8e3 100644 --- a/vfs/dir_handle_test.go +++ b/vfs/dir_handle_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/dir_test.go b/vfs/dir_test.go index 3d7ab8cee..8645b7a2a 100644 --- a/vfs/dir_test.go +++ b/vfs/dir_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/errors.go b/vfs/errors.go index c54b4dd91..5b0d569e9 100644 --- a/vfs/errors.go +++ b/vfs/errors.go @@ -1,4 +1,4 @@ -// Cross platform errors +// Cross platform errors package vfs diff --git a/vfs/errors_test.go b/vfs/errors_test.go index 7865fb6f3..e252636ad 100644 --- a/vfs/errors_test.go +++ b/vfs/errors_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "testing" diff --git a/vfs/file.go b/vfs/file.go 
index 76aa03b7d..aad021252 100644 --- a/vfs/file.go +++ b/vfs/file.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/file_test.go b/vfs/file_test.go index 2a4b5fd47..dace65f04 100644 --- a/vfs/file_test.go +++ b/vfs/file_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/make_open_tests.go b/vfs/make_open_tests.go index 01df4d0a9..7369c46fc 100644 --- a/vfs/make_open_tests.go +++ b/vfs/make_open_tests.go @@ -1,4 +1,4 @@ -// This makes the open test suite. It tries to open a file (existing +// This makes the open test suite. It tries to open a file (existing // or not existing) with all possible file modes and writes a test // matrix. // diff --git a/vfs/open_test.go b/vfs/open_test.go index 5fae18691..da5683a98 100644 --- a/vfs/open_test.go +++ b/vfs/open_test.go @@ -1,4 +1,4 @@ -// Code generated by make_open_tests.go - use go generate to rebuild - DO NOT EDIT +// Code generated by make_open_tests.go - use go generate to rebuild - DO NOT EDIT package vfs diff --git a/vfs/rc.go b/vfs/rc.go index d780048bd..b7d268dec 100644 --- a/vfs/rc.go +++ b/vfs/rc.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/rc_test.go b/vfs/rc_test.go index 9c70d526a..660528fe5 100644 --- a/vfs/rc_test.go +++ b/vfs/rc_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/read.go b/vfs/read.go index 777946f26..c9701f8f2 100644 --- a/vfs/read.go +++ b/vfs/read.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/read_test.go b/vfs/read_test.go index 6b7d55c4e..144bf790b 100644 --- a/vfs/read_test.go +++ b/vfs/read_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/read_write.go b/vfs/read_write.go index 56b9c467d..c399540b3 100644 --- a/vfs/read_write.go +++ b/vfs/read_write.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "fmt" diff --git a/vfs/read_write_test.go b/vfs/read_write_test.go index 
5b30ed675..a6c733f82 100644 --- a/vfs/read_write_test.go +++ b/vfs/read_write_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/sighup.go b/vfs/sighup.go index fadcbcdda..3bf232341 100644 --- a/vfs/sighup.go +++ b/vfs/sighup.go @@ -1,4 +1,4 @@ -//go:build !plan9 && !js +//go:build !plan9 && !js package vfs diff --git a/vfs/sighup_unsupported.go b/vfs/sighup_unsupported.go index 19e08a34b..8d450c391 100644 --- a/vfs/sighup_unsupported.go +++ b/vfs/sighup_unsupported.go @@ -1,4 +1,4 @@ -//go:build plan9 || js +//go:build plan9 || js package vfs diff --git a/vfs/test_vfs/test_vfs.go b/vfs/test_vfs/test_vfs.go index a38f72e67..5911b386b 100644 --- a/vfs/test_vfs/test_vfs.go +++ b/vfs/test_vfs/test_vfs.go @@ -1,4 +1,4 @@ -// Test the VFS to exhaustion, specifically looking for deadlocks +// Test the VFS to exhaustion, specifically looking for deadlocks // // Run on a mounted filesystem package main diff --git a/vfs/vfs.go b/vfs/vfs.go index 1d2f71dd1..52b8d1ddc 100644 --- a/vfs/vfs.go +++ b/vfs/vfs.go @@ -1,4 +1,4 @@ -// Package vfs provides a virtual filing system layer over rclone's +// Package vfs provides a virtual filing system layer over rclone's // native objects. 
// // It attempts to behave in a similar way to Go's filing system diff --git a/vfs/vfs_case_test.go b/vfs/vfs_case_test.go index 26acd2210..3d1eae289 100644 --- a/vfs/vfs_case_test.go +++ b/vfs/vfs_case_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/vfs_test.go b/vfs/vfs_test.go index f175e0efc..040f7f476 100644 --- a/vfs/vfs_test.go +++ b/vfs/vfs_test.go @@ -1,4 +1,4 @@ -// Test suite for vfs +// Test suite for vfs package vfs diff --git a/vfs/vfscache/cache.go b/vfs/vfscache/cache.go index 0ffa30c76..d989f4cd5 100644 --- a/vfs/vfscache/cache.go +++ b/vfs/vfscache/cache.go @@ -1,4 +1,4 @@ -// Package vfscache deals with caching of files locally for the VFS layer +// Package vfscache deals with caching of files locally for the VFS layer package vfscache import ( diff --git a/vfs/vfscache/cache_test.go b/vfs/vfscache/cache_test.go index 8c5510f6d..e36a488e4 100644 --- a/vfs/vfscache/cache_test.go +++ b/vfs/vfscache/cache_test.go @@ -1,4 +1,4 @@ -package vfscache +package vfscache import ( "context" diff --git a/vfs/vfscache/downloaders/downloaders.go b/vfs/vfscache/downloaders/downloaders.go index 87783fb6b..ea19f4bd1 100644 --- a/vfs/vfscache/downloaders/downloaders.go +++ b/vfs/vfscache/downloaders/downloaders.go @@ -1,4 +1,4 @@ -// Package downloaders provides utilities for the VFS layer +// Package downloaders provides utilities for the VFS layer package downloaders import ( diff --git a/vfs/vfscache/downloaders/downloaders_test.go b/vfs/vfscache/downloaders/downloaders_test.go index 8d82c3f99..cc4f8e563 100644 --- a/vfs/vfscache/downloaders/downloaders_test.go +++ b/vfs/vfscache/downloaders/downloaders_test.go @@ -1,4 +1,4 @@ -package downloaders +package downloaders import ( "context" diff --git a/vfs/vfscache/item.go b/vfs/vfscache/item.go index 66adc9b66..53b2ae39e 100644 --- a/vfs/vfscache/item.go +++ b/vfs/vfscache/item.go @@ -1,4 +1,4 @@ -package vfscache +package vfscache import ( "context" diff --git 
a/vfs/vfscache/item_test.go b/vfs/vfscache/item_test.go index 048261b62..22ae60028 100644 --- a/vfs/vfscache/item_test.go +++ b/vfs/vfscache/item_test.go @@ -1,4 +1,4 @@ -package vfscache +package vfscache // FIXME need to test async writeback here diff --git a/vfs/vfscache/writeback/writeback.go b/vfs/vfscache/writeback/writeback.go index fe7b9ed62..26155dc59 100644 --- a/vfs/vfscache/writeback/writeback.go +++ b/vfs/vfscache/writeback/writeback.go @@ -1,4 +1,4 @@ -// Package writeback keeps track of the files which need to be written +// Package writeback keeps track of the files which need to be written // back to storage package writeback diff --git a/vfs/vfscache/writeback/writeback_test.go b/vfs/vfscache/writeback/writeback_test.go index 87c067627..8e77df9d2 100644 --- a/vfs/vfscache/writeback/writeback_test.go +++ b/vfs/vfscache/writeback/writeback_test.go @@ -1,4 +1,4 @@ -package writeback +package writeback import ( "container/heap" diff --git a/vfs/vfscommon/cachemode.go b/vfs/vfscommon/cachemode.go index ab1ccb3dc..2dd7609a5 100644 --- a/vfs/vfscommon/cachemode.go +++ b/vfs/vfscommon/cachemode.go @@ -1,4 +1,4 @@ -// Package vfscommon provides utilities for VFS. +// Package vfscommon provides utilities for VFS. 
package vfscommon import ( diff --git a/vfs/vfscommon/cachemode_test.go b/vfs/vfscommon/cachemode_test.go index fb8f86734..d4ad50566 100644 --- a/vfs/vfscommon/cachemode_test.go +++ b/vfs/vfscommon/cachemode_test.go @@ -1,4 +1,4 @@ -package vfscommon +package vfscommon import ( "encoding/json" diff --git a/vfs/vfscommon/filemode.go b/vfs/vfscommon/filemode.go index 109db6c18..8b882eb47 100644 --- a/vfs/vfscommon/filemode.go +++ b/vfs/vfscommon/filemode.go @@ -1,4 +1,4 @@ -package vfscommon +package vfscommon import ( "fmt" diff --git a/vfs/vfscommon/filemode_test.go b/vfs/vfscommon/filemode_test.go index 310194ca4..f440a3b3b 100644 --- a/vfs/vfscommon/filemode_test.go +++ b/vfs/vfscommon/filemode_test.go @@ -1,4 +1,4 @@ -package vfscommon +package vfscommon import ( "encoding/json" diff --git a/vfs/vfscommon/options.go b/vfs/vfscommon/options.go index 7f222edac..60c619284 100644 --- a/vfs/vfscommon/options.go +++ b/vfs/vfscommon/options.go @@ -1,4 +1,4 @@ -package vfscommon +package vfscommon import ( "context" diff --git a/vfs/vfscommon/path.go b/vfs/vfscommon/path.go index 93b42a93e..17031e3e1 100644 --- a/vfs/vfscommon/path.go +++ b/vfs/vfscommon/path.go @@ -1,4 +1,4 @@ -package vfscommon +package vfscommon import ( "path" diff --git a/vfs/vfscommon/vfsflags_non_unix.go b/vfs/vfscommon/vfsflags_non_unix.go index 0cf18a200..47d9b1755 100644 --- a/vfs/vfscommon/vfsflags_non_unix.go +++ b/vfs/vfscommon/vfsflags_non_unix.go @@ -1,4 +1,4 @@ -//go:build !linux && !darwin && !freebsd +//go:build !linux && !darwin && !freebsd package vfscommon diff --git a/vfs/vfscommon/vfsflags_unix.go b/vfs/vfscommon/vfsflags_unix.go index 48ea7ef2e..6ccab007b 100644 --- a/vfs/vfscommon/vfsflags_unix.go +++ b/vfs/vfscommon/vfsflags_unix.go @@ -1,4 +1,4 @@ -//go:build linux || darwin || freebsd +//go:build linux || darwin || freebsd package vfscommon diff --git a/vfs/vfsflags/vfsflags.go b/vfs/vfsflags/vfsflags.go index f5709d7a8..b6058528f 100644 --- a/vfs/vfsflags/vfsflags.go +++ 
b/vfs/vfsflags/vfsflags.go @@ -1,4 +1,4 @@ -// Package vfsflags implements command line flags to set up a vfs +// Package vfsflags implements command line flags to set up a vfs package vfsflags import ( diff --git a/vfs/vfstest/dir.go b/vfs/vfstest/dir.go index 0cf02ed0f..c88d51234 100644 --- a/vfs/vfstest/dir.go +++ b/vfs/vfstest/dir.go @@ -1,4 +1,4 @@ -package vfstest +package vfstest import ( "context" diff --git a/vfs/vfstest/edge_cases.go b/vfs/vfstest/edge_cases.go index a54b8cc90..b77750036 100644 --- a/vfs/vfstest/edge_cases.go +++ b/vfs/vfstest/edge_cases.go @@ -1,4 +1,4 @@ -package vfstest +package vfstest import ( "runtime" diff --git a/vfs/vfstest/file.go b/vfs/vfstest/file.go index e923b521f..9fc5ce02d 100644 --- a/vfs/vfstest/file.go +++ b/vfs/vfstest/file.go @@ -1,4 +1,4 @@ -package vfstest +package vfstest import ( "os" diff --git a/vfs/vfstest/fs.go b/vfs/vfstest/fs.go index 0391f855c..a2c8a6b08 100644 --- a/vfs/vfstest/fs.go +++ b/vfs/vfstest/fs.go @@ -1,4 +1,4 @@ -// Test suite for rclonefs +// Test suite for rclonefs package vfstest diff --git a/vfs/vfstest/os.go b/vfs/vfstest/os.go index ea6d00641..b18491daa 100644 --- a/vfs/vfstest/os.go +++ b/vfs/vfstest/os.go @@ -1,4 +1,4 @@ -package vfstest +package vfstest import ( "os" diff --git a/vfs/vfstest/read.go b/vfs/vfstest/read.go index 518d5021f..51d68c7c1 100644 --- a/vfs/vfstest/read.go +++ b/vfs/vfstest/read.go @@ -1,4 +1,4 @@ -package vfstest +package vfstest import ( "io" diff --git a/vfs/vfstest/read_non_unix.go b/vfs/vfstest/read_non_unix.go index d2328c8e3..628e3843f 100644 --- a/vfs/vfstest/read_non_unix.go +++ b/vfs/vfstest/read_non_unix.go @@ -1,4 +1,4 @@ -//go:build !linux && !darwin && !freebsd +//go:build !linux && !darwin && !freebsd package vfstest diff --git a/vfs/vfstest/read_unix.go b/vfs/vfstest/read_unix.go index c5b1e881f..557b9210d 100644 --- a/vfs/vfstest/read_unix.go +++ b/vfs/vfstest/read_unix.go @@ -1,4 +1,4 @@ -//go:build linux || darwin || freebsd +//go:build linux 
|| darwin || freebsd package vfstest diff --git a/vfs/vfstest/submount.go b/vfs/vfstest/submount.go index ac6400cdf..13b4e76e5 100644 --- a/vfs/vfstest/submount.go +++ b/vfs/vfstest/submount.go @@ -1,4 +1,4 @@ -package vfstest +package vfstest import ( "bufio" diff --git a/vfs/vfstest/vfs.go b/vfs/vfstest/vfs.go index ad56fd42e..ad0838303 100644 --- a/vfs/vfstest/vfs.go +++ b/vfs/vfstest/vfs.go @@ -1,4 +1,4 @@ -// Package vfstest provides tests for VFS. +// Package vfstest provides tests for VFS. package vfstest import ( diff --git a/vfs/vfstest/write.go b/vfs/vfstest/write.go index 6d883cbf0..3f6658f62 100644 --- a/vfs/vfstest/write.go +++ b/vfs/vfstest/write.go @@ -1,4 +1,4 @@ -package vfstest +package vfstest import ( "os" diff --git a/vfs/vfstest/write_other.go b/vfs/vfstest/write_other.go index f95c3efa1..998125302 100644 --- a/vfs/vfstest/write_other.go +++ b/vfs/vfstest/write_other.go @@ -1,4 +1,4 @@ -//go:build !linux && !darwin && !freebsd && !windows +//go:build !linux && !darwin && !freebsd && !windows // +build !linux,!darwin,!freebsd,!windows package vfstest diff --git a/vfs/vfstest/write_unix.go b/vfs/vfstest/write_unix.go index bbd9c2657..c0f60e383 100644 --- a/vfs/vfstest/write_unix.go +++ b/vfs/vfstest/write_unix.go @@ -1,4 +1,4 @@ -//go:build linux || darwin || freebsd +//go:build linux || darwin || freebsd package vfstest diff --git a/vfs/vfstest/write_windows.go b/vfs/vfstest/write_windows.go index 227e302a3..f5beb0da4 100644 --- a/vfs/vfstest/write_windows.go +++ b/vfs/vfstest/write_windows.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows package vfstest diff --git a/vfs/vfstest_test.go b/vfs/vfstest_test.go index df351736e..34c34da5a 100644 --- a/vfs/vfstest_test.go +++ b/vfs/vfstest_test.go @@ -1,4 +1,4 @@ -// Run the more functional vfstest package on the vfs +// Run the more functional vfstest package on the vfs package vfs_test diff --git a/vfs/vstate_string.go b/vfs/vstate_string.go index fd9fe4487..7ceb135f3 100644 --- 
a/vfs/vstate_string.go +++ b/vfs/vstate_string.go @@ -1,4 +1,4 @@ -// Code generated by "stringer -type=vState"; DO NOT EDIT. +// Code generated by "stringer -type=vState"; DO NOT EDIT. package vfs diff --git a/vfs/write.go b/vfs/write.go index 8e74b45a8..434dff83c 100644 --- a/vfs/write.go +++ b/vfs/write.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/write_test.go b/vfs/write_test.go index 77173bdf5..853f8401e 100644 --- a/vfs/write_test.go +++ b/vfs/write_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "context" diff --git a/vfs/zip.go b/vfs/zip.go index 287777bed..d202caa58 100644 --- a/vfs/zip.go +++ b/vfs/zip.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "archive/zip" diff --git a/vfs/zip_test.go b/vfs/zip_test.go index d15be2483..20bae004e 100644 --- a/vfs/zip_test.go +++ b/vfs/zip_test.go @@ -1,4 +1,4 @@ -package vfs +package vfs import ( "archive/zip" From 48e0b76924a0e0967c17313724b3a61e63356afa Mon Sep 17 00:00:00 2001 From: Benji Date: Wed, 4 Feb 2026 15:44:04 +0000 Subject: [PATCH 6/9] Normalize .go files to UTF-8 and LF chore: fix encoding --- backend/all/all.go | Bin 6913 -> 3305 bytes bin/convert_go_to_utf8.py | 50 ++++++++++++++++++++++++++++++++++++ bin/normalize_go_to_utf8.py | 35 +++++++++++++++++++++++++ 3 files changed, 85 insertions(+) create mode 100644 bin/convert_go_to_utf8.py create mode 100755 bin/normalize_go_to_utf8.py diff --git a/backend/all/all.go b/backend/all/all.go index 4d6a6fc4ad9cca3ba4b673abcade0740e46e25f1..d740512124df6cbcbe652181ba384178edc163b9 100644 GIT binary patch literal 3305 zcmb7G%Wm8-47~d*jGUTF7ifQ^e-Ka-ZE-D$svgPietqRlgIq@^8}Nu8lEa}cm-P?X zgLYmigjHhJ((3cQd0x%<#>v$se}k6g6Se+W-qx30n(yYP)p#j3XyUKO|DzWQLiGiO z)`miZV^hR|@M68ZGYEe4!X~JhfNv5Q%+mbA;u^hSKugwxsafCC!3B&Pv^z;u?jT;H zk%ThQm4NOnL`@wlQH?%XCrmS{UpiET7AZ?XJNeCRkf;m|t9 z9YnW`0e3z5qFCJn54Y*AHZ8-vX3k}=O+npxsvhl+cN-d2CIsahT0(hNWFoZ0QMyCG z!IIRtByKe0rJsjx;4iM)8a=bX8hWU1pqT|p2RxTSEd`81BnbtavdBAuQ8_AVyQ3vj 
zQ!sY)ghn)v{g2z$}-`lMr2!76}tt+SQ zoAKM3H*fdl$MehcPrlc-wv~Og${IUz-C4_hWj$Xj+uO-T8*H+(rMu71*5&8w*Z*tn z!8Sbc&ffC;|FHBI{oEt2>_h(SjeP_AJ+b%1>1?*zK9SEr^fj>?av6xw{&V+t{B_QI z&ozOjXx_+){34Kh*lT&~2?jO+5yW8!_IINA=YM0xrkualsSb>=M4Fs3)9Peo}XI%rY9E!|UQMdQAvON^dQ_12dK`ymWD zy}Abf%+J(}H|mZ`TsVoq9&l8emc>6wdF5Z^=8DgYj5#t(h8)d*R(Ir|z>ny$4wkk7 z+AjAMr)nKcM^vcPY?<0|*=vTBiaf%Szr&W_IOl579^y=_NI!fZ?Go@dhul|si$81Q z-N{E{NSpj!V9yH6VaCi~)vEk)@2=IVQ0+aYfVn6KwFBRK6@`0(kqD<s8nn7}9BoJc^XUKZZ^! zPGHaZddqkII~eT}i~rHdSX6zDyv3P!8Qgh#%rFa$m*u3JxroD{HJ1~@BPmx7ezk=)ZLuj!9*`!QAGbef3hc- zR{eQ*xmF&dNB3lP6ZRMo%jIl6SRS ULDjTu117=^Msy-ka{g$)0HXA==>Px# diff --git a/bin/convert_go_to_utf8.py b/bin/convert_go_to_utf8.py new file mode 100644 index 000000000..701bdb87f --- /dev/null +++ b/bin/convert_go_to_utf8.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +import subprocess, os, sys +root = os.getcwd() +files = subprocess.check_output(["git","ls-files","*.go"], cwd=root).decode().splitlines() +converted = [] +failed = [] +for f in files: + path = os.path.join(root, f) + try: + data = open(path, 'rb').read() + except Exception as e: + failed.append((f, str(e))) + continue + if b'\x00' not in data: + continue + # try utf-8 first + try: + text = data.decode('utf-8') + # if decoded utf-8 contains NUL characters, treat as failure + if "\x00" in text: + raise UnicodeDecodeError('utf-8','',0,1,'contains NUL') + # already valid utf-8 + continue + except Exception: + pass + dec_success = False + for enc in ('utf-16','utf-16-le','utf-16-be'): + try: + text = data.decode(enc) + # strip trailing BOM if any + if text and text[0] == '\ufeff': + text = text[1:] + # write back as utf-8 + with open(path, 'w', encoding='utf-8', newline='\n') as w: + w.write(text) + converted.append(f) + dec_success = True + break + except Exception: + continue + if not dec_success: + failed.append((f, 'could not decode as utf-16')) + +print('Converted files:', len(converted)) +for c in converted: + print(c) +if failed: + print('\nFailed 
conversions:', len(failed)) + for f,err in failed: + print(f, err) diff --git a/bin/normalize_go_to_utf8.py b/bin/normalize_go_to_utf8.py new file mode 100755 index 000000000..79c4f11be --- /dev/null +++ b/bin/normalize_go_to_utf8.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +import subprocess,os,sys +root=os.getcwd() +files=subprocess.check_output(['git','ls-files','*.go'],cwd=root).decode().splitlines() +changed=[] +for f in files: + p=os.path.join(root,f) + with open(p,'rb') as fh: + data=fh.read() + text=None + try: + text=data.decode('utf-8') + except Exception: + # try utf-16 variants + for enc in ('utf-16','utf-16-le','utf-16-be'): + try: + text=data.decode(enc) + break + except Exception: + continue + if text is None: + print('Skipping (cannot decode):',f) + continue + # strip BOM + if text and text[0]=='\ufeff': + text=text[1:] + # normalize newlines to \n + text=text.replace('\r\n','\n').replace('\r','\n') + # write back as utf-8 + with open(p,'wb') as fh: + fh.write(text.encode('utf-8')) + changed.append(f) +print('Processed files:',len(files),'Rewritten:',len(changed)) +for c in changed[:200]: + print(c) From 95ccd3c80381474919821999dc3e1843f0244df4 Mon Sep 17 00:00:00 2001 From: Benji Date: Wed, 4 Feb 2026 15:53:21 +0000 Subject: [PATCH 7/9] Prune missing backend imports from backend/all --- backend/all/all.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/backend/all/all.go b/backend/all/all.go index d74051212..92244a4da 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -5,7 +5,6 @@ import ( _ "github.com/rclone/rclone/backend/alias" _ "github.com/rclone/rclone/backend/alist" _ "github.com/rclone/rclone/backend/alldebrid" -_ "github.com/rclone/rclone/backend/archive" _ "github.com/rclone/rclone/backend/azureblob" _ "github.com/rclone/rclone/backend/azurefiles" _ "github.com/rclone/rclone/backend/b2" @@ -17,13 +16,11 @@ _ "github.com/rclone/rclone/backend/combine" _ "github.com/rclone/rclone/backend/compress" _ 
"github.com/rclone/rclone/backend/crypt" _ "github.com/rclone/rclone/backend/doi" -_ "github.com/rclone/rclone/backend/drime" _ "github.com/rclone/rclone/backend/drive" _ "github.com/rclone/rclone/backend/dropbox" _ "github.com/rclone/rclone/backend/fichier" _ "github.com/rclone/rclone/backend/filefabric" _ "github.com/rclone/rclone/backend/filelu" -_ "github.com/rclone/rclone/backend/filen" _ "github.com/rclone/rclone/backend/filescom" _ "github.com/rclone/rclone/backend/ftp" _ "github.com/rclone/rclone/backend/gofile" @@ -36,7 +33,6 @@ _ "github.com/rclone/rclone/backend/http" _ "github.com/rclone/rclone/backend/iclouddrive" _ "github.com/rclone/rclone/backend/imagekit" _ "github.com/rclone/rclone/backend/internetarchive" -_ "github.com/rclone/rclone/backend/internxt" _ "github.com/rclone/rclone/backend/jottacloud" _ "github.com/rclone/rclone/backend/koofr" _ "github.com/rclone/rclone/backend/linkbox" @@ -59,7 +55,6 @@ _ "github.com/rclone/rclone/backend/quatrix" _ "github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/seafile" _ "github.com/rclone/rclone/backend/sftp" -_ "github.com/rclone/rclone/backend/shade" _ "github.com/rclone/rclone/backend/sharefile" _ "github.com/rclone/rclone/backend/sia" _ "github.com/rclone/rclone/backend/smb" From 670066028f2dfc9f58ebf0642e6452a50b999fe6 Mon Sep 17 00:00:00 2001 From: Benji Date: Wed, 4 Feb 2026 15:56:42 +0000 Subject: [PATCH 8/9] Use ServerSideTransferEnd to account resumed bytes (AccountReadN removed) --- fs/operations/multithread.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/operations/multithread.go b/fs/operations/multithread.go index 0e33d6666..68ba5920f 100644 --- a/fs/operations/multithread.go +++ b/fs/operations/multithread.go @@ -568,7 +568,7 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, } } if completedBytes > 0 { - mc.acc.AccountReadN(completedBytes) + mc.acc.ServerSideTransferEnd(completedBytes) } } From 
ac7f48e5aa115a14a6ae15ca0a4725d338204e37 Mon Sep 17 00:00:00 2001 From: Benji Date: Wed, 4 Feb 2026 16:18:29 +0000 Subject: [PATCH 9/9] chore: bump version --- MANUAL.html | 2 +- MANUAL.md | 2 +- MANUAL.txt | 2 +- VERSION | 2 +- docs/content/commands/rclone.md | 2 +- docs/content/flags.md | 2 +- docs/layouts/partials/version.html | 2 +- fs/versiontag.go | 2 +- rclone.1 | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/MANUAL.html b/MANUAL.html index d47636c4f..23887b83d 100644 --- a/MANUAL.html +++ b/MANUAL.html @@ -21765,7 +21765,7 @@

Networking

--tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --use-cookies Enable session cookiejar - --user-agent string Set the user-agent to a specified string (default "rclone/v1.73.1") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.73.2")

Performance

Flags helpful for increasing performance.

      --buffer-size SizeSuffix   In memory buffer size when reading files for each --transfer (default 16Mi)
diff --git a/MANUAL.md b/MANUAL.md
index a3eb4762e..9d61008fa 100644
--- a/MANUAL.md
+++ b/MANUAL.md
@@ -23165,7 +23165,7 @@ Flags for general networking and HTTP stuff.
       --tpslimit float                     Limit HTTP transactions per second to this
       --tpslimit-burst int                 Max burst of transactions for --tpslimit (default 1)
       --use-cookies                        Enable session cookiejar
-      --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.73.1")
+      --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.73.2")
 ```
 
 
diff --git a/MANUAL.txt b/MANUAL.txt
index cabd37c9b..d0e315077 100644
--- a/MANUAL.txt
+++ b/MANUAL.txt
@@ -22527,7 +22527,7 @@ Flags for general networking and HTTP stuff.
           --tpslimit float                     Limit HTTP transactions per second to this
           --tpslimit-burst int                 Max burst of transactions for --tpslimit (default 1)
           --use-cookies                        Enable session cookiejar
-          --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.73.1")
+          --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.73.2")
 
 Performance
 
diff --git a/VERSION b/VERSION
index 719caf748..b12b8266c 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v1.73.1
+v1.73.2
diff --git a/docs/content/commands/rclone.md b/docs/content/commands/rclone.md
index 3738aa446..aa531f0d0 100644
--- a/docs/content/commands/rclone.md
+++ b/docs/content/commands/rclone.md
@@ -1015,7 +1015,7 @@ rclone [flags]
       --use-json-log                                        Use json log format
       --use-mmap                                            Use mmap allocator (see docs)
       --use-server-modtime                                  Use server modified time instead of object metadata
-      --user-agent string                                   Set the user-agent to a specified string (default "rclone/v1.73.1")
+      --user-agent string                                   Set the user-agent to a specified string (default "rclone/v1.73.2")
   -v, --verbose count                                       Print lots more stuff (repeat for more)
   -V, --version                                             Print the version number
       --webdav-auth-redirect                                Preserve authentication on redirect
diff --git a/docs/content/flags.md b/docs/content/flags.md
index c92a1c921..8f2977872 100644
--- a/docs/content/flags.md
+++ b/docs/content/flags.md
@@ -121,7 +121,7 @@ Flags for general networking and HTTP stuff.
       --tpslimit float                     Limit HTTP transactions per second to this
       --tpslimit-burst int                 Max burst of transactions for --tpslimit (default 1)
       --use-cookies                        Enable session cookiejar
-      --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.73.1")
+      --user-agent string                  Set the user-agent to a specified string (default "rclone/v1.73.2")
 ```
 
 
diff --git a/docs/layouts/partials/version.html b/docs/layouts/partials/version.html
index 45e411a0a..3786da578 100644
--- a/docs/layouts/partials/version.html
+++ b/docs/layouts/partials/version.html
@@ -1 +1 @@
-v1.73.1
\ No newline at end of file
+v1.73.2
\ No newline at end of file
diff --git a/fs/versiontag.go b/fs/versiontag.go
index 7ae12341e..8b1ead9d8 100644
--- a/fs/versiontag.go
+++ b/fs/versiontag.go
@@ -1,4 +1,4 @@
 package fs
 
 // VersionTag of rclone
-var VersionTag = "v1.73.1"
+var VersionTag = "v1.73.2"
diff --git a/rclone.1 b/rclone.1
index d2617c70d..6a565dcd2 100644
--- a/rclone.1
+++ b/rclone.1
@@ -30907,7 +30907,7 @@ Flags for general networking and HTTP stuff.
       --tpslimit float                     Limit HTTP transactions per second to this
       --tpslimit-burst int                 Max burst of transactions for --tpslimit (default 1)
       --use-cookies                        Enable session cookiejar
-      --user-agent string                  Set the user-agent to a specified string (default \[dq]rclone/v1.73.1\[dq])
+      --user-agent string                  Set the user-agent to a specified string (default \[dq]rclone/v1.73.2\[dq])
 \f[R]
 .fi
 .SS Performance