From 41493927c690675a246763d0984f51a2394e4af5 Mon Sep 17 00:00:00 2001
From: jzunigax2 <125698953+jzunigax2@users.noreply.github.com>
Date: Tue, 23 Dec 2025 10:21:27 -0600
Subject: [PATCH 1/3] internxt: Integrate pacer for retry logic in various
 folder and file operations to enhance error handling and improve reliability

---
 backend/internxt/internxt.go | 108 +++++++++++++++++++++++------------
 1 file changed, 73 insertions(+), 35 deletions(-)

diff --git a/backend/internxt/internxt.go b/backend/internxt/internxt.go
index 320e2ea9b4b59..e2cfaaa55db08 100644
--- a/backend/internxt/internxt.go
+++ b/backend/internxt/internxt.go
@@ -388,7 +388,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // FindLeaf looks for a sub‑folder named `leaf` under the Internxt folder `pathID`.
 // If found, it returns its UUID and true. If not found, returns "", false.
 func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) {
-	entries, err := folders.ListAllFolders(ctx, f.cfg, pathID)
+	var entries []folders.Folder
+	err := f.pacer.Call(func() (bool, error) {
+		var err error
+		entries, err = folders.ListAllFolders(ctx, f.cfg, pathID)
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return "", false, err
 	}
@@ -484,7 +489,12 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
 	}
 
 	var out fs.DirEntries
-	foldersList, err := folders.ListAllFolders(ctx, f.cfg, dirID)
+	var foldersList []folders.Folder
+	err = f.pacer.Call(func() (bool, error) {
+		var err error
+		foldersList, err = folders.ListAllFolders(ctx, f.cfg, dirID)
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -492,7 +502,12 @@
 		remote := filepath.Join(dir, f.opt.Encoding.ToStandardName(e.PlainName))
 		out = append(out, fs.NewDir(remote, e.ModificationTime))
 	}
-	filesList, err := folders.ListAllFiles(ctx, f.cfg, dirID)
+	var filesList []folders.File
+	err = f.pacer.Call(func() (bool, error) {
+		var err error
+		filesList, err = folders.ListAllFiles(ctx, f.cfg, dirID)
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -571,7 +586,11 @@ func (f *Fs) Remove(ctx context.Context, remote string) error {
 	if err != nil {
 		return err
 	}
-	if err := folders.DeleteFolder(ctx, f.cfg, dirID); err != nil {
+	err = f.pacer.Call(func() (bool, error) {
+		err := folders.DeleteFolder(ctx, f.cfg, dirID)
+		return shouldRetry(ctx, err)
+	})
+	if err != nil {
 		return err
 	}
 	f.dirCache.FlushDir(remote)
@@ -591,7 +610,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 		return nil, fs.ErrorObjectNotFound
 	}
 
-	files, err := folders.ListAllFiles(ctx, f.cfg, dirID)
+	var files []folders.File
+	err = f.pacer.Call(func() (bool, error) {
+		var err error
+		files, err = folders.ListAllFiles(ctx, f.cfg, dirID)
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -669,12 +693,22 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
 
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
-	internxtLimit, err := users.GetLimit(ctx, f.cfg)
+	var internxtLimit *users.LimitResponse
+	err := f.pacer.Call(func() (bool, error) {
+		var err error
+		internxtLimit, err = users.GetLimit(ctx, f.cfg)
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return nil, err
 	}
 
-	internxtUsage, err := users.GetUsage(ctx, f.cfg)
+	var internxtUsage *users.UsageResponse
+	err = f.pacer.Call(func() (bool, error) {
+		var err error
+		internxtUsage, err = users.GetUsage(ctx, f.cfg)
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return nil, err
 	}
@@ -711,7 +745,16 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
 	if o.f.opt.SimulateEmptyFiles && o.size == 0 {
 		return io.NopCloser(bytes.NewReader(nil)), nil
 	}
-	return buckets.DownloadFileStream(ctx, o.f.cfg, o.id, rangeValue)
+	var stream io.ReadCloser
+	err := o.f.pacer.Call(func() (bool, error) {
+		var err error
+		stream, err = buckets.DownloadFileStream(ctx, o.f.cfg, o.id, rangeValue)
+		return shouldRetry(ctx, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+	return stream, nil
 }
 
 // Update updates an existing file or creates a new one
@@ -768,7 +811,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		backupType = ext
 
 		// Rename existing file to backup name
-		err = files.RenameFile(ctx, o.f.cfg, oldUUID, backupName, backupType)
+		err = o.f.pacer.Call(func() (bool, error) {
+			err := files.RenameFile(ctx, o.f.cfg, oldUUID, backupName, backupType)
+			return shouldRetry(ctx, err)
+		})
 		if err != nil {
 			return fmt.Errorf("failed to rename existing file to backup: %w", err)
 		}
@@ -777,23 +823,30 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		fs.Debugf(o.f, "Renamed existing file %s to backup %s.%s (UUID: %s)", remote, backupName, backupType, backupUUID)
 	}
 
-	// Step 2: Upload new file to original location
-	meta, err := buckets.UploadFileStreamAuto(ctx,
-		o.f.cfg,
-		dirID,
-		o.f.opt.Encoding.FromStandardName(filepath.Base(remote)),
-		in,
-		src.Size(),
-		src.ModTime(ctx),
-	)
+	var meta *buckets.CreateMetaResponse
+	err = o.f.pacer.Call(func() (bool, error) {
+		var err error
+		meta, err = buckets.UploadFileStreamAuto(ctx,
+			o.f.cfg,
+			dirID,
+			o.f.opt.Encoding.FromStandardName(filepath.Base(remote)),
+			in,
+			src.Size(),
+			src.ModTime(ctx),
+		)
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		// Upload failed - restore backup if it exists
 		if backupUUID != "" {
 			fs.Debugf(o.f, "Upload failed, attempting to restore backup %s.%s to %s",
 				backupName, backupType, remote)
 
-			restoreErr := files.RenameFile(ctx, o.f.cfg, backupUUID,
-				o.f.opt.Encoding.FromStandardName(origName), origType)
+			restoreErr := o.f.pacer.Call(func() (bool, error) {
+				err := files.RenameFile(ctx, o.f.cfg, backupUUID,
+					o.f.opt.Encoding.FromStandardName(origName), origType)
+				return shouldRetry(ctx, err)
+			})
 			if restoreErr != nil {
 				fs.Errorf(o.f, "CRITICAL: Upload failed AND backup restore failed: %v. Backup file remains as %s.%s (UUID: %s)",
 					restoreErr, backupName, backupType, backupUUID)
@@ -801,21 +854,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			}
 			fs.Debugf(o.f, "Upload failed, successfully restored backup file to original name")
 		}
-		return fmt.Errorf("upload failed: %w", err)
-	}
-
-	// Step 3: Upload succeeded - delete backup file
-	if backupUUID != "" {
-		fs.Debugf(o.f, "Upload succeeded, deleting backup %s.%s (UUID: %s)", backupName, backupType, backupUUID)
-
-		if err := files.DeleteFile(ctx, o.f.cfg, backupUUID); err != nil {
-			if !strings.Contains(err.Error(), "404") {
-				fs.Logf(o.f, "Warning: uploaded new version but failed to delete backup %s.%s (UUID: %s): %v. You may need to manually delete this orphaned file.",
-					backupName, backupType, backupUUID, err)
-			}
-		} else {
-			fs.Debugf(o.f, "Successfully deleted backup file after upload")
-		}
 	}
 
 	// Update object metadata
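The retry wrapper added throughout this patch follows the usual rclone backend convention: pacer.Call invokes the closure and, while the closure reports the error as retryable, re-invokes it with increasing sleeps. Below is a minimal, self-contained sketch of that wiring; the internxt backend's actual shouldRetry helper and pacer settings may differ, and doAPICall is a hypothetical stand-in for the SDK calls wrapped above.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/lib/pacer"
)

// doAPICall is a hypothetical stand-in for calls such as folders.ListAllFolders.
func doAPICall(ctx context.Context) error { return nil }

// shouldRetry reports whether err is worth retrying, in the usual rclone style:
// context cancellation is never retried, transient errors are.
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	return fserrors.ShouldRetry(err), err
}

func main() {
	ctx := context.Background()
	p := fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(10*time.Millisecond), pacer.MaxSleep(2*time.Second)))
	// Call re-runs the closure with backoff for as long as it returns (true, err).
	err := p.Call(func() (bool, error) {
		return shouldRetry(ctx, doAPICall(ctx))
	})
	fmt.Println("result:", err)
}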
From 43a634604af5396f471003dae6bbc57f37c227ac Mon Sep 17 00:00:00 2001
From: jzunigax2 <125698953+jzunigax2@users.noreply.github.com>
Date: Wed, 24 Dec 2025 13:01:02 -0600
Subject: [PATCH 2/3] internxt: fix pre-upload file existence check and enhance
 backup file deletion process in Update method

---
 backend/internxt/internxt.go | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/backend/internxt/internxt.go b/backend/internxt/internxt.go
index e2cfaaa55db08..726c1650dc190 100644
--- a/backend/internxt/internxt.go
+++ b/backend/internxt/internxt.go
@@ -447,7 +447,7 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string) (*fol
 		return nil, nil
 	}
 
-	if len(checkResult.Files) > 0 && checkResult.Files[0].Exists {
+	if len(checkResult.Files) > 0 && checkResult.Files[0].FileExists() {
 		existingUUID := checkResult.Files[0].UUID
 		if existingUUID != "" {
 			fileMeta, err := files.GetFileMeta(ctx, f.cfg, existingUUID)
@@ -460,7 +460,6 @@ func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string) (*fol
 			}
 		}
 	}
-
 	return nil, nil
 }
 
@@ -853,6 +853,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			}
 			fs.Debugf(o.f, "Upload failed, successfully restored backup file to original name")
 		}
+		return err
 	}
 
 	// Update object metadata
@@ -864,6 +864,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		o.size = 0
 	}
 
+	// Step 3: Upload succeeded - delete the backup file
+	if backupUUID != "" {
+		fs.Debugf(o.f, "Upload succeeded, deleting backup file %s.%s (UUID: %s)", backupName, backupType, backupUUID)
+		err := o.f.pacer.Call(func() (bool, error) {
+			err := files.DeleteFile(ctx, o.f.cfg, backupUUID)
+			return shouldRetry(ctx, err)
+		})
+		if err != nil {
+			fs.Errorf(o.f, "Failed to delete backup file %s.%s (UUID: %s): %v. This may leave an orphaned backup file.",
+				backupName, backupType, backupUUID, err)
+			// Don't fail the upload just because backup deletion failed
+		} else {
+			fs.Debugf(o.f, "Successfully deleted backup file")
+		}
+	}
+
 	return nil
 }
 
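Taken together with patch 1, the resulting Update flow is easier to follow as a straight-line sketch. The helper names below are illustrative stand-ins, not the backend's identifiers: the existing file is renamed aside, the new version is uploaded, a failed upload restores the backup and is now always returned to the caller, and the backup is deleted only once the new version is in place, with a failed delete logged rather than treated as an upload failure.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
)

// Hypothetical stand-ins for the backend's rename/upload/delete calls.
var (
	renameExistingToBackup = func(ctx context.Context) error { return nil }
	uploadNewVersion       = func(ctx context.Context) error { return errors.New("upload failed") }
	restoreBackup          = func(ctx context.Context) error { return nil }
	deleteBackup           = func(ctx context.Context) error { return nil }
)

// updateWithBackup mirrors the order of operations in Update after patch 2.
func updateWithBackup(ctx context.Context) error {
	// Step 1: move the existing file out of the way.
	if err := renameExistingToBackup(ctx); err != nil {
		return fmt.Errorf("failed to rename existing file to backup: %w", err)
	}
	// Step 2: upload the new version to the original location.
	if err := uploadNewVersion(ctx); err != nil {
		if restoreErr := restoreBackup(ctx); restoreErr != nil {
			log.Printf("CRITICAL: upload failed and backup restore failed: %v", restoreErr)
		}
		return err // the upload error is now propagated (patch 2 adds this return)
	}
	// Step 3: only after the new version exists is the backup removed;
	// a failed delete leaves an orphaned backup but does not fail the upload.
	if err := deleteBackup(ctx); err != nil {
		log.Printf("warning: could not delete backup: %v", err)
	}
	return nil
}

func main() {
	fmt.Println(updateWithBackup(context.Background()))
}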
From 6e973261b27dda756556ee5f6b3d0a11e4e2cfdd Mon Sep 17 00:00:00 2001
From: jzunigax2 <125698953+jzunigax2@users.noreply.github.com>
Date: Fri, 26 Dec 2025 16:02:25 -0600
Subject: [PATCH 3/3] internxt: fix folder creation conflict resolution, fix
 pacer no retry in problematic calls

---
 backend/internxt/internxt.go | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/backend/internxt/internxt.go b/backend/internxt/internxt.go
index 726c1650dc190..3c588dd2ee547 100644
--- a/backend/internxt/internxt.go
+++ b/backend/internxt/internxt.go
@@ -72,7 +72,8 @@ func init() {
 			encoder.EncodeSlash |
 			encoder.EncodeBackSlash |
 			encoder.EncodeRightPeriod |
-			encoder.EncodeDot,
+			encoder.EncodeDot |
+			encoder.EncodeCrLf,
 		},
 	}},
 	)
@@ -414,12 +415,20 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error)
 	}
 
 	var resp *folders.Folder
-	err := f.pacer.Call(func() (bool, error) {
+	err := f.pacer.CallNoRetry(func() (bool, error) {
 		var err error
 		resp, err = folders.CreateFolder(ctx, f.cfg, request)
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
+		// If folder already exists (409 conflict), try to find it
+		if strings.Contains(err.Error(), "409") || strings.Contains(err.Error(), "Conflict") {
+			existingID, found, findErr := f.FindLeaf(ctx, pathID, leaf)
+			if findErr == nil && found {
+				fs.Debugf(f, "Folder %q already exists in %q, using existing UUID: %s", leaf, pathID, existingID)
+				return existingID, nil
+			}
+		}
 		return "", fmt.Errorf("can't create folder, %w", err)
 	}
 
@@ -823,7 +832,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 
 	var meta *buckets.CreateMetaResponse
-	err = o.f.pacer.Call(func() (bool, error) {
+	err = o.f.pacer.CallNoRetry(func() (bool, error) {
 		var err error
 		meta, err = buckets.UploadFileStreamAuto(ctx,
 			o.f.cfg,
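The CreateDir change combines two ideas: folder creation is no longer retried blindly (CallNoRetry), and a 409 Conflict is treated as "the folder already exists", falling back to a lookup of the existing folder. A minimal sketch of that fallback, with hypothetical create/find callbacks standing in for folders.CreateFolder and FindLeaf:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// createOrReuseDir tries to create a folder and, on a 409/Conflict error,
// falls back to looking up the existing folder's ID instead of failing.
func createOrReuseDir(create func() (string, error), find func() (string, bool, error)) (string, error) {
	id, err := create()
	if err == nil {
		return id, nil
	}
	if strings.Contains(err.Error(), "409") || strings.Contains(err.Error(), "Conflict") {
		if existingID, found, findErr := find(); findErr == nil && found {
			return existingID, nil
		}
	}
	return "", fmt.Errorf("can't create folder, %w", err)
}

func main() {
	create := func() (string, error) { return "", errors.New("409 Conflict") } // hypothetical API response
	find := func() (string, bool, error) { return "existing-uuid", true, nil } // hypothetical lookup
	fmt.Println(createOrReuseDir(create, find))
}

Matching on the error string is more fragile than checking a typed error or status code, but it mirrors what the patch does today.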