diff --git a/cmd/cat.go b/cmd/cat.go index 698eabf..24e15ff 100644 --- a/cmd/cat.go +++ b/cmd/cat.go @@ -1,7 +1,16 @@ package cmd import ( + "errors" + "fmt" + "io" + "os" + "strings" + + "github.com/meigma/blob" "github.com/spf13/cobra" + + internalcfg "github.com/meigma/blob-cli/internal/config" ) var catCmd = &cobra.Command{ @@ -16,7 +25,84 @@ downloading the entire archive.`, blob cat ghcr.io/acme/configs:v1.0.0 config.json | jq . blob cat ghcr.io/acme/configs:v1.0.0 header.txt body.txt footer.txt > combined.txt`, Args: cobra.MinimumNArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: runCat, +} + +func runCat(cmd *cobra.Command, args []string) error { + // 1. Get config from context + cfg := internalcfg.FromContext(cmd.Context()) + if cfg == nil { + return errors.New("configuration not loaded") + } + + // 2. Parse arguments + inputRef := args[0] + filePaths := args[1:] + + // 3. Resolve alias + resolvedRef := cfg.ResolveAlias(inputRef) + + // 4. Create client (lazy - only downloads manifest + index) + client, err := blob.NewClient(blob.WithDockerConfig()) + if err != nil { + return fmt.Errorf("creating client: %w", err) + } + + // 5. Pull archive (lazy - does NOT download data blob) + ctx := cmd.Context() + blobArchive, err := client.Pull(ctx, resolvedRef) + if err != nil { + return fmt.Errorf("accessing archive %s: %w", resolvedRef, err) + } + + // 6. 
Normalize and validate all files exist and are not directories before outputting anything + normalizedPaths := make([]string, len(filePaths)) + for i, filePath := range filePaths { + // Normalize path - strip leading slash for fs.FS compatibility + normalized := strings.TrimPrefix(filePath, "/") + if normalized == "" { + return fmt.Errorf("invalid path: %s", filePath) + } + normalizedPaths[i] = normalized + + info, err := blobArchive.Stat(normalized) + if err != nil { + return fmt.Errorf("file not found: %s", filePath) + } + if info.IsDir() { + return fmt.Errorf("cannot cat directory: %s", filePath) + } + } + + // 7. Check quiet mode - suppress output only after validation + if cfg.Quiet { return nil - }, + } + + // 8. Stream each file to stdout + for _, normalizedPath := range normalizedPaths { + if err := catFile(blobArchive, normalizedPath); err != nil { + return err + } + } + + return nil +} + +// catFile streams a single file from the archive to stdout. +// Each file read triggers an HTTP range request for just that file's bytes. 
+func catFile(archive *blob.Archive, filePath string) error { + // Open the file (triggers HTTP range request) + f, err := archive.Open(filePath) + if err != nil { + return fmt.Errorf("opening %s: %w", filePath, err) + } + defer f.Close() + + // Stream to stdout + if _, err := io.Copy(os.Stdout, f); err != nil { + return fmt.Errorf("reading %s: %w", filePath, err) + } + + return nil } diff --git a/cmd/cat_test.go b/cmd/cat_test.go new file mode 100644 index 0000000..d2a4add --- /dev/null +++ b/cmd/cat_test.go @@ -0,0 +1,37 @@ +package cmd + +import ( + "context" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCatCmd_NilConfig(t *testing.T) { + viper.Reset() + + ctx := context.Background() + + catCmd.SetContext(ctx) + err := catCmd.RunE(catCmd, []string{"ghcr.io/test:v1", "config.json"}) + + require.Error(t, err) + assert.Contains(t, err.Error(), "configuration not loaded") +} + +func TestCatCmd_MinimumArgs(t *testing.T) { + // Verify command requires at least 2 args (ref + file) + assert.Equal(t, "cat ...", catCmd.Use) + + // Cobra's MinimumNArgs(2) is set + err := catCmd.Args(catCmd, []string{"only-one-arg"}) + require.Error(t, err) + + err = catCmd.Args(catCmd, []string{"ref", "file"}) + require.NoError(t, err) + + err = catCmd.Args(catCmd, []string{"ref", "file1", "file2"}) + require.NoError(t, err) +} diff --git a/cmd/cp.go b/cmd/cp.go index ddabcf0..93c4fdb 100644 --- a/cmd/cp.go +++ b/cmd/cp.go @@ -1,7 +1,21 @@ package cmd import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/meigma/blob" "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/meigma/blob-cli/internal/archive" + internalcfg "github.com/meigma/blob-cli/internal/config" ) var cpCmd = &cobra.Command{ @@ -21,12 +35,502 @@ Behavior: blob cp ghcr.io/acme/configs:v1.0.0:/etc/nginx/ ./nginx/ blob cp ghcr.io/acme/configs:v1.0.0:/a.json 
ghcr.io/acme/configs:v1.0.0:/b.json ./`, Args: cobra.MinimumNArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return nil - }, + RunE: runCp, } func init() { cpCmd.Flags().BoolP("recursive", "r", true, "copy directories recursively") - cpCmd.Flags().Bool("preserve", false, "preserve file permissions from archive") + cpCmd.Flags().Bool("preserve", false, "preserve file permissions and timestamps from archive") + cpCmd.Flags().BoolP("force", "f", false, "overwrite existing files") +} + +// cpFlags holds the parsed command flags. +type cpFlags struct { + recursive bool + preserve bool + force bool +} + +// cpSource represents a parsed source argument (ref:/path). +type cpSource struct { + inputRef string // Original input reference (for display) + ref string // Resolved reference + path string // Path within archive (with leading /) +} + +// cpResolvedSource represents a source with its archive and detected type. +type cpResolvedSource struct { + cpSource + archive *blob.Archive + isDir bool +} + +// cpResult contains the result of a copy operation. +type cpResult struct { + Sources []cpSourceResult `json:"sources"` + Destination string `json:"destination"` + FileCount int `json:"file_count"` + TotalSize uint64 `json:"total_size"` + SizeHuman string `json:"size_human,omitempty"` +} + +// cpSourceResult represents a single source in the result. +type cpSourceResult struct { + Ref string `json:"ref"` + Path string `json:"path"` +} + +func runCp(cmd *cobra.Command, args []string) error { + // 1. Get config from context + cfg := internalcfg.FromContext(cmd.Context()) + if cfg == nil { + return errors.New("configuration not loaded") + } + + // 2. Parse flags + flags, err := parseCpFlags(cmd) + if err != nil { + return err + } + + // 3. Parse source arguments (all but last) + sourceArgs := args[:len(args)-1] + dest := args[len(args)-1] + + sources, err := parseSourceArgs(sourceArgs, cfg) + if err != nil { + return err + } + + // 4. 
Pull archives and resolve source types + ctx := cmd.Context() + archiveCache := make(map[string]*blob.Archive) + resolvedSources := make([]cpResolvedSource, 0, len(sources)) + + for _, src := range sources { + rsrc, resolveErr := resolveSource(ctx, src, archiveCache) + if resolveErr != nil { + return resolveErr + } + resolvedSources = append(resolvedSources, rsrc) + } + + // 5. Validate destination and determine overall copy mode + destPath, err := validateAndPrepareDestination(resolvedSources, dest, flags) + if err != nil { + return err + } + + // 6. Execute copy operations + result := &cpResult{ + Sources: make([]cpSourceResult, 0, len(sources)), + Destination: destPath, + } + + copyOpts := buildCopyOpts(flags) + + for _, rsrc := range resolvedSources { + count, size, err := copyResolvedSource(rsrc, destPath, flags, copyOpts, len(resolvedSources) > 1) + if err != nil { + return err + } + result.FileCount += count + result.TotalSize += size + result.Sources = append(result.Sources, cpSourceResult{ + Ref: rsrc.inputRef, + Path: rsrc.path, + }) + } + + result.SizeHuman = archive.FormatSize(result.TotalSize) + + // 7. Output result + return outputCpResult(cfg, result) +} + +// resolveSource pulls the archive (if not cached) and detects if the source is a file or directory. 
+func resolveSource(ctx context.Context, src cpSource, cache map[string]*blob.Archive) (cpResolvedSource, error) { + // Get or create archive for this ref + blobArchive, ok := cache[src.ref] + if !ok { + client, clientErr := blob.NewClient(blob.WithDockerConfig()) + if clientErr != nil { + return cpResolvedSource{}, fmt.Errorf("creating client: %w", clientErr) + } + var pullErr error + blobArchive, pullErr = client.Pull(ctx, src.ref) + if pullErr != nil { + return cpResolvedSource{}, fmt.Errorf("accessing archive %s: %w", src.ref, pullErr) + } + cache[src.ref] = blobArchive + } + + // Detect if source is a file or directory + srcPath := strings.TrimPrefix(src.path, "/") + if srcPath == "" { + srcPath = "." + } + isDir, dirErr := isArchiveDir(blobArchive, srcPath) + if dirErr != nil { + return cpResolvedSource{}, fmt.Errorf("checking source %s: %w", src.path, dirErr) + } + + return cpResolvedSource{ + cpSource: src, + archive: blobArchive, + isDir: isDir, + }, nil +} + +// isArchiveDir checks if the path is a directory in the archive. +// It uses archive.Stat which synthesizes directory info from entry prefixes. +func isArchiveDir(blobArchive *blob.Archive, path string) (bool, error) { + // Handle trailing slash hint from user input + if strings.HasSuffix(path, "/") { + path = strings.TrimSuffix(path, "/") + if path == "" { + path = "." + } + } + + info, err := blobArchive.Stat(path) + if err != nil { + // Check if this is a "not exist" error + var pathErr *fs.PathError + if errors.As(err, &pathErr) && errors.Is(pathErr.Err, fs.ErrNotExist) { + return false, fmt.Errorf("path not found in archive: %s", path) + } + return false, err + } + return info.IsDir(), nil +} + +// destInfo holds information about the destination path. +type destInfo struct { + absPath string + exists bool + isDir bool + endsWithSlash bool +} + +// getDestInfo gathers information about the destination path. 
+func getDestInfo(dest string) (destInfo, error) { + absPath, err := filepath.Abs(dest) + if err != nil { + return destInfo{}, fmt.Errorf("resolving destination path: %w", err) + } + + info, statErr := os.Stat(absPath) + exists := statErr == nil + isDir := exists && info.IsDir() + endsSlash := strings.HasSuffix(dest, "/") || strings.HasSuffix(dest, string(os.PathSeparator)) + + return destInfo{ + absPath: absPath, + exists: exists, + isDir: isDir, + endsWithSlash: endsSlash, + }, nil +} + +// ensureDir creates the directory if it doesn't exist. +func ensureDir(path string) error { + if err := os.MkdirAll(path, 0o750); err != nil { + return fmt.Errorf("creating directory: %w", err) + } + return nil +} + +// validateAndPrepareDestination validates the destination against sources and prepares it. +func validateAndPrepareDestination(sources []cpResolvedSource, dest string, flags cpFlags) (string, error) { + di, err := getDestInfo(dest) + if err != nil { + return "", err + } + + // Check for directory sources without -r flag + for _, src := range sources { + if src.isDir && !flags.recursive { + return "", fmt.Errorf("cannot copy directory %s without -r flag", src.path) + } + } + + // Multiple sources always require a directory destination + if len(sources) > 1 { + return prepareMultiSourceDest(di) + } + + // Single source + return prepareSingleSourceDest(sources[0], di) +} + +// prepareMultiSourceDest prepares destination for multiple sources. +func prepareMultiSourceDest(di destInfo) (string, error) { + if di.exists && !di.isDir { + return "", errors.New("destination must be a directory when copying multiple sources") + } + if !di.exists { + if err := ensureDir(di.absPath); err != nil { + return "", err + } + } + return di.absPath, nil +} + +// prepareSingleSourceDest prepares destination for a single source. 
+func prepareSingleSourceDest(src cpResolvedSource, di destInfo) (string, error) { + // Directory source requires directory destination + if src.isDir { + if di.exists && !di.isDir { + return "", fmt.Errorf("cannot copy directory %s to file %s", src.path, di.absPath) + } + if !di.exists { + if err := ensureDir(di.absPath); err != nil { + return "", err + } + } + return di.absPath, nil + } + + // File source to directory + if di.isDir || di.endsWithSlash { + if !di.exists { + if err := ensureDir(di.absPath); err != nil { + return "", err + } + } + return di.absPath, nil + } + + // File to file copy - ensure parent directory exists + parentDir := filepath.Dir(di.absPath) + if err := ensureDir(parentDir); err != nil { + return "", fmt.Errorf("creating parent directory: %w", err) + } + + return di.absPath, nil +} + +// copyResolvedSource copies a resolved source to the destination. +func copyResolvedSource(rsrc cpResolvedSource, destPath string, flags cpFlags, opts []blob.CopyOption, multiSource bool) (fileCount int, totalSize uint64, err error) { + srcPath := strings.TrimPrefix(rsrc.path, "/") + if srcPath == "" { + srcPath = "." + } + + if rsrc.isDir { + return copyDirectory(rsrc.archive, srcPath, rsrc.path, destPath, opts) + } + + // File copy - determine if copying to directory or specific file + destInfo, statErr := os.Stat(destPath) + destIsDir := statErr == nil && destInfo.IsDir() + + if destIsDir || multiSource { + return copyFileToDir(rsrc.archive, srcPath, rsrc.path, destPath, opts) + } + + return copyFileToFile(rsrc.archive, srcPath, rsrc.path, destPath, flags) +} + +// copyDirectory copies a directory recursively. 
+func copyDirectory(blobArchive *blob.Archive, srcPath, displayPath, destPath string, opts []blob.CopyOption) (fileCount int, totalSize uint64, err error) { + // Normalize path - strip trailing slash for CopyDir (fs.ValidPath rejects trailing slashes) + normalizedPath := strings.TrimSuffix(srcPath, "/") + if normalizedPath == "" { + normalizedPath = "." + } + + if err = blobArchive.CopyDir(destPath, normalizedPath, opts...); err != nil { + return 0, 0, fmt.Errorf("copying directory %s: %w", displayPath, err) + } + + // Use normalized path for prefix matching + prefix := normalizedPath + if prefix == "." { + prefix = "" + } + + // Count files - use same prefix logic + if prefix == "" { + // Root copy - count all entries + for entry := range blobArchive.Entries() { + if !entry.Mode().IsDir() { + fileCount++ + totalSize += entry.OriginalSize() + } + } + } else { + // Prefix copy - must add trailing slash for proper matching + for entry := range blobArchive.EntriesWithPrefix(prefix + "/") { + if !entry.Mode().IsDir() { + fileCount++ + totalSize += entry.OriginalSize() + } + } + // Also check for exact file match (in case normalizedPath was both file and prefix) + if entry, ok := blobArchive.Entry(normalizedPath); ok && !entry.Mode().IsDir() { + fileCount++ + totalSize += entry.OriginalSize() + } + } + + return fileCount, totalSize, nil +} + +// copyFileToDir copies a file into a directory. 
+func copyFileToDir(blobArchive *blob.Archive, srcPath, displayPath, destPath string, opts []blob.CopyOption) (fileCount int, totalSize uint64, err error) { + // Verify source exists and is a file + entry, ok := blobArchive.Entry(srcPath) + if !ok { + return 0, 0, fmt.Errorf("file not found: %s", displayPath) + } + if entry.Mode().IsDir() { + return 0, 0, fmt.Errorf("expected file but got directory: %s", displayPath) + } + + if err := blobArchive.CopyToWithOptions(destPath, []string{srcPath}, opts...); err != nil { + return 0, 0, fmt.Errorf("copying %s: %w", displayPath, err) + } + + return 1, entry.OriginalSize(), nil +} + +// copyFileToFile copies a single file to a specific destination path. +func copyFileToFile(blobArchive *blob.Archive, srcPath, displayPath, destPath string, flags cpFlags) (fileCount int, totalSize uint64, err error) { + entry, ok := blobArchive.Entry(srcPath) + if !ok { + return 0, 0, fmt.Errorf("file not found: %s", displayPath) + } + if entry.Mode().IsDir() { + return 0, 0, fmt.Errorf("expected file but got directory: %s", displayPath) + } + + // Check if destination exists BEFORE reading file content (avoid unnecessary downloads) + if _, statErr := os.Stat(destPath); statErr == nil { + if !flags.force { + // File exists and force not set - skip without error + return 0, 0, nil + } + } + + // Now read the file content (triggers HTTP range request) + content, err := blobArchive.ReadFile(srcPath) + if err != nil { + return 0, 0, fmt.Errorf("reading %s: %w", displayPath, err) + } + + perm := os.FileMode(0o644) + if flags.preserve { + perm = entry.Mode() + } + if err := os.WriteFile(destPath, content, perm); err != nil { + return 0, 0, fmt.Errorf("writing %s: %w", destPath, err) + } + + // Preserve modification time if requested + if flags.preserve { + if err := os.Chtimes(destPath, entry.ModTime(), entry.ModTime()); err != nil { + // Non-fatal error - log but continue + _ = err + } + } + + return 1, entry.OriginalSize(), nil +} + +// 
parseCpFlags extracts and validates flags from the command. +func parseCpFlags(cmd *cobra.Command) (cpFlags, error) { + var flags cpFlags + var err error + + flags.recursive, err = cmd.Flags().GetBool("recursive") + if err != nil { + return flags, fmt.Errorf("reading recursive flag: %w", err) + } + + flags.preserve, err = cmd.Flags().GetBool("preserve") + if err != nil { + return flags, fmt.Errorf("reading preserve flag: %w", err) + } + + flags.force, err = cmd.Flags().GetBool("force") + if err != nil { + return flags, fmt.Errorf("reading force flag: %w", err) + } + + return flags, nil +} + +// parseSourceArg parses a single source argument in "ref:/path" format. +func parseSourceArg(arg string, cfg *internalcfg.Config) (cpSource, error) { + // Find ":/" which separates ref from archive path + // Archive paths always start with "/" + idx := strings.Index(arg, ":/") + if idx == -1 { + return cpSource{}, fmt.Errorf("invalid source format %q: expected <ref>:<path> (path must start with /)", arg) + } + + inputRef := arg[:idx] + archivePath := arg[idx+1:] // Include the leading / + + if inputRef == "" { + return cpSource{}, fmt.Errorf("invalid source format %q: reference cannot be empty", arg) + } + + resolvedRef := cfg.ResolveAlias(inputRef) + + return cpSource{ + inputRef: inputRef, + ref: resolvedRef, + path: archivePath, + }, nil +} + +// parseSourceArgs parses all source arguments. +func parseSourceArgs(args []string, cfg *internalcfg.Config) ([]cpSource, error) { + sources := make([]cpSource, 0, len(args)) + for _, arg := range args { + src, err := parseSourceArg(arg, cfg) + if err != nil { + return nil, err + } + sources = append(sources, src) + } + return sources, nil +} + +// buildCopyOpts creates copy options based on flags. 
+func buildCopyOpts(flags cpFlags) []blob.CopyOption { + opts := []blob.CopyOption{blob.CopyWithOverwrite(flags.force)} + if flags.preserve { + opts = append(opts, blob.CopyWithPreserveMode(true), blob.CopyWithPreserveTimes(true)) + } + return opts +} + +// outputCpResult formats and outputs the copy result. +func outputCpResult(cfg *internalcfg.Config, result *cpResult) error { + if cfg.Quiet { + return nil + } + if viper.GetString("output") == internalcfg.OutputJSON { + return cpJSON(result) + } + return cpText(result) +} + +func cpJSON(result *cpResult) error { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", " ") + return enc.Encode(result) +} + +func cpText(result *cpResult) error { + fmt.Printf("Copied %d file(s) (%s)\n", result.FileCount, result.SizeHuman) + for _, src := range result.Sources { + fmt.Printf(" %s:%s\n", src.Ref, src.Path) + } + fmt.Printf(" → %s\n", result.Destination) + return nil } diff --git a/cmd/cp_test.go b/cmd/cp_test.go new file mode 100644 index 0000000..93b30d3 --- /dev/null +++ b/cmd/cp_test.go @@ -0,0 +1,369 @@ +package cmd + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + internalcfg "github.com/meigma/blob-cli/internal/config" +) + +func TestCpCmd_NilConfig(t *testing.T) { + viper.Reset() + + ctx := context.Background() + + cpCmd.SetContext(ctx) + err := cpCmd.RunE(cpCmd, []string{"ghcr.io/test:v1:/config.json", "./dest"}) + + require.Error(t, err) + assert.Contains(t, err.Error(), "configuration not loaded") +} + +func TestCpCmd_MinimumArgs(t *testing.T) { + // Verify command requires at least 2 args (source + dest) + assert.Equal(t, "cp :... 
", cpCmd.Use) + + // Cobra's MinimumNArgs(2) is set + err := cpCmd.Args(cpCmd, []string{"only-one-arg"}) + require.Error(t, err) + + err = cpCmd.Args(cpCmd, []string{"source", "dest"}) + require.NoError(t, err) + + err = cpCmd.Args(cpCmd, []string{"src1", "src2", "dest"}) + require.NoError(t, err) +} + +func TestParseSourceArg(t *testing.T) { + cfg := &internalcfg.Config{ + Aliases: map[string]string{ + "myalias": "ghcr.io/acme/repo", + }, + } + + tests := []struct { + name string + arg string + wantRef string + wantPath string + wantInputRef string + wantErr string + }{ + { + name: "standard ref with tag", + arg: "ghcr.io/acme/repo:v1.0.0:/config.json", + wantRef: "ghcr.io/acme/repo:v1.0.0", + wantPath: "/config.json", + wantInputRef: "ghcr.io/acme/repo:v1.0.0", + }, + { + name: "ref with registry port", + arg: "registry:5000/repo:v1:/path/to/file", + wantRef: "registry:5000/repo:v1", + wantPath: "/path/to/file", + wantInputRef: "registry:5000/repo:v1", + }, + { + name: "alias resolution", + arg: "myalias:/config.json", + wantRef: "ghcr.io/acme/repo:latest", + wantPath: "/config.json", + wantInputRef: "myalias", + }, + { + name: "alias with tag override", + arg: "myalias:v2:/config.json", + wantRef: "ghcr.io/acme/repo:v2", + wantPath: "/config.json", + wantInputRef: "myalias:v2", + }, + { + name: "directory path with trailing slash", + arg: "ghcr.io/acme/repo:v1:/etc/nginx/", + wantRef: "ghcr.io/acme/repo:v1", + wantPath: "/etc/nginx/", + wantInputRef: "ghcr.io/acme/repo:v1", + }, + { + name: "root path", + arg: "ghcr.io/acme/repo:v1:/", + wantRef: "ghcr.io/acme/repo:v1", + wantPath: "/", + wantInputRef: "ghcr.io/acme/repo:v1", + }, + { + name: "directory path without trailing slash", + arg: "ghcr.io/acme/repo:v1:/etc/nginx", + wantRef: "ghcr.io/acme/repo:v1", + wantPath: "/etc/nginx", + wantInputRef: "ghcr.io/acme/repo:v1", + }, + { + name: "missing path separator", + arg: "ghcr.io/acme/repo:v1", + wantErr: "invalid source format", + }, + { + name: "empty 
reference", + arg: ":/config.json", + wantErr: "reference cannot be empty", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + src, err := parseSourceArg(tt.arg, cfg) + + if tt.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.wantRef, src.ref) + assert.Equal(t, tt.wantPath, src.path) + assert.Equal(t, tt.wantInputRef, src.inputRef) + }) + } +} + +func TestGetDestInfo(t *testing.T) { + tmpDir := t.TempDir() + existingDir := filepath.Join(tmpDir, "existing-dir") + require.NoError(t, os.MkdirAll(existingDir, 0o755)) + existingFile := filepath.Join(tmpDir, "existing-file") + require.NoError(t, os.WriteFile(existingFile, []byte("test"), 0o644)) + + tests := []struct { + name string + dest string + wantExists bool + wantIsDir bool + wantEndsSlash bool + }{ + { + name: "existing directory", + dest: existingDir, + wantExists: true, + wantIsDir: true, + }, + { + name: "existing file", + dest: existingFile, + wantExists: true, + wantIsDir: false, + }, + { + name: "non-existent path", + dest: filepath.Join(tmpDir, "new-file"), + wantExists: false, + wantIsDir: false, + }, + { + name: "path ending with slash", + dest: filepath.Join(tmpDir, "newdir") + "/", + wantExists: false, + wantEndsSlash: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + di, err := getDestInfo(tt.dest) + require.NoError(t, err) + assert.Equal(t, tt.wantExists, di.exists) + assert.Equal(t, tt.wantIsDir, di.isDir) + assert.Equal(t, tt.wantEndsSlash, di.endsWithSlash) + }) + } +} + +func TestValidateAndPrepareDestination(t *testing.T) { + tmpDir := t.TempDir() + existingDir := filepath.Join(tmpDir, "existing-dir") + require.NoError(t, os.MkdirAll(existingDir, 0o755)) + existingFile := filepath.Join(tmpDir, "existing-file") + require.NoError(t, os.WriteFile(existingFile, []byte("test"), 0o644)) + + tests := []struct { + name string + sources 
[]cpResolvedSource + dest string + flags cpFlags + wantErr string + }{ + { + name: "single file to non-existent path", + sources: []cpResolvedSource{{isDir: false, cpSource: cpSource{path: "/config.json"}}}, + dest: filepath.Join(tmpDir, "new-file.json"), + flags: cpFlags{recursive: true}, + }, + { + name: "single file to existing directory", + sources: []cpResolvedSource{{isDir: false, cpSource: cpSource{path: "/config.json"}}}, + dest: existingDir, + flags: cpFlags{recursive: true}, + }, + { + name: "single file to path ending with slash", + sources: []cpResolvedSource{{isDir: false, cpSource: cpSource{path: "/config.json"}}}, + dest: filepath.Join(tmpDir, "newdir") + "/", + flags: cpFlags{recursive: true}, + }, + { + name: "directory source with recursive flag", + sources: []cpResolvedSource{{isDir: true, cpSource: cpSource{path: "/etc/nginx"}}}, + dest: filepath.Join(tmpDir, "nginx-config"), + flags: cpFlags{recursive: true}, + }, + { + name: "directory source without recursive flag", + sources: []cpResolvedSource{{isDir: true, cpSource: cpSource{path: "/etc/nginx"}}}, + dest: filepath.Join(tmpDir, "nginx-config"), + flags: cpFlags{recursive: false}, + wantErr: "without -r flag", + }, + { + name: "multiple sources to directory", + sources: []cpResolvedSource{ + {isDir: false, cpSource: cpSource{path: "/a.json"}}, + {isDir: false, cpSource: cpSource{path: "/b.json"}}, + }, + dest: existingDir, + flags: cpFlags{recursive: true}, + }, + { + name: "multiple sources to non-existent directory", + sources: []cpResolvedSource{ + {isDir: false, cpSource: cpSource{path: "/a.json"}}, + {isDir: false, cpSource: cpSource{path: "/b.json"}}, + }, + dest: filepath.Join(tmpDir, "new-dir"), + flags: cpFlags{recursive: true}, + }, + { + name: "multiple sources to existing file", + sources: []cpResolvedSource{ + {isDir: false, cpSource: cpSource{path: "/a.json"}}, + {isDir: false, cpSource: cpSource{path: "/b.json"}}, + }, + dest: existingFile, + flags: cpFlags{recursive: true}, + 
wantErr: "destination must be a directory", + }, + { + name: "mixed file and directory sources", + sources: []cpResolvedSource{ + {isDir: false, cpSource: cpSource{path: "/config.json"}}, + {isDir: true, cpSource: cpSource{path: "/etc/nginx"}}, + }, + dest: filepath.Join(tmpDir, "mixed-dest"), + flags: cpFlags{recursive: true}, + }, + { + name: "directory source to existing file", + sources: []cpResolvedSource{{isDir: true, cpSource: cpSource{path: "/etc/nginx"}}}, + dest: existingFile, + flags: cpFlags{recursive: true}, + wantErr: "cannot copy directory", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + path, err := validateAndPrepareDestination(tt.sources, tt.dest, tt.flags) + + if tt.wantErr != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tt.wantErr) + return + } + + require.NoError(t, err) + assert.NotEmpty(t, path) + }) + } +} + +func TestCpFlags(t *testing.T) { + // Reset flags for testing + cpCmd.Flags().Set("recursive", "false") + cpCmd.Flags().Set("preserve", "true") + cpCmd.Flags().Set("force", "true") + + flags, err := parseCpFlags(cpCmd) + require.NoError(t, err) + assert.False(t, flags.recursive) + assert.True(t, flags.preserve) + assert.True(t, flags.force) + + // Reset to defaults + cpCmd.Flags().Set("recursive", "true") + cpCmd.Flags().Set("preserve", "false") + cpCmd.Flags().Set("force", "false") + + flags, err = parseCpFlags(cpCmd) + require.NoError(t, err) + assert.True(t, flags.recursive) + assert.False(t, flags.preserve) + assert.False(t, flags.force) +} + +func TestCpJSON(t *testing.T) { + result := &cpResult{ + Sources: []cpSourceResult{ + {Ref: "ghcr.io/test:v1", Path: "/config.json"}, + }, + Destination: "/tmp/dest", + FileCount: 1, + TotalSize: 1024, + SizeHuman: "1.0K", + } + + // Capture stdout + oldStdout := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + err := cpJSON(result) + + w.Close() + os.Stdout = oldStdout + + require.NoError(t, err) + + var got cpResult + err = 
json.NewDecoder(r).Decode(&got) + require.NoError(t, err) + + assert.Equal(t, result.Destination, got.Destination) + assert.Equal(t, result.FileCount, got.FileCount) + assert.Equal(t, result.TotalSize, got.TotalSize) + require.Len(t, got.Sources, 1) + assert.Equal(t, "/config.json", got.Sources[0].Path) +} + +func TestBuildCopyOpts(t *testing.T) { + // Without preserve, without force + flags := cpFlags{recursive: true, preserve: false, force: false} + opts := buildCopyOpts(flags) + assert.Len(t, opts, 1) // Only overwrite option (set to false) + + // With preserve + flags = cpFlags{recursive: true, preserve: true, force: false} + opts = buildCopyOpts(flags) + assert.Len(t, opts, 3) // overwrite + mode + times + + // With force + flags = cpFlags{recursive: true, preserve: false, force: true} + opts = buildCopyOpts(flags) + assert.Len(t, opts, 1) // overwrite option (set to true) +}