From eac71afda2a73d5ea33f37a97ee9e854c72e9be1 Mon Sep 17 00:00:00 2001 From: Brian Witt Date: Thu, 13 Nov 2025 14:45:35 -0800 Subject: [PATCH] error on out of space --- AUTHORS | 1 + api/files.go | 43 +++- api/files_test.go | 476 +++++++++++++++++++++++++++++++++++++++++++ files/public.go | 25 ++- files/public_test.go | 383 ++++++++++++++++++++++++++++++++++ 5 files changed, 924 insertions(+), 4 deletions(-) create mode 100644 api/files_test.go diff --git a/AUTHORS b/AUTHORS index 0014b68c8..2f1b07aac 100644 --- a/AUTHORS +++ b/AUTHORS @@ -78,3 +78,4 @@ List of contributors, in chronological order: * Juan Calderon-Perez (https://github.com/gaby) * Ato Araki (https://github.com/atotto) * Roman Lebedev (https://github.com/LebedevRI) +* Brian Witt (https://github.com/bwitt) diff --git a/api/files.go b/api/files.go index 7c6ad54d4..e51ea0069 100644 --- a/api/files.go +++ b/api/files.go @@ -13,6 +13,10 @@ import ( "github.com/saracen/walker" ) +// syncFile is a seam to allow tests to force fsync failures (e.g. ENOSPC). +// In production it calls (*os.File).Sync(). +var syncFile = func(f *os.File) error { return f.Sync() } + func verifyPath(path string) bool { path = filepath.Clean(path) for _, part := range strings.Split(path, string(filepath.Separator)) { @@ -114,34 +118,69 @@ func apiFilesUpload(c *gin.Context) { } stored := []string{} + openFiles := []*os.File{} + // Write all files first for _, files := range c.Request.MultipartForm.File { for _, file := range files { src, err := file.Open() if err != nil { + // Close any files we've opened + for _, f := range openFiles { + _ = f.Close() + } AbortWithJSONError(c, 500, err) return } - defer func() { _ = src.Close() }() destPath := filepath.Join(path, filepath.Base(file.Filename)) dst, err := os.Create(destPath) if err != nil { + _ = src.Close() + // Close any files we've opened + for _, f := range openFiles { + _ = f.Close() + } AbortWithJSONError(c, 500, err) return } - defer func() { _ = dst.Close() }() _, err = io.Copy(dst, src) + _ = src.Close() if err != nil { + _ = dst.Close() + // Close any files we've opened + for _, f := range openFiles { + _ = f.Close() + } AbortWithJSONError(c, 500, err) return } + // Keep file open for batch sync + openFiles = append(openFiles, dst) stored = append(stored, filepath.Join(c.Params.ByName("dir"), filepath.Base(file.Filename))) } } + // Sync all files at once to catch ENOSPC errors + for i, dst := range openFiles { + err := syncFile(dst) + if err != nil { + // Close all files + for _, f := range openFiles { + _ = f.Close() + } + AbortWithJSONError(c, 500, fmt.Errorf("error syncing file %s: %s", stored[i], err)) + return + } + } + + // Close all files + for _, dst := range openFiles { + _ = dst.Close() + } + apiFilesUploadedCounter.WithLabelValues(c.Params.ByName("dir")).Inc() c.JSON(200, stored) } diff --git a/api/files_test.go b/api/files_test.go new file mode 100644 index 000000000..9083a8d1c --- /dev/null +++ b/api/files_test.go @@ -0,0 +1,476 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "mime/multipart" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/aptly-dev/aptly/aptly" + ctx "github.com/aptly-dev/aptly/context" + "github.com/gin-gonic/gin" + "github.com/smira/flag" + + . 
"gopkg.in/check.v1" +) + +type FilesUploadDiskFullSuite struct { + aptlyContext *ctx.AptlyContext + flags *flag.FlagSet + configFile *os.File + router http.Handler +} + +var _ = Suite(&FilesUploadDiskFullSuite{}) + +func (s *FilesUploadDiskFullSuite) SetUpTest(c *C) { + aptly.Version = "testVersion" + + file, err := os.CreateTemp("", "aptly") + c.Assert(err, IsNil) + s.configFile = file + + jsonString, err := json.Marshal(gin.H{ + "architectures": []string{}, + "rootDir": c.MkDir(), + }) + c.Assert(err, IsNil) + _, err = file.Write(jsonString) + c.Assert(err, IsNil) + _ = file.Close() + + flags := flag.NewFlagSet("fakeFlags", flag.ContinueOnError) + flags.Bool("no-lock", false, "dummy") + flags.Int("db-open-attempts", 3, "dummy") + flags.String("config", s.configFile.Name(), "dummy") + flags.String("architectures", "", "dummy") + s.flags = flags + + aptlyContext, err := ctx.NewContext(s.flags) + c.Assert(err, IsNil) + + s.aptlyContext = aptlyContext + s.router = Router(aptlyContext) + context = aptlyContext +} + +func (s *FilesUploadDiskFullSuite) TearDownTest(c *C) { + if s.configFile != nil { + _ = os.Remove(s.configFile.Name()) + } + if s.aptlyContext != nil { + s.aptlyContext.Shutdown() + } +} + +func (s *FilesUploadDiskFullSuite) TestUploadSuccessWithSync(c *C) { + testContent := []byte("test file content for upload") + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + part, err := writer.CreateFormFile("file", "testfile.txt") + c.Assert(err, IsNil) + + _, err = part.Write(testContent) + c.Assert(err, IsNil) + + err = writer.Close() + c.Assert(err, IsNil) + + req, err := http.NewRequest("POST", "/api/files/testdir", body) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", writer.FormDataContentType()) + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 200) + + uploadedFile := filepath.Join(s.aptlyContext.Config().GetRootDir(), "upload", "testdir", "testfile.txt") + content, err := os.ReadFile(uploadedFile) + c.Assert(err, IsNil) + c.Check(content, DeepEquals, testContent) +} + +func (s *FilesUploadDiskFullSuite) TestUploadVerifiesFileIntegrity(c *C) { + testContent := bytes.Repeat([]byte("A"), 10000) + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + part, err := writer.CreateFormFile("file", "largefile.bin") + c.Assert(err, IsNil) + + _, err = io.Copy(part, bytes.NewReader(testContent)) + c.Assert(err, IsNil) + + err = writer.Close() + c.Assert(err, IsNil) + + req, err := http.NewRequest("POST", "/api/files/testdir2", body) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", writer.FormDataContentType()) + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 200) + + uploadedFile := filepath.Join(s.aptlyContext.Config().GetRootDir(), "upload", "testdir2", "largefile.bin") + content, err := os.ReadFile(uploadedFile) + c.Assert(err, IsNil) + c.Check(len(content), Equals, len(testContent)) + c.Check(content, DeepEquals, testContent) +} + +func (s *FilesUploadDiskFullSuite) TestUploadMultipleFilesWithBatchSync(c *C) { + testFiles := map[string][]byte{ + "file1.txt": []byte("content of file 1"), + "file2.txt": bytes.Repeat([]byte("B"), 5000), + "file3.deb": []byte("debian package content"), + } + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + for filename, content := range testFiles { + part, err := writer.CreateFormFile("file", filename) + c.Assert(err, IsNil) + _, err = part.Write(content) + c.Assert(err, IsNil) + } + + err := writer.Close() + 
c.Assert(err, IsNil) + + req, err := http.NewRequest("POST", "/api/files/multitest", body) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", writer.FormDataContentType()) + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 200) + + uploadDir := filepath.Join(s.aptlyContext.Config().GetRootDir(), "upload", "multitest") + for filename, expectedContent := range testFiles { + uploadedFile := filepath.Join(uploadDir, filename) + content, err := os.ReadFile(uploadedFile) + c.Assert(err, IsNil, Commentf("Failed to read %s", filename)) + c.Check(content, DeepEquals, expectedContent, Commentf("Content mismatch for %s", filename)) + } +} + +func (s *FilesUploadDiskFullSuite) TestUploadReturnsErrorOnSyncFailure(c *C) { + oldSyncFile := syncFile + syncFile = func(f *os.File) error { + if filepath.Base(f.Name()) == "syncfail.txt" { + return syscall.ENOSPC + } + return nil + } + defer func() { syncFile = oldSyncFile }() + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + + part1, err := writer.CreateFormFile("file", "ok.txt") + c.Assert(err, IsNil) + _, err = part1.Write([]byte("ok")) + c.Assert(err, IsNil) + + part2, err := writer.CreateFormFile("file", "syncfail.txt") + c.Assert(err, IsNil) + _, err = part2.Write([]byte("will fail on sync")) + c.Assert(err, IsNil) + + err = writer.Close() + c.Assert(err, IsNil) + + req, err := http.NewRequest("POST", "/api/files/syncfaildir", body) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", writer.FormDataContentType()) + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 500) + c.Check(bytes.Contains(w.Body.Bytes(), []byte("error syncing file")), Equals, true) +} + +func (s *FilesUploadDiskFullSuite) TestVerifyPath(c *C) { + c.Check(verifyPath("a/b/c"), Equals, true) + c.Check(verifyPath("../x"), Equals, false) + c.Check(verifyPath("./x"), Equals, true) + c.Check(verifyPath(".."), Equals, false) + c.Check(verifyPath("."), Equals, false) +} + +func (s *FilesUploadDiskFullSuite) TestListDirsEmptyWhenUploadMissing(c *C) { + _ = os.RemoveAll(s.aptlyContext.UploadPath()) + + req, err := http.NewRequest("GET", "/api/files", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 200) + c.Check(strings.TrimSpace(w.Body.String()), Equals, "[]") +} + +func (s *FilesUploadDiskFullSuite) TestListDirsReturnsDirectories(c *C) { + uploadRoot := s.aptlyContext.UploadPath() + c.Assert(os.MkdirAll(filepath.Join(uploadRoot, "d1"), 0777), IsNil) + c.Assert(os.MkdirAll(filepath.Join(uploadRoot, "d2"), 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(uploadRoot, "rootfile"), []byte("x"), 0644), IsNil) + + req, err := http.NewRequest("GET", "/api/files", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 200) + body := w.Body.String() + c.Check(strings.Contains(body, "d1"), Equals, true) + c.Check(strings.Contains(body, "d2"), Equals, true) +} + +func (s *FilesUploadDiskFullSuite) TestListFilesNotFound(c *C) { + req, err := http.NewRequest("GET", "/api/files/does-not-exist", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 404) +} + +func (s *FilesUploadDiskFullSuite) TestListFilesReturnsFiles(c *C) { + base := filepath.Join(s.aptlyContext.UploadPath(), "dir") + c.Assert(os.MkdirAll(base, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(base, "a.txt"), []byte("a"), 0644), IsNil) + 
c.Assert(os.WriteFile(filepath.Join(base, "b.txt"), []byte("b"), 0644), IsNil) + + req, err := http.NewRequest("GET", "/api/files/dir", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 200) + body := w.Body.String() + c.Check(strings.Contains(body, "a.txt"), Equals, true) + c.Check(strings.Contains(body, "b.txt"), Equals, true) +} + +func (s *FilesUploadDiskFullSuite) TestDeleteDirRemovesDirectory(c *C) { + base := filepath.Join(s.aptlyContext.UploadPath(), "todel") + c.Assert(os.MkdirAll(base, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(base, "a.txt"), []byte("a"), 0644), IsNil) + + req, err := http.NewRequest("DELETE", "/api/files/todel", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 200) + + _, statErr := os.Stat(base) + c.Check(os.IsNotExist(statErr), Equals, true) +} + +func (s *FilesUploadDiskFullSuite) TestDeleteFileRemovesFile(c *C) { + base := filepath.Join(s.aptlyContext.UploadPath(), "todel2") + c.Assert(os.MkdirAll(base, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(base, "a.txt"), []byte("a"), 0644), IsNil) + + req, err := http.NewRequest("DELETE", "/api/files/todel2/a.txt", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 200) + + _, statErr := os.Stat(filepath.Join(base, "a.txt")) + c.Check(os.IsNotExist(statErr), Equals, true) +} + +func (s *FilesUploadDiskFullSuite) TestDeleteFileNotFoundStillOk(c *C) { + base := filepath.Join(s.aptlyContext.UploadPath(), "todel3") + c.Assert(os.MkdirAll(base, 0777), IsNil) + + req, err := http.NewRequest("DELETE", "/api/files/todel3/nope.txt", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 200) +} + +func (s *FilesUploadDiskFullSuite) TestRejectsInvalidDir(c *C) { + req, err := http.NewRequest("DELETE", "/api/files/..", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 400) +} + +func (s *FilesUploadDiskFullSuite) TestRejectsInvalidFileName(c *C) { + base := filepath.Join(s.aptlyContext.UploadPath(), "dirx") + c.Assert(os.MkdirAll(base, 0777), IsNil) + + req, err := http.NewRequest("DELETE", "/api/files/dirx/..", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 400) +} + +func (s *FilesUploadDiskFullSuite) TestListDirsEmptyIfUploadPathIsNotDir(c *C) { + _ = os.RemoveAll(s.aptlyContext.UploadPath()) + c.Assert(os.WriteFile(s.aptlyContext.UploadPath(), []byte("not a dir"), 0644), IsNil) + + req, err := http.NewRequest("GET", "/api/files", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 200) + c.Check(strings.TrimSpace(w.Body.String()), Equals, "[]") +} + +func (s *FilesUploadDiskFullSuite) TestListFilesReturns500OnPermissionError(c *C) { + base := filepath.Join(s.aptlyContext.UploadPath(), "noperms") + c.Assert(os.MkdirAll(base, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(base, "a.txt"), []byte("a"), 0644), IsNil) + c.Assert(os.Chmod(base, 0), IsNil) + defer func() { _ = os.Chmod(base, 0777) }() + + req, err := http.NewRequest("GET", "/api/files/noperms", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 500) +} + +func (s *FilesUploadDiskFullSuite) TestDeleteFileReturns500OnNonNotExistError(c *C) { 
+ base := filepath.Join(s.aptlyContext.UploadPath(), "dirisfile") + c.Assert(os.MkdirAll(base, 0777), IsNil) + subdir := filepath.Join(base, "subdir") + c.Assert(os.MkdirAll(subdir, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(subdir, "x"), []byte("x"), 0644), IsNil) + + req, err := http.NewRequest("DELETE", "/api/files/dirisfile/subdir", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 500) +} + +func (s *FilesUploadDiskFullSuite) TestUploadBadMultipartReturns400(c *C) { + req, err := http.NewRequest("POST", "/api/files/badmultipart", bytes.NewBufferString("not multipart")) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", "multipart/form-data; boundary=missing") + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + + c.Assert(w.Code, Equals, 400) +} + +func (s *FilesUploadDiskFullSuite) TestUploadRejectsInvalidDir(c *C) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", "a.txt") + c.Assert(err, IsNil) + _, err = part.Write([]byte("x")) + c.Assert(err, IsNil) + c.Assert(writer.Close(), IsNil) + + req, err := http.NewRequest("POST", "/api/files/..", body) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", writer.FormDataContentType()) + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 400) +} + +func (s *FilesUploadDiskFullSuite) TestUploadReturns500IfUploadRootIsNotDir(c *C) { + _ = os.RemoveAll(s.aptlyContext.UploadPath()) + c.Assert(os.WriteFile(s.aptlyContext.UploadPath(), []byte("not a dir"), 0644), IsNil) + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", "a.txt") + c.Assert(err, IsNil) + _, err = part.Write([]byte("x")) + c.Assert(err, IsNil) + c.Assert(writer.Close(), IsNil) + + req, err := http.NewRequest("POST", "/api/files/testdir", body) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", writer.FormDataContentType()) + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 500) +} + +func (s *FilesUploadDiskFullSuite) TestUploadReturns500OnFileOpenFailure(c *C) { + // Pre-populate MultipartForm to inject a FileHeader that fails on Open(). 
+ form := &multipart.Form{ + File: map[string][]*multipart.FileHeader{ + "file": {{Filename: "broken.bin"}}, + }, + } + + req, err := http.NewRequest("POST", "/api/files/openfaildir", nil) + c.Assert(err, IsNil) + req.MultipartForm = form + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 500) +} + +func (s *FilesUploadDiskFullSuite) TestUploadReturns500OnCreateFailure(c *C) { + base := filepath.Join(s.aptlyContext.UploadPath(), "readonly") + c.Assert(os.MkdirAll(base, 0777), IsNil) + c.Assert(os.Chmod(base, 0555), IsNil) + defer func() { _ = os.Chmod(base, 0777) }() + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile("file", "a.txt") + c.Assert(err, IsNil) + _, err = part.Write([]byte("x")) + c.Assert(err, IsNil) + c.Assert(writer.Close(), IsNil) + + req, err := http.NewRequest("POST", "/api/files/readonly", body) + c.Assert(err, IsNil) + req.Header.Set("Content-Type", writer.FormDataContentType()) + + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 500) +} + +func (s *FilesUploadDiskFullSuite) TestDeleteDirReturns500OnRemoveFailure(c *C) { + parent := s.aptlyContext.UploadPath() + base := filepath.Join(parent, "cantremove") + c.Assert(os.MkdirAll(base, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(base, "a.txt"), []byte("a"), 0644), IsNil) + + c.Assert(os.Chmod(parent, 0555), IsNil) + defer func() { _ = os.Chmod(parent, 0777) }() + + req, err := http.NewRequest("DELETE", "/api/files/cantremove", nil) + c.Assert(err, IsNil) + w := httptest.NewRecorder() + s.router.ServeHTTP(w, req) + c.Assert(w.Code, Equals, 500) +} diff --git a/files/public.go b/files/public.go index 7cf36ec8f..a2d3f8152 100644 --- a/files/public.go +++ b/files/public.go @@ -15,6 +15,10 @@ import ( "github.com/saracen/walker" ) +// syncFile is a seam to allow tests to force fsync failures (e.g. ENOSPC). +// In production it calls (*os.File).Sync(). 
+var syncFile = func(f *os.File) error { return f.Sync() } + // PublishedStorage abstract file system with public dirs (published repos) type PublishedStorage struct { rootPath string @@ -25,7 +29,7 @@ type PublishedStorage struct { // Global mutex map to prevent concurrent access to the same destinationPath in LinkFromPool var ( fileLockMutex sync.Mutex - fileLocks = make(map[string]*sync.Mutex) + fileLocks = make(map[string]*sync.Mutex) ) // getFileLock returns a mutex for a specific file path to prevent concurrent modifications @@ -119,7 +123,17 @@ func (storage *PublishedStorage) PutFile(path string, sourceFilename string) err }() _, err = io.Copy(f, source) - return err + if err != nil { + return err + } + + // Sync to ensure all data is written to disk and catch ENOSPC errors + err = syncFile(f) + if err != nil { + return fmt.Errorf("error syncing file %s: %s", path, err) + } + + return nil } // Remove removes single file under public path @@ -268,6 +282,13 @@ func (storage *PublishedStorage) LinkFromPool(publishedPrefix, publishedRelPath, return err } + // Sync to ensure all data is written to disk and catch ENOSPC errors + err = syncFile(dst) + if err != nil { + _ = dst.Close() + return fmt.Errorf("error syncing file %s: %s", destinationPath, err) + } + err = dst.Close() } else if storage.linkMethod == LinkMethodSymLink { err = localSourcePool.Symlink(sourcePath, destinationPath) diff --git a/files/public_test.go b/files/public_test.go index 14413167e..31cd2f2ba 100644 --- a/files/public_test.go +++ b/files/public_test.go @@ -1,8 +1,14 @@ package files import ( + "bytes" + "errors" + "io" "os" + "os/exec" "path/filepath" + "runtime" + "strings" "syscall" "github.com/aptly-dev/aptly/aptly" @@ -11,6 +17,77 @@ import ( . "gopkg.in/check.v1" ) +type fakeProgress struct{ bytes.Buffer } + +func (p *fakeProgress) Start() {} +func (p *fakeProgress) Shutdown() {} +func (p *fakeProgress) Flush() {} +func (p *fakeProgress) InitBar(count int64, isBytes bool, barType aptly.BarType) { +} +func (p *fakeProgress) ShutdownBar() {} +func (p *fakeProgress) AddBar(count int) {} +func (p *fakeProgress) SetBar(count int) {} +func (p *fakeProgress) Printf(msg string, a ...interface{}) { +} +func (p *fakeProgress) ColoredPrintf(msg string, a ...interface{}) { +} +func (p *fakeProgress) PrintfStdErr(msg string, a ...interface{}) { +} + +type fakeRSC struct { + *bytes.Reader + closeErr error +} + +func (r *fakeRSC) Close() error { return r.closeErr } + +type fakePool struct { + sizeErr error + openFn func(string) (aptly.ReadSeekerCloser, error) +} + +type fakeLocalPool struct { + fakePool + statErr error +} + +func (p *fakeLocalPool) Stat(path string) (os.FileInfo, error) { return nil, p.statErr } +func (p *fakeLocalPool) GenerateTempPath(filename string) (string, error) { + return "", nil +} +func (p *fakeLocalPool) Link(path, dstPath string) error { return nil } +func (p *fakeLocalPool) Symlink(path, dstPath string) error { return nil } +func (p *fakeLocalPool) FullPath(path string) string { return path } + +func (p *fakePool) Verify(poolPath, basename string, checksums *utils.ChecksumInfo, checksumStorage aptly.ChecksumStorage) (string, bool, error) { + return "", false, nil +} + +func (p *fakePool) Import(srcPath, basename string, checksums *utils.ChecksumInfo, move bool, storage aptly.ChecksumStorage) (string, error) { + return "", nil +} + +func (p *fakePool) LegacyPath(filename string, checksums *utils.ChecksumInfo) (string, error) { + return "", nil +} + +func (p *fakePool) Size(path string) (int64, 
error) { + if p.sizeErr != nil { + return 0, p.sizeErr + } + return int64(len(path)), nil +} + +func (p *fakePool) Open(path string) (aptly.ReadSeekerCloser, error) { + if p.openFn != nil { + return p.openFn(path) + } + return nil, io.EOF +} + +func (p *fakePool) FilepathList(progress aptly.Progress) ([]string, error) { return nil, nil } +func (p *fakePool) Remove(path string) (int64, error) { return 0, nil } + type PublishedStorageSuite struct { root string storage *PublishedStorage @@ -69,6 +146,14 @@ func (s *PublishedStorageSuite) TestPutFile(c *C) { c.Assert(err, IsNil) } +func (s *PublishedStorageSuite) TestPutFileReturnsErrorIfSourceMissing(c *C) { + err := s.storage.MkDir("ppa/dists/squeeze/") + c.Assert(err, IsNil) + + err = s.storage.PutFile("ppa/dists/squeeze/Release", filepath.Join(s.root, "no-such-file")) + c.Assert(err, NotNil) +} + func (s *PublishedStorageSuite) TestFilelist(c *C) { err := s.storage.MkDir("ppa/pool/main/a/ab/") c.Assert(err, IsNil) @@ -134,6 +219,11 @@ func (s *PublishedStorageSuite) TestSymLink(c *C) { c.Assert(linkTarget, Equals, "ppa/dists/squeeze/Release") } +func (s *PublishedStorageSuite) TestReadLinkReturnsErrorOnMissingPath(c *C) { + _, err := s.storage.ReadLink("does/not/exist") + c.Assert(err, NotNil) +} + func (s *PublishedStorageSuite) TestHardLink(c *C) { err := s.storage.MkDir("ppa/dists/squeeze/") c.Assert(err, IsNil) @@ -163,6 +253,18 @@ func (s *PublishedStorageSuite) TestRemoveDirs(c *C) { c.Assert(os.IsNotExist(err), Equals, true) } +func (s *PublishedStorageSuite) TestRemoveDirsWithProgress(c *C) { + err := s.storage.MkDir("ppa/dists/squeeze/") + c.Assert(err, IsNil) + + err = s.storage.PutFile("ppa/dists/squeeze/Release", "/dev/null") + c.Assert(err, IsNil) + + p := &fakeProgress{} + err = s.storage.RemoveDirs("ppa/dists/", p) + c.Assert(err, IsNil) +} + func (s *PublishedStorageSuite) TestRemove(c *C) { err := s.storage.MkDir("ppa/dists/squeeze/") c.Assert(err, IsNil) @@ -337,3 +439,284 @@ func (s *PublishedStorageSuite) TestRootRemove(c *C) { dirStorage := NewPublishedStorage(pwd, "", "") c.Assert(func() { _ = dirStorage.RemoveDirs("", nil) }, PanicMatches, "trying to remove the root directory") } + +// DiskFullSuite uses a loopback mount; requires Linux + root. 
+ +type DiskFullSuite struct { + root string +} + +var _ = Suite(&DiskFullSuite{}) + +func (s *DiskFullSuite) SetUpTest(c *C) { + if runtime.GOOS != "linux" { + c.Skip("disk full tests only run on Linux") + } + if os.Geteuid() != 0 { + c.Skip("disk full tests require root privileges") + } + + s.root = c.MkDir() +} + +func (s *DiskFullSuite) TestPutFileOutOfSpace(c *C) { + mountPoint := filepath.Join(s.root, "smallfs") + err := os.MkdirAll(mountPoint, 0777) + c.Assert(err, IsNil) + fsImage := filepath.Join(s.root, "small.img") + cmd := exec.Command("dd", "if=/dev/zero", "of="+fsImage, "bs=1M", "count=1") + err = cmd.Run() + c.Assert(err, IsNil) + cmd = exec.Command("mkfs.ext4", "-F", fsImage) + err = cmd.Run() + c.Assert(err, IsNil) + cmd = exec.Command("mount", "-o", "loop", fsImage, mountPoint) + err = cmd.Run() + c.Assert(err, IsNil) + defer func() { + _ = exec.Command("umount", mountPoint).Run() + }() + + storage := NewPublishedStorage(mountPoint, "", "") + largeFile := filepath.Join(s.root, "largefile") + cmd = exec.Command("dd", "if=/dev/zero", "of="+largeFile, "bs=1M", "count=2") + err = cmd.Run() + c.Assert(err, IsNil) + + err = storage.PutFile("testfile", largeFile) + c.Assert(err, NotNil) + c.Check(strings.Contains(err.Error(), "no space left on device") || + strings.Contains(err.Error(), "sync"), Equals, true, + Commentf("Expected disk full error, got: %v", err)) +} + +func (s *DiskFullSuite) TestLinkFromPoolCopyOutOfSpace(c *C) { + mountPoint := filepath.Join(s.root, "smallfs") + err := os.MkdirAll(mountPoint, 0777) + c.Assert(err, IsNil) + fsImage := filepath.Join(s.root, "small.img") + + cmd := exec.Command("dd", "if=/dev/zero", "of="+fsImage, "bs=1M", "count=1") + err = cmd.Run() + c.Assert(err, IsNil) + + cmd = exec.Command("mkfs.ext4", "-F", fsImage) + err = cmd.Run() + c.Assert(err, IsNil) + + cmd = exec.Command("mount", "-o", "loop", fsImage, mountPoint) + err = cmd.Run() + c.Assert(err, IsNil) + defer func() { + _ = exec.Command("umount", mountPoint).Run() + }() + + storage := NewPublishedStorage(mountPoint, "copy", "") + + poolPath := filepath.Join(s.root, "pool") + pool := NewPackagePool(poolPath, false) + cs := NewMockChecksumStorage() + + largeFile := filepath.Join(s.root, "package.deb") + cmd = exec.Command("dd", "if=/dev/zero", "of="+largeFile, "bs=1M", "count=2") + err = cmd.Run() + c.Assert(err, IsNil) + + sourceChecksum, err := utils.ChecksumsForFile(largeFile) + c.Assert(err, IsNil) + + srcPoolPath, err := pool.Import(largeFile, "package.deb", + &utils.ChecksumInfo{MD5: "d41d8cd98f00b204e9800998ecf8427e"}, false, cs) + c.Assert(err, IsNil) + + err = storage.LinkFromPool("", "pool/main/p/package", "package.deb", + pool, srcPoolPath, sourceChecksum, false) + c.Assert(err, NotNil) + c.Check(strings.Contains(err.Error(), "no space left on device") || + strings.Contains(err.Error(), "sync"), Equals, true, + Commentf("Expected disk full error, got: %v", err)) +} + +type DiskFullNoRootSuite struct { + root string +} + +var _ = Suite(&DiskFullNoRootSuite{}) + +func (s *DiskFullNoRootSuite) SetUpTest(c *C) { + s.root = c.MkDir() +} + +func (s *DiskFullNoRootSuite) TestSyncIsCalled(c *C) { + storage := NewPublishedStorage(s.root, "", "") + sourceFile := filepath.Join(s.root, "source.txt") + err := os.WriteFile(sourceFile, []byte("test content"), 0644) + c.Assert(err, IsNil) + err = storage.PutFile("dest.txt", sourceFile) + c.Assert(err, IsNil) + content, err := os.ReadFile(filepath.Join(s.root, "dest.txt")) + c.Assert(err, IsNil) + c.Check(string(content), Equals, "test 
content") +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolCopySyncIsCalled(c *C) { + storage := NewPublishedStorage(s.root, "copy", "") + poolPath := filepath.Join(s.root, "pool") + pool := NewPackagePool(poolPath, false) + cs := NewMockChecksumStorage() + + pkgFile := filepath.Join(s.root, "package.deb") + err := os.WriteFile(pkgFile, []byte("package content"), 0644) + c.Assert(err, IsNil) + + sourceChecksum, err := utils.ChecksumsForFile(pkgFile) + c.Assert(err, IsNil) + + srcPoolPath, err := pool.Import(pkgFile, "package.deb", + &utils.ChecksumInfo{MD5: "d41d8cd98f00b204e9800998ecf8427e"}, false, cs) + c.Assert(err, IsNil) + + err = storage.LinkFromPool("", "pool/main/p/package", "package.deb", + pool, srcPoolPath, sourceChecksum, false) + c.Assert(err, IsNil) + + destPath := filepath.Join(s.root, "pool/main/p/package/package.deb") + content, err := os.ReadFile(destPath) + c.Assert(err, IsNil) + c.Check(string(content), Equals, "package content") +} + +func (s *DiskFullNoRootSuite) TestPutFileSyncErrorIsReturned(c *C) { + storage := NewPublishedStorage(s.root, "", "") + + sourceFile := filepath.Join(s.root, "source-syncfail.txt") + err := os.WriteFile(sourceFile, []byte("test content"), 0644) + c.Assert(err, IsNil) + + oldSyncFile := syncFile + syncFile = func(_ *os.File) error { return syscall.ENOSPC } + defer func() { syncFile = oldSyncFile }() + + err = storage.PutFile("dest-syncfail.txt", sourceFile) + c.Assert(err, NotNil) + c.Check(strings.Contains(err.Error(), "error syncing file"), Equals, true) +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolCopySyncErrorIsReturned(c *C) { + storage := NewPublishedStorage(s.root, "copy", "") + poolPath := filepath.Join(s.root, "pool") + pool := NewPackagePool(poolPath, false) + cs := NewMockChecksumStorage() + + pkgFile := filepath.Join(s.root, "package-syncfail.deb") + err := os.WriteFile(pkgFile, []byte("package content"), 0644) + c.Assert(err, IsNil) + + sourceChecksum, err := utils.ChecksumsForFile(pkgFile) + c.Assert(err, IsNil) + + srcPoolPath, err := pool.Import(pkgFile, "package-syncfail.deb", + &utils.ChecksumInfo{MD5: "d41d8cd98f00b204e9800998ecf8427e"}, false, cs) + c.Assert(err, IsNil) + + oldSyncFile := syncFile + syncFile = func(_ *os.File) error { return syscall.ENOSPC } + defer func() { syncFile = oldSyncFile }() + + err = storage.LinkFromPool("", "pool/main/p/package", "package-syncfail.deb", + pool, srcPoolPath, sourceChecksum, false) + c.Assert(err, NotNil) + c.Check(strings.Contains(err.Error(), "error syncing file"), Equals, true) +} + +func (s *DiskFullNoRootSuite) TestGetFileLockReusesMutex(c *C) { + a := getFileLock(filepath.Join(s.root, "a")) + b := getFileLock(filepath.Join(s.root, "a")) + c.Check(a == b, Equals, true) + + c1 := getFileLock(filepath.Join(s.root, "c1")) + c2 := getFileLock(filepath.Join(s.root, "c2")) + c.Check(c1 == c2, Equals, false) +} + +func (s *DiskFullNoRootSuite) TestPutFileFailsIfDestinationDirMissing(c *C) { + storage := NewPublishedStorage(s.root, "", "") + + sourceFile := filepath.Join(s.root, "src.txt") + err := os.WriteFile(sourceFile, []byte("x"), 0644) + c.Assert(err, IsNil) + + err = storage.PutFile("missingdir/dest.txt", sourceFile) + c.Assert(err, NotNil) +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolRejectsNonLocalPoolForHardlink(c *C) { + storage := NewPublishedStorage(s.root, "", "") + pool := &fakePool{} + + err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb", pool, "x", utils.ChecksumInfo{MD5: "x"}, false) + c.Assert(err, NotNil) + 
c.Check(strings.Contains(err.Error(), "cannot link"), Equals, true) +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyReturnsErrorIfOpenFails(c *C) { + storage := NewPublishedStorage(s.root, "copy", "") + pool := &fakePool{openFn: func(string) (aptly.ReadSeekerCloser, error) { return nil, io.ErrUnexpectedEOF }} + + err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb", pool, "x", utils.ChecksumInfo{MD5: "x"}, false) + c.Assert(err, NotNil) +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyReturnsErrorIfReaderCloseFails(c *C) { + storage := NewPublishedStorage(s.root, "copy", "") + + pool := &fakePool{openFn: func(string) (aptly.ReadSeekerCloser, error) { + return &fakeRSC{Reader: bytes.NewReader([]byte("data")), closeErr: io.ErrClosedPipe}, nil + }} + + err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb", pool, "x", utils.ChecksumInfo{MD5: "x"}, false) + c.Assert(err, NotNil) + c.Check(err, Equals, io.ErrClosedPipe) +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyReturnsErrorIfSizeFailsWhenDestExists(c *C) { + storage := NewPublishedStorage(s.root, "copy", "size") + pool := &fakePool{sizeErr: io.ErrUnexpectedEOF, openFn: func(string) (aptly.ReadSeekerCloser, error) { + return &fakeRSC{Reader: bytes.NewReader([]byte("data")), closeErr: nil}, nil + }} + + destDir := filepath.Join(s.root, "pool/main/p/pkg") + c.Assert(os.MkdirAll(destDir, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(destDir, "x.deb"), []byte("old"), 0644), IsNil) + + err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb", pool, "x", utils.ChecksumInfo{MD5: "x"}, false) + c.Assert(err, NotNil) + c.Check(err, Equals, io.ErrUnexpectedEOF) +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolCopyChecksumReturnsErrorIfDstMD5Fails(c *C) { + storage := NewPublishedStorage(s.root, "copy", "") + pool := &fakePool{openFn: func(string) (aptly.ReadSeekerCloser, error) { + return &fakeRSC{Reader: bytes.NewReader([]byte("data")), closeErr: nil}, nil + }} + + // Make destinationPath a directory so MD5ChecksumForFile fails. + destDir := filepath.Join(s.root, "pool/main/p/pkg") + c.Assert(os.MkdirAll(destDir, 0777), IsNil) + c.Assert(os.MkdirAll(filepath.Join(destDir, "x.deb"), 0777), IsNil) + + err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb", pool, "x", utils.ChecksumInfo{MD5: "x"}, false) + c.Assert(err, NotNil) +} + +func (s *DiskFullNoRootSuite) TestLinkFromPoolHardlinkReturnsErrorIfStatFailsWhenDestExists(c *C) { + storage := NewPublishedStorage(c.MkDir(), "hardlink", "") + pool := &fakeLocalPool{statErr: errors.New("stat failed")} + + destDir := filepath.Join(storage.rootPath, "pool", "main", "p", "pkg") + c.Assert(os.MkdirAll(destDir, 0777), IsNil) + c.Assert(os.WriteFile(filepath.Join(destDir, "x.deb"), []byte("x"), 0644), IsNil) + + err := storage.LinkFromPool("", "pool/main/p/pkg", "x.deb", pool, "x", utils.ChecksumInfo{MD5: "x"}, false) + c.Assert(err, NotNil) +}
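
For context, a minimal client-side sketch of how the new failure mode surfaces to an API consumer (not part of the patch): it assumes an aptly API server at a placeholder address and relies only on the /api/files/<dir> route, the "file" form field, and the "error syncing file ..." message introduced above.

// Hedged sketch: what a caller observes when the upload endpoint hits ENOSPC.
// The server address below is a hypothetical placeholder; the route, form
// field name and error text come from the change in api/files.go.
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
)

func main() {
	// Build a multipart upload the same way the tests above do.
	body := &bytes.Buffer{}
	form := multipart.NewWriter(body)
	part, err := form.CreateFormFile("file", "example.deb")
	if err != nil {
		panic(err)
	}
	if _, err = part.Write([]byte("package payload")); err != nil {
		panic(err)
	}
	if err = form.Close(); err != nil {
		panic(err)
	}

	// Hypothetical aptly API endpoint; adjust host/port for a real setup.
	resp, err := http.Post("http://localhost:8080/api/files/incoming",
		form.FormDataContentType(), body)
	if err != nil {
		panic(err)
	}
	defer func() { _ = resp.Body.Close() }()

	msg, _ := io.ReadAll(resp.Body)
	// With this change, an out-of-space condition that only shows up at fsync
	// time is reported as a 500 with "error syncing file ...", rather than the
	// upload appearing to succeed.
	fmt.Println(resp.StatusCode, string(msg))
}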