From aaa809890e1038e7266b0cb4ea4eef93bc463b0c Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Thu, 21 Jul 2022 21:44:32 +0200 Subject: [PATCH 01/32] Added abstract helper interface and implemented it for all storage backends --- cmd/backup/helper.go | 35 +++++ cmd/backup/local.go | 109 +++++++++++++++ cmd/backup/minio.go | 85 ++++++++++++ cmd/backup/script.go | 308 ++++--------------------------------------- cmd/backup/ssh.go | 105 +++++++++++++++ cmd/backup/webdav.go | 74 +++++++++++ 6 files changed, 434 insertions(+), 282 deletions(-) create mode 100644 cmd/backup/helper.go create mode 100644 cmd/backup/local.go create mode 100644 cmd/backup/minio.go create mode 100644 cmd/backup/ssh.go create mode 100644 cmd/backup/webdav.go diff --git a/cmd/backup/helper.go b/cmd/backup/helper.go new file mode 100644 index 00000000..543792cf --- /dev/null +++ b/cmd/backup/helper.go @@ -0,0 +1,35 @@ +package main + +import "time" + +type Helper interface { + copyArchive(s *script, name string) error + pruneBackups(s *script, deadline time.Time) error +} + +type AbstractHelper struct { + Helper +} + +// doPrune holds general control flow that applies to any kind of storage. +// Callers can pass in a thunk that performs the actual deletion of files. +func doPrune(s *script, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { + if lenMatches != 0 && lenMatches != lenCandidates { + if err := doRemoveFiles(); err != nil { + return err + } + s.logger.Infof( + "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", + lenMatches, + lenCandidates, + description, + s.c.BackupRetentionDays, + ) + } else if lenMatches != 0 && lenMatches == lenCandidates { + s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description) + s.logger.Warn("Refusing to do so, please check your configuration.") + } else { + s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description) + } + return nil +} diff --git a/cmd/backup/local.go b/cmd/backup/local.go new file mode 100644 index 00000000..5db1f181 --- /dev/null +++ b/cmd/backup/local.go @@ -0,0 +1,109 @@ +package main + +import ( + "fmt" + "os" + "path" + "path/filepath" + "time" +) + +type LocalHelper struct { + *AbstractHelper +} + +func newLocalhelper() *LocalHelper { + a := &AbstractHelper{} + r := &LocalHelper{a} + a.Helper = r + return r +} + +func (helper *LocalHelper) copyArchive(s *script, name string) error { + if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil { + return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) + } + s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive) + if s.c.BackupLatestSymlink != "" { + symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink) + if _, err := os.Lstat(symlink); err == nil { + os.Remove(symlink) + } + if err := os.Symlink(name, symlink); err != nil { + return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) + } + s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink) + } + + return nil +} + +func (helper *LocalHelper) pruneBackups(s *script, deadline time.Time) error { + globPattern := path.Join( + s.c.BackupArchive, + fmt.Sprintf("%s*", s.c.BackupPruningPrefix), + ) + globMatches, err := filepath.Glob(globPattern) + if err != nil { + return fmt.Errorf( + "pruneBackups: error looking up matching files using pattern 
%s: %w", + globPattern, + err, + ) + } + + var candidates []string + for _, candidate := range globMatches { + fi, err := os.Lstat(candidate) + if err != nil { + return fmt.Errorf( + "pruneBackups: error calling Lstat on file %s: %w", + candidate, + err, + ) + } + + if fi.Mode()&os.ModeSymlink != os.ModeSymlink { + candidates = append(candidates, candidate) + } + } + + var matches []string + for _, candidate := range candidates { + fi, err := os.Stat(candidate) + if err != nil { + return fmt.Errorf( + "pruneBackups: error calling stat on file %s: %w", + candidate, + err, + ) + } + if fi.ModTime().Before(deadline) { + matches = append(matches, candidate) + } + } + + s.stats.Storages.Local = StorageStats{ + Total: uint(len(candidates)), + Pruned: uint(len(matches)), + } + + doPrune(s, len(matches), len(candidates), "local backup(s)", func() error { + var removeErrors []error + for _, match := range matches { + if err := os.Remove(match); err != nil { + removeErrors = append(removeErrors, err) + } + } + if len(removeErrors) != 0 { + return fmt.Errorf( + "pruneBackups: %d error(s) deleting local files, starting with: %w", + len(removeErrors), + join(removeErrors...), + ) + } + return nil + }) + + return nil +} diff --git a/cmd/backup/minio.go b/cmd/backup/minio.go new file mode 100644 index 00000000..d865dc13 --- /dev/null +++ b/cmd/backup/minio.go @@ -0,0 +1,85 @@ +package main + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/minio/minio-go/v7" +) + +type MinioHelper struct { + *AbstractHelper + client *minio.Client +} + +func newMinioHelper(client *minio.Client) *MinioHelper { + a := &AbstractHelper{} + r := &MinioHelper{a, client} + a.Helper = r + return r +} + +func (helper *MinioHelper) copyArchive(s *script, name string) error { + if _, err := helper.client.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{ + ContentType: "application/tar+gzip", + StorageClass: s.c.AwsStorageClass, + }); err != nil { + return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err) + } + s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName) + + return nil +} + +func (helper *MinioHelper) pruneBackups(s *script, deadline time.Time) error { + candidates := helper.client.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{ + WithMetadata: true, + Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix), + Recursive: true, + }) + + var matches []minio.ObjectInfo + var lenCandidates int + for candidate := range candidates { + lenCandidates++ + if candidate.Err != nil { + return fmt.Errorf( + "pruneBackups: error looking up candidates from remote storage: %w", + candidate.Err, + ) + } + if candidate.LastModified.Before(deadline) { + matches = append(matches, candidate) + } + } + + s.stats.Storages.S3 = StorageStats{ + Total: uint(lenCandidates), + Pruned: uint(len(matches)), + } + + doPrune(s, len(matches), lenCandidates, "remote backup(s)", func() error { + objectsCh := make(chan minio.ObjectInfo) + go func() { + for _, match := range matches { + objectsCh <- match + } + close(objectsCh) + }() + errChan := helper.client.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) + var removeErrors []error + for result := range errChan { + if result.Err != nil { + removeErrors = append(removeErrors, result.Err) + } + } + if len(removeErrors) != 0 { + return join(removeErrors...) 
+ } + return nil + }) + + return nil +} diff --git a/cmd/backup/script.go b/cmd/backup/script.go index cd08361d..def733c8 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -14,7 +14,6 @@ import ( "os" "path" "path/filepath" - "strings" "text/template" "time" @@ -40,9 +39,10 @@ import ( // single backup run. type script struct { cli *client.Client - minioClient *minio.Client - webdavClient *gowebdav.Client - sshClient *ssh.Client + minioHelper *MinioHelper + webdavHelper *WebdavHelper + sshHelper *SshHelper + localHelper *LocalHelper sftpClient *sftp.Client logger *logrus.Logger sender *router.ServiceRouter @@ -143,7 +143,7 @@ func newScript() (*script, error) { if err != nil { return nil, fmt.Errorf("newScript: error setting up minio client: %w", err) } - s.minioClient = mc + s.minioHelper = newMinioHelper(mc) } if s.c.WebdavUrl != "" { @@ -151,7 +151,7 @@ func newScript() (*script, error) { return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") } else { webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword) - s.webdavClient = webdavClient + s.webdavHelper = newWebdavHelper(webdavClient) if s.c.WebdavUrlInsecure { defaultTransport, ok := http.DefaultTransport.(*http.Transport) if !ok { @@ -159,7 +159,7 @@ func newScript() (*script, error) { } webdavTransport := defaultTransport.Clone() webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure - s.webdavClient.SetTransport(webdavTransport) + s.webdavHelper.client.SetTransport(webdavTransport) } } } @@ -199,11 +199,11 @@ func newScript() (*script, error) { HostKeyCallback: ssh.InsecureIgnoreHostKey(), } sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig) - s.sshClient = sshClient + s.sshHelper = newSshHelper(sshClient) if err != nil { return nil, fmt.Errorf("newScript: error creating ssh client: %w", err) } - _, _, err = s.sshClient.SendRequest("keepalive", false, nil) + _, _, err = s.sshHelper.client.SendRequest("keepalive", false, nil) if err != nil { return nil, err } @@ -215,6 +215,8 @@ func newScript() (*script, error) { } } + s.localHelper = newLocalhelper() + if s.c.EmailNotificationRecipient != "" { emailURL := fmt.Sprintf( "smtp://%s:%s@%s:%d/?from=%s&to=%s", @@ -529,92 +531,20 @@ func (s *script) copyArchive() error { } } - if s.minioClient != nil { - if _, err := s.minioClient.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{ - ContentType: "application/tar+gzip", - StorageClass: s.c.AwsStorageClass, - }); err != nil { - errResp := minio.ToErrorResponse(err) - return fmt.Errorf("copyBackup: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) - } - s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName) + if s.minioHelper != nil { + s.minioHelper.copyArchive(s, name) } - if s.webdavClient != nil { - bytes, err := os.ReadFile(s.file) - if err != nil { - return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) - } - if err := s.webdavClient.MkdirAll(s.c.WebdavPath, 0644); err != nil { - return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err) - } - if err := s.webdavClient.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil { - return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: 
%w", err) - } - s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath) + if s.webdavHelper != nil { + s.webdavHelper.copyArchive(s, name) } - if s.sshClient != nil { - source, err := os.Open(s.file) - if err != nil { - return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) - } - defer source.Close() - - destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name)) - if err != nil { - return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) - } - defer destination.Close() - - chunk := make([]byte, 1000000) - for { - num, err := source.Read(chunk) - if err == io.EOF { - tot, err := destination.Write(chunk[:num]) - if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) - } - - if tot != len(chunk[:num]) { - return fmt.Errorf("sshClient: failed to write stream") - } - - break - } - - if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) - } - - tot, err := destination.Write(chunk[:num]) - if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) - } - - if tot != len(chunk[:num]) { - return fmt.Errorf("sshClient: failed to write stream") - } - } - - s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath) + if s.sshHelper != nil { + s.sshHelper.copyArchive(s, name) } if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil { - return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) - } - s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive) - if s.c.BackupLatestSymlink != "" { - symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink) - if _, err := os.Lstat(symlink); err == nil { - os.Remove(symlink) - } - if err := os.Symlink(name, symlink); err != nil { - return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) - } - s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink) - } + s.localHelper.copyArchive(s, name) } return nil } @@ -629,208 +559,22 @@ func (s *script) pruneBackups() error { deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway) - // doPrune holds general control flow that applies to any kind of storage. - // Callers can pass in a thunk that performs the actual deletion of files. 
- var doPrune = func(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { - if lenMatches != 0 && lenMatches != lenCandidates { - if err := doRemoveFiles(); err != nil { - return err - } - s.logger.Infof( - "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", - lenMatches, - lenCandidates, - description, - s.c.BackupRetentionDays, - ) - } else if lenMatches != 0 && lenMatches == lenCandidates { - s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description) - s.logger.Warn("Refusing to do so, please check your configuration.") - } else { - s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description) - } - return nil - } - - if s.minioClient != nil { - candidates := s.minioClient.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{ - WithMetadata: true, - Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix), - Recursive: true, - }) - - var matches []minio.ObjectInfo - var lenCandidates int - for candidate := range candidates { - lenCandidates++ - if candidate.Err != nil { - return fmt.Errorf( - "pruneBackups: error looking up candidates from remote storage: %w", - candidate.Err, - ) - } - if candidate.LastModified.Before(deadline) { - matches = append(matches, candidate) - } - } - - s.stats.Storages.S3 = StorageStats{ - Total: uint(lenCandidates), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), lenCandidates, "remote backup(s)", func() error { - objectsCh := make(chan minio.ObjectInfo) - go func() { - for _, match := range matches { - objectsCh <- match - } - close(objectsCh) - }() - errChan := s.minioClient.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) - var removeErrors []error - for result := range errChan { - if result.Err != nil { - removeErrors = append(removeErrors, result.Err) - } - } - if len(removeErrors) != 0 { - return join(removeErrors...) 
- } - return nil - }) + if s.minioHelper != nil { + s.minioHelper.pruneBackups(s, deadline) } - if s.webdavClient != nil { - candidates, err := s.webdavClient.ReadDir(s.c.WebdavPath) - if err != nil { - return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) - } - var matches []fs.FileInfo - var lenCandidates int - for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { - continue - } - lenCandidates++ - if candidate.ModTime().Before(deadline) { - matches = append(matches, candidate) - } - } - - s.stats.Storages.WebDAV = StorageStats{ - Total: uint(lenCandidates), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { - for _, match := range matches { - if err := s.webdavClient.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil { - return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err) - } - } - return nil - }) + if s.webdavHelper != nil { + s.webdavHelper.pruneBackups(s, deadline) } - if s.sshClient != nil { - candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath) - if err != nil { - return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) - } - - var matches []string - for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { - continue - } - if candidate.ModTime().Before(deadline) { - matches = append(matches, candidate.Name()) - } - } - - s.stats.Storages.SSH = StorageStats{ - Total: uint(len(candidates)), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), len(candidates), "SSH backup(s)", func() error { - for _, match := range matches { - if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil { - return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) - } - } - return nil - }) + if s.sshHelper != nil { + s.sshHelper.pruneBackups(s, deadline) } if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - globPattern := path.Join( - s.c.BackupArchive, - fmt.Sprintf("%s*", s.c.BackupPruningPrefix), - ) - globMatches, err := filepath.Glob(globPattern) - if err != nil { - return fmt.Errorf( - "pruneBackups: error looking up matching files using pattern %s: %w", - globPattern, - err, - ) - } - - var candidates []string - for _, candidate := range globMatches { - fi, err := os.Lstat(candidate) - if err != nil { - return fmt.Errorf( - "pruneBackups: error calling Lstat on file %s: %w", - candidate, - err, - ) - } - - if fi.Mode()&os.ModeSymlink != os.ModeSymlink { - candidates = append(candidates, candidate) - } - } - - var matches []string - for _, candidate := range candidates { - fi, err := os.Stat(candidate) - if err != nil { - return fmt.Errorf( - "pruneBackups: error calling stat on file %s: %w", - candidate, - err, - ) - } - if fi.ModTime().Before(deadline) { - matches = append(matches, candidate) - } - } - - s.stats.Storages.Local = StorageStats{ - Total: uint(len(candidates)), - Pruned: uint(len(matches)), - } - - doPrune(len(matches), len(candidates), "local backup(s)", func() error { - var removeErrors []error - for _, match := range matches { - if err := os.Remove(match); err != nil { - removeErrors = append(removeErrors, err) - } - } - if len(removeErrors) != 0 { - return fmt.Errorf( - "pruneBackups: %d error(s) deleting local files, starting with: %w", - len(removeErrors), - join(removeErrors...), - ) - } - return nil - }) + 
s.localHelper.pruneBackups(s, deadline) } + return nil } diff --git a/cmd/backup/ssh.go b/cmd/backup/ssh.go new file mode 100644 index 00000000..b574bfee --- /dev/null +++ b/cmd/backup/ssh.go @@ -0,0 +1,105 @@ +package main + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/crypto/ssh" +) + +type SshHelper struct { + *AbstractHelper + client *ssh.Client +} + +func newSshHelper(client *ssh.Client) *SshHelper { + a := &AbstractHelper{} + r := &SshHelper{a, client} + a.Helper = r + return r +} + +func (helper *SshHelper) copyArchive(s *script, name string) error { + source, err := os.Open(s.file) + if err != nil { + return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) + } + defer source.Close() + + destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name)) + if err != nil { + return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) + } + defer destination.Close() + + chunk := make([]byte, 1000000) + for { + num, err := source.Read(chunk) + if err == io.EOF { + tot, err := destination.Write(chunk[:num]) + if err != nil { + return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) + } + + if tot != len(chunk[:num]) { + return fmt.Errorf("sshClient: failed to write stream") + } + + break + } + + if err != nil { + return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) + } + + tot, err := destination.Write(chunk[:num]) + if err != nil { + return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) + } + + if tot != len(chunk[:num]) { + return fmt.Errorf("sshClient: failed to write stream") + } + } + + s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath) + + return nil +} + +func (helper *SshHelper) pruneBackups(s *script, deadline time.Time) error { + candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath) + if err != nil { + return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) + } + + var matches []string + for _, candidate := range candidates { + if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { + continue + } + if candidate.ModTime().Before(deadline) { + matches = append(matches, candidate.Name()) + } + } + + s.stats.Storages.SSH = StorageStats{ + Total: uint(len(candidates)), + Pruned: uint(len(matches)), + } + + doPrune(s, len(matches), len(candidates), "SSH backup(s)", func() error { + for _, match := range matches { + if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil { + return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) + } + } + return nil + }) + + return nil +} diff --git a/cmd/backup/webdav.go b/cmd/backup/webdav.go new file mode 100644 index 00000000..1a266501 --- /dev/null +++ b/cmd/backup/webdav.go @@ -0,0 +1,74 @@ +package main + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "time" + + "github.com/studio-b12/gowebdav" +) + +type WebdavHelper struct { + *AbstractHelper + client *gowebdav.Client +} + +func newWebdavHelper(client *gowebdav.Client) *WebdavHelper { + a := &AbstractHelper{} + r := &WebdavHelper{a, client} + a.Helper = r + return r +} + +func (helper *WebdavHelper) copyArchive(s *script, name string) error { + bytes, err := os.ReadFile(s.file) + if err != nil { + return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) + } + if err := 
helper.client.MkdirAll(s.c.WebdavPath, 0644); err != nil { + return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err) + } + if err := helper.client.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil { + return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) + } + s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath) + + return nil +} + +func (helper *WebdavHelper) pruneBackups(s *script, deadline time.Time) error { + candidates, err := helper.client.ReadDir(s.c.WebdavPath) + if err != nil { + return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) + } + var matches []fs.FileInfo + var lenCandidates int + for _, candidate := range candidates { + if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { + continue + } + lenCandidates++ + if candidate.ModTime().Before(deadline) { + matches = append(matches, candidate) + } + } + + s.stats.Storages.WebDAV = StorageStats{ + Total: uint(lenCandidates), + Pruned: uint(len(matches)), + } + + doPrune(s, len(matches), lenCandidates, "WebDAV backup(s)", func() error { + for _, match := range matches { + if err := helper.client.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil { + return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err) + } + } + return nil + }) + + return nil +} From 2ac2fd97d749975286a9bd1a44843861a82e4272 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Thu, 21 Jul 2022 21:55:41 +0200 Subject: [PATCH 02/32] Moved storage client initializations also to helper classes --- cmd/backup/minio.go | 44 ++++++++++++++++-- cmd/backup/script.go | 106 +++---------------------------------------- cmd/backup/ssh.go | 58 +++++++++++++++++++++-- cmd/backup/webdav.go | 28 ++++++++++-- 4 files changed, 125 insertions(+), 111 deletions(-) diff --git a/cmd/backup/minio.go b/cmd/backup/minio.go index d865dc13..897ef7ec 100644 --- a/cmd/backup/minio.go +++ b/cmd/backup/minio.go @@ -2,11 +2,13 @@ package main import ( "context" + "errors" "fmt" "path/filepath" "time" "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) type MinioHelper struct { @@ -14,11 +16,47 @@ type MinioHelper struct { client *minio.Client } -func newMinioHelper(client *minio.Client) *MinioHelper { +func newMinioHelper(s *script) (*MinioHelper, error) { + var creds *credentials.Credentials + if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" { + creds = credentials.NewStaticV4( + s.c.AwsAccessKeyID, + s.c.AwsSecretAccessKey, + "", + ) + } else if s.c.AwsIamRoleEndpoint != "" { + creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint) + } else { + return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided") + } + + options := minio.Options{ + Creds: creds, + Secure: s.c.AwsEndpointProto == "https", + } + + if s.c.AwsEndpointInsecure { + if !options.Secure { + return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https") + } + + transport, err := minio.DefaultTransport(true) + if err != nil { + return nil, fmt.Errorf("newScript: failed to create default minio transport") + } + transport.TLSClientConfig.InsecureSkipVerify = true + options.Transport = transport + } + + mc, err := minio.New(s.c.AwsEndpoint, &options) + if err != nil { + return nil, fmt.Errorf("newScript: error setting up minio client: %w", 
err) + } + a := &AbstractHelper{} - r := &MinioHelper{a, client} + r := &MinioHelper{a, mc} a.Helper = r - return r + return r, nil } func (helper *MinioHelper) copyArchive(s *script, name string) error { diff --git a/cmd/backup/script.go b/cmd/backup/script.go index def733c8..1fe7b2aa 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -5,12 +5,9 @@ package main import ( "context" - "errors" "fmt" "io" "io/fs" - "io/ioutil" - "net/http" "os" "path" "path/filepath" @@ -25,14 +22,10 @@ import ( "github.com/docker/docker/client" "github.com/kelseyhightower/envconfig" "github.com/leekchan/timeutil" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" "github.com/otiai10/copy" "github.com/pkg/sftp" "github.com/sirupsen/logrus" - "github.com/studio-b12/gowebdav" "golang.org/x/crypto/openpgp" - "golang.org/x/crypto/ssh" ) // script holds all the stateful information required to orchestrate a @@ -108,111 +101,24 @@ func newScript() (*script, error) { } if s.c.AwsS3BucketName != "" { - var creds *credentials.Credentials - if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" { - creds = credentials.NewStaticV4( - s.c.AwsAccessKeyID, - s.c.AwsSecretAccessKey, - "", - ) - } else if s.c.AwsIamRoleEndpoint != "" { - creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint) - } else { - return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided") - } - - options := minio.Options{ - Creds: creds, - Secure: s.c.AwsEndpointProto == "https", - } - - if s.c.AwsEndpointInsecure { - if !options.Secure { - return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https") - } - - transport, err := minio.DefaultTransport(true) - if err != nil { - return nil, fmt.Errorf("newScript: failed to create default minio transport") - } - transport.TLSClientConfig.InsecureSkipVerify = true - options.Transport = transport - } - - mc, err := minio.New(s.c.AwsEndpoint, &options) + s.minioHelper, err = newMinioHelper(s) if err != nil { - return nil, fmt.Errorf("newScript: error setting up minio client: %w", err) + return nil, err } - s.minioHelper = newMinioHelper(mc) } if s.c.WebdavUrl != "" { - if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" { - return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") - } else { - webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword) - s.webdavHelper = newWebdavHelper(webdavClient) - if s.c.WebdavUrlInsecure { - defaultTransport, ok := http.DefaultTransport.(*http.Transport) - if !ok { - return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport") - } - webdavTransport := defaultTransport.Clone() - webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure - s.webdavHelper.client.SetTransport(webdavTransport) - } + s.webdavHelper, err = newWebdavHelper(s) + if err != nil { + return nil, err } } if s.c.SSHHostName != "" { - var authMethods []ssh.AuthMethod - - if s.c.SSHPassword != "" { - authMethods = append(authMethods, ssh.Password(s.c.SSHPassword)) - } - - if _, err := os.Stat(s.c.SSHIdentityFile); err == nil { - key, err := ioutil.ReadFile(s.c.SSHIdentityFile) - if err != nil { - return nil, errors.New("newScript: error reading the private key") - } - - var signer ssh.Signer - if s.c.SSHIdentityPassphrase != "" { - signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase)) - if err != nil { - return nil, 
errors.New("newScript: error parsing the encrypted private key") - } - authMethods = append(authMethods, ssh.PublicKeys(signer)) - } else { - signer, err = ssh.ParsePrivateKey(key) - if err != nil { - return nil, errors.New("newScript: error parsing the private key") - } - authMethods = append(authMethods, ssh.PublicKeys(signer)) - } - } - - sshClientConfig := &ssh.ClientConfig{ - User: s.c.SSHUser, - Auth: authMethods, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } - sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig) - s.sshHelper = newSshHelper(sshClient) - if err != nil { - return nil, fmt.Errorf("newScript: error creating ssh client: %w", err) - } - _, _, err = s.sshHelper.client.SendRequest("keepalive", false, nil) + s.sshHelper, err = newSshHelper(s) if err != nil { return nil, err } - - sftpClient, err := sftp.NewClient(sshClient) - s.sftpClient = sftpClient - if err != nil { - return nil, fmt.Errorf("newScript: error creating sftp client: %w", err) - } } s.localHelper = newLocalhelper() diff --git a/cmd/backup/ssh.go b/cmd/backup/ssh.go index b574bfee..31d1721a 100644 --- a/cmd/backup/ssh.go +++ b/cmd/backup/ssh.go @@ -1,13 +1,16 @@ package main import ( + "errors" "fmt" "io" + "io/ioutil" "os" "path/filepath" "strings" "time" + "github.com/pkg/sftp" "golang.org/x/crypto/ssh" ) @@ -16,11 +19,60 @@ type SshHelper struct { client *ssh.Client } -func newSshHelper(client *ssh.Client) *SshHelper { +func newSshHelper(s *script) (*SshHelper, error) { + var authMethods []ssh.AuthMethod + + if s.c.SSHPassword != "" { + authMethods = append(authMethods, ssh.Password(s.c.SSHPassword)) + } + + if _, err := os.Stat(s.c.SSHIdentityFile); err == nil { + key, err := ioutil.ReadFile(s.c.SSHIdentityFile) + if err != nil { + return nil, errors.New("newScript: error reading the private key") + } + + var signer ssh.Signer + if s.c.SSHIdentityPassphrase != "" { + signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase)) + if err != nil { + return nil, errors.New("newScript: error parsing the encrypted private key") + } + authMethods = append(authMethods, ssh.PublicKeys(signer)) + } else { + signer, err = ssh.ParsePrivateKey(key) + if err != nil { + return nil, errors.New("newScript: error parsing the private key") + } + authMethods = append(authMethods, ssh.PublicKeys(signer)) + } + } + + sshClientConfig := &ssh.ClientConfig{ + User: s.c.SSHUser, + Auth: authMethods, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig) + + if err != nil { + return nil, fmt.Errorf("newScript: error creating ssh client: %w", err) + } + _, _, err = s.sshHelper.client.SendRequest("keepalive", false, nil) + if err != nil { + return nil, err + } + + sftpClient, err := sftp.NewClient(sshClient) + s.sftpClient = sftpClient + if err != nil { + return nil, fmt.Errorf("newScript: error creating sftp client: %w", err) + } + a := &AbstractHelper{} - r := &SshHelper{a, client} + r := &SshHelper{a, sshClient} a.Helper = r - return r + return r, nil } func (helper *SshHelper) copyArchive(s *script, name string) error { diff --git a/cmd/backup/webdav.go b/cmd/backup/webdav.go index 1a266501..a4bde2da 100644 --- a/cmd/backup/webdav.go +++ b/cmd/backup/webdav.go @@ -1,8 +1,10 @@ package main import ( + "errors" "fmt" "io/fs" + "net/http" "os" "path/filepath" "strings" @@ -16,11 +18,27 @@ type WebdavHelper struct { client *gowebdav.Client } -func 
newWebdavHelper(client *gowebdav.Client) *WebdavHelper {
-	a := &AbstractHelper{}
-	r := &WebdavHelper{a, client}
-	a.Helper = r
-	return r
+func newWebdavHelper(s *script) (*WebdavHelper, error) {
+	if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" {
+		return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
+	} else {
+		webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword)
+
+		if s.c.WebdavUrlInsecure {
+			defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+			if !ok {
+				return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport")
+			}
+			webdavTransport := defaultTransport.Clone()
+			webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure
+			webdavClient.SetTransport(webdavTransport)
+		}
+
+		a := &AbstractHelper{}
+		r := &WebdavHelper{a, webdavClient}
+		a.Helper = r
+		return r, nil
+	}
 }
 
 func (helper *WebdavHelper) copyArchive(s *script, name string) error {

From 3ceda07ae48c682bc72ce49a99f358ac6dae4fbb Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Thu, 21 Jul 2022 22:09:08 +0200
Subject: [PATCH 03/32] Fixed SSH init issue: keepalive was sent via the
 not-yet-assigned helper field instead of the freshly dialed client

---
 cmd/backup/ssh.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/backup/ssh.go b/cmd/backup/ssh.go
index 31d1721a..42e289cf 100644
--- a/cmd/backup/ssh.go
+++ b/cmd/backup/ssh.go
@@ -58,7 +58,7 @@ func newSshHelper(s *script) (*SshHelper, error) {
 	if err != nil {
 		return nil, fmt.Errorf("newScript: error creating ssh client: %w", err)
 	}
-	_, _, err = s.sshHelper.client.SendRequest("keepalive", false, nil)
+	_, _, err = sshClient.SendRequest("keepalive", false, nil)
 	if err != nil {
 		return nil, err
 	}

From 6ae3acc1d35732e0c7586b752d25c55ff5314f65 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Thu, 21 Jul 2022 23:03:33 +0200
Subject: [PATCH 04/32] Moved script parameter to helper struct to simplify
 script init.
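For review purposes, here is a minimal, self-contained sketch of the pattern
this patch arrives at. The names config, archiveDir and newLocalHelper are
illustrative stand-ins, not the actual code: an embedded interface lets
AbstractHelper act as an abstract base that delegates to the concrete storage
helper, and the shared script state is captured once at construction time
instead of being passed into every copyArchive/pruneBackups call.

    package main

    import (
    	"fmt"
    	"time"
    )

    type Helper interface {
    	copyArchive(name string) error
    	pruneBackups(deadline time.Time) error
    }

    // AbstractHelper delegates calls to whatever concrete Helper is wired in.
    type AbstractHelper struct {
    	Helper
    }

    // config stands in for the script state the real helpers capture.
    type config struct{ archiveDir string }

    type LocalHelper struct {
    	*AbstractHelper
    	c *config // captured once; no longer a parameter on every method
    }

    func newLocalHelper(c *config) *LocalHelper {
    	a := &AbstractHelper{}
    	r := &LocalHelper{a, c}
    	a.Helper = r // wire the base back to the concrete implementation
    	return r
    }

    func (h *LocalHelper) copyArchive(name string) error {
    	fmt.Printf("would copy %s into %s\n", name, h.c.archiveDir)
    	return nil
    }

    func (h *LocalHelper) pruneBackups(deadline time.Time) error {
    	fmt.Printf("would prune backups older than %s\n", deadline.Format(time.RFC3339))
    	return nil
    }

    func main() {
    	var h Helper = newLocalHelper(&config{archiveDir: "/archive"})
    	_ = h.copyArchive("backup.tar.gz")
    	_ = h.pruneBackups(time.Now().AddDate(0, 0, -7))
    }

With the state captured in the struct, the call sites in script.go reduce to
s.sshHelper.copyArchive(name) and s.sshHelper.pruneBackups(deadline), and each
constructor can return (nil, nil) when its backend is not configured.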
--- cmd/backup/helper.go | 4 ++-- cmd/backup/local.go | 27 ++++++++++++++------------- cmd/backup/minio.go | 27 ++++++++++++++++----------- cmd/backup/script.go | 39 +++++++++++++++------------------------ cmd/backup/ssh.go | 27 ++++++++++++++++----------- cmd/backup/webdav.go | 31 ++++++++++++++++++------------- 6 files changed, 81 insertions(+), 74 deletions(-) diff --git a/cmd/backup/helper.go b/cmd/backup/helper.go index 543792cf..57d88c85 100644 --- a/cmd/backup/helper.go +++ b/cmd/backup/helper.go @@ -3,8 +3,8 @@ package main import "time" type Helper interface { - copyArchive(s *script, name string) error - pruneBackups(s *script, deadline time.Time) error + copyArchive(name string) error + pruneBackups(deadline time.Time) error } type AbstractHelper struct { diff --git a/cmd/backup/local.go b/cmd/backup/local.go index 5db1f181..34f93043 100644 --- a/cmd/backup/local.go +++ b/cmd/backup/local.go @@ -10,38 +10,39 @@ import ( type LocalHelper struct { *AbstractHelper + s *script } -func newLocalhelper() *LocalHelper { +func newLocalhelper(s *script) *LocalHelper { a := &AbstractHelper{} - r := &LocalHelper{a} + r := &LocalHelper{a, s} a.Helper = r return r } -func (helper *LocalHelper) copyArchive(s *script, name string) error { - if err := copyFile(s.file, path.Join(s.c.BackupArchive, name)); err != nil { +func (helper *LocalHelper) copyArchive(name string) error { + if err := copyFile(helper.s.file, path.Join(helper.s.c.BackupArchive, name)); err != nil { return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) } - s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", s.file, s.c.BackupArchive) - if s.c.BackupLatestSymlink != "" { - symlink := path.Join(s.c.BackupArchive, s.c.BackupLatestSymlink) + helper.s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", helper.s.file, helper.s.c.BackupArchive) + if helper.s.c.BackupLatestSymlink != "" { + symlink := path.Join(helper.s.c.BackupArchive, helper.s.c.BackupLatestSymlink) if _, err := os.Lstat(symlink); err == nil { os.Remove(symlink) } if err := os.Symlink(name, symlink); err != nil { return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) } - s.logger.Infof("Created/Updated symlink `%s` for latest backup.", s.c.BackupLatestSymlink) + helper.s.logger.Infof("Created/Updated symlink `%s` for latest backup.", helper.s.c.BackupLatestSymlink) } return nil } -func (helper *LocalHelper) pruneBackups(s *script, deadline time.Time) error { +func (helper *LocalHelper) pruneBackups(deadline time.Time) error { globPattern := path.Join( - s.c.BackupArchive, - fmt.Sprintf("%s*", s.c.BackupPruningPrefix), + helper.s.c.BackupArchive, + fmt.Sprintf("%s*", helper.s.c.BackupPruningPrefix), ) globMatches, err := filepath.Glob(globPattern) if err != nil { @@ -83,12 +84,12 @@ func (helper *LocalHelper) pruneBackups(s *script, deadline time.Time) error { } } - s.stats.Storages.Local = StorageStats{ + helper.s.stats.Storages.Local = StorageStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } - doPrune(s, len(matches), len(candidates), "local backup(s)", func() error { + doPrune(helper.s, len(matches), len(candidates), "local backup(s)", func() error { var removeErrors []error for _, match := range matches { if err := os.Remove(match); err != nil { diff --git a/cmd/backup/minio.go b/cmd/backup/minio.go index 897ef7ec..f6ef59a3 100644 --- a/cmd/backup/minio.go +++ b/cmd/backup/minio.go @@ -14,9 +14,14 @@ import ( type MinioHelper struct { *AbstractHelper client *minio.Client + s 
*script } func newMinioHelper(s *script) (*MinioHelper, error) { + if s.c.AwsS3BucketName == "" { + return nil, nil + } + var creds *credentials.Credentials if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" { creds = credentials.NewStaticV4( @@ -54,27 +59,27 @@ func newMinioHelper(s *script) (*MinioHelper, error) { } a := &AbstractHelper{} - r := &MinioHelper{a, mc} + r := &MinioHelper{a, mc, s} a.Helper = r return r, nil } -func (helper *MinioHelper) copyArchive(s *script, name string) error { - if _, err := helper.client.FPutObject(context.Background(), s.c.AwsS3BucketName, filepath.Join(s.c.AwsS3Path, name), s.file, minio.PutObjectOptions{ +func (helper *MinioHelper) copyArchive(name string) error { + if _, err := helper.client.FPutObject(context.Background(), helper.s.c.AwsS3BucketName, filepath.Join(helper.s.c.AwsS3Path, name), helper.s.file, minio.PutObjectOptions{ ContentType: "application/tar+gzip", - StorageClass: s.c.AwsStorageClass, + StorageClass: helper.s.c.AwsStorageClass, }); err != nil { return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err) } - s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", s.file, s.c.AwsS3BucketName) + helper.s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", helper.s.file, helper.s.c.AwsS3BucketName) return nil } -func (helper *MinioHelper) pruneBackups(s *script, deadline time.Time) error { - candidates := helper.client.ListObjects(context.Background(), s.c.AwsS3BucketName, minio.ListObjectsOptions{ +func (helper *MinioHelper) pruneBackups(deadline time.Time) error { + candidates := helper.client.ListObjects(context.Background(), helper.s.c.AwsS3BucketName, minio.ListObjectsOptions{ WithMetadata: true, - Prefix: filepath.Join(s.c.AwsS3Path, s.c.BackupPruningPrefix), + Prefix: filepath.Join(helper.s.c.AwsS3Path, helper.s.c.BackupPruningPrefix), Recursive: true, }) @@ -93,12 +98,12 @@ func (helper *MinioHelper) pruneBackups(s *script, deadline time.Time) error { } } - s.stats.Storages.S3 = StorageStats{ + helper.s.stats.Storages.S3 = StorageStats{ Total: uint(lenCandidates), Pruned: uint(len(matches)), } - doPrune(s, len(matches), lenCandidates, "remote backup(s)", func() error { + doPrune(helper.s, len(matches), lenCandidates, "remote backup(s)", func() error { objectsCh := make(chan minio.ObjectInfo) go func() { for _, match := range matches { @@ -106,7 +111,7 @@ func (helper *MinioHelper) pruneBackups(s *script, deadline time.Time) error { } close(objectsCh) }() - errChan := helper.client.RemoveObjects(context.Background(), s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) + errChan := helper.client.RemoveObjects(context.Background(), helper.s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) var removeErrors []error for result := range errChan { if result.Err != nil { diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 1fe7b2aa..776751cd 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -100,28 +100,19 @@ func newScript() (*script, error) { s.cli = cli } - if s.c.AwsS3BucketName != "" { - s.minioHelper, err = newMinioHelper(s) - if err != nil { - return nil, err - } + if s.minioHelper, err = newMinioHelper(s); err != nil { + return nil, err } - if s.c.WebdavUrl != "" { - s.webdavHelper, err = newWebdavHelper(s) - if err != nil { - return nil, err - } + if s.webdavHelper, err = newWebdavHelper(s); err != nil { + return nil, err } - if s.c.SSHHostName != "" { - s.sshHelper, err = newSshHelper(s) - if err != nil { - return nil, err - } + 
if s.sshHelper, err = newSshHelper(s); err != nil { + return nil, err } - s.localHelper = newLocalhelper() + s.localHelper = newLocalhelper(s) if s.c.EmailNotificationRecipient != "" { emailURL := fmt.Sprintf( @@ -438,19 +429,19 @@ func (s *script) copyArchive() error { } if s.minioHelper != nil { - s.minioHelper.copyArchive(s, name) + s.minioHelper.copyArchive(name) } if s.webdavHelper != nil { - s.webdavHelper.copyArchive(s, name) + s.webdavHelper.copyArchive(name) } if s.sshHelper != nil { - s.sshHelper.copyArchive(s, name) + s.sshHelper.copyArchive(name) } if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - s.localHelper.copyArchive(s, name) + s.localHelper.copyArchive(name) } return nil } @@ -466,19 +457,19 @@ func (s *script) pruneBackups() error { deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway) if s.minioHelper != nil { - s.minioHelper.pruneBackups(s, deadline) + s.minioHelper.pruneBackups(deadline) } if s.webdavHelper != nil { - s.webdavHelper.pruneBackups(s, deadline) + s.webdavHelper.pruneBackups(deadline) } if s.sshHelper != nil { - s.sshHelper.pruneBackups(s, deadline) + s.sshHelper.pruneBackups(deadline) } if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - s.localHelper.pruneBackups(s, deadline) + s.localHelper.pruneBackups(deadline) } return nil diff --git a/cmd/backup/ssh.go b/cmd/backup/ssh.go index 42e289cf..f58b2f23 100644 --- a/cmd/backup/ssh.go +++ b/cmd/backup/ssh.go @@ -17,9 +17,14 @@ import ( type SshHelper struct { *AbstractHelper client *ssh.Client + s *script } func newSshHelper(s *script) (*SshHelper, error) { + if s.c.SSHHostName == "" { + return nil, nil + } + var authMethods []ssh.AuthMethod if s.c.SSHPassword != "" { @@ -70,19 +75,19 @@ func newSshHelper(s *script) (*SshHelper, error) { } a := &AbstractHelper{} - r := &SshHelper{a, sshClient} + r := &SshHelper{a, sshClient, s} a.Helper = r return r, nil } -func (helper *SshHelper) copyArchive(s *script, name string) error { - source, err := os.Open(s.file) +func (helper *SshHelper) copyArchive(name string) error { + source, err := os.Open(helper.s.file) if err != nil { return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) } defer source.Close() - destination, err := s.sftpClient.Create(filepath.Join(s.c.SSHRemotePath, name)) + destination, err := helper.s.sftpClient.Create(filepath.Join(helper.s.c.SSHRemotePath, name)) if err != nil { return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) } @@ -118,20 +123,20 @@ func (helper *SshHelper) copyArchive(s *script, name string) error { } } - s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", s.file, s.c.SSHHostName, s.c.SSHRemotePath) + helper.s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", helper.s.file, helper.s.c.SSHHostName, helper.s.c.SSHRemotePath) return nil } -func (helper *SshHelper) pruneBackups(s *script, deadline time.Time) error { - candidates, err := s.sftpClient.ReadDir(s.c.SSHRemotePath) +func (helper *SshHelper) pruneBackups(deadline time.Time) error { + candidates, err := helper.s.sftpClient.ReadDir(helper.s.c.SSHRemotePath) if err != nil { return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) } var matches []string for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { + if !strings.HasPrefix(candidate.Name(), helper.s.c.BackupPruningPrefix) { continue } if 
candidate.ModTime().Before(deadline) { @@ -139,14 +144,14 @@ func (helper *SshHelper) pruneBackups(s *script, deadline time.Time) error { } } - s.stats.Storages.SSH = StorageStats{ + helper.s.stats.Storages.SSH = StorageStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } - doPrune(s, len(matches), len(candidates), "SSH backup(s)", func() error { + doPrune(helper.s, len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { - if err := s.sftpClient.Remove(filepath.Join(s.c.SSHRemotePath, match)); err != nil { + if err := helper.s.sftpClient.Remove(filepath.Join(helper.s.c.SSHRemotePath, match)); err != nil { return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) } } diff --git a/cmd/backup/webdav.go b/cmd/backup/webdav.go index a4bde2da..6f739e2d 100644 --- a/cmd/backup/webdav.go +++ b/cmd/backup/webdav.go @@ -16,9 +16,14 @@ import ( type WebdavHelper struct { *AbstractHelper client *gowebdav.Client + s *script } func newWebdavHelper(s *script) (*WebdavHelper, error) { + if s.c.WebdavUrl == "" { + return nil, nil + } + if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" { return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") } else { @@ -35,37 +40,37 @@ func newWebdavHelper(s *script) (*WebdavHelper, error) { } a := &AbstractHelper{} - r := &WebdavHelper{a, webdavClient} + r := &WebdavHelper{a, webdavClient, s} a.Helper = r return r, nil } } -func (helper *WebdavHelper) copyArchive(s *script, name string) error { - bytes, err := os.ReadFile(s.file) +func (helper *WebdavHelper) copyArchive(name string) error { + bytes, err := os.ReadFile(helper.s.file) if err != nil { return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) } - if err := helper.client.MkdirAll(s.c.WebdavPath, 0644); err != nil { - return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", s.c.WebdavPath, err) + if err := helper.client.MkdirAll(helper.s.c.WebdavPath, 0644); err != nil { + return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", helper.s.c.WebdavPath, err) } - if err := helper.client.Write(filepath.Join(s.c.WebdavPath, name), bytes, 0644); err != nil { + if err := helper.client.Write(filepath.Join(helper.s.c.WebdavPath, name), bytes, 0644); err != nil { return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) } - s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", s.file, s.c.WebdavUrl, s.c.WebdavPath) + helper.s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", helper.s.file, helper.s.c.WebdavUrl, helper.s.c.WebdavPath) return nil } -func (helper *WebdavHelper) pruneBackups(s *script, deadline time.Time) error { - candidates, err := helper.client.ReadDir(s.c.WebdavPath) +func (helper *WebdavHelper) pruneBackups(deadline time.Time) error { + candidates, err := helper.client.ReadDir(helper.s.c.WebdavPath) if err != nil { return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) } var matches []fs.FileInfo var lenCandidates int for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), s.c.BackupPruningPrefix) { + if !strings.HasPrefix(candidate.Name(), helper.s.c.BackupPruningPrefix) { continue } lenCandidates++ @@ -74,14 +79,14 @@ func (helper *WebdavHelper) pruneBackups(s *script, deadline time.Time) error { } } - s.stats.Storages.WebDAV = StorageStats{ + 
helper.s.stats.Storages.WebDAV = StorageStats{
 		Total:  uint(lenCandidates),
 		Pruned: uint(len(matches)),
 	}
 
-	doPrune(s, len(matches), lenCandidates, "WebDAV backup(s)", func() error {
+	doPrune(helper.s, len(matches), lenCandidates, "WebDAV backup(s)", func() error {
 		for _, match := range matches {
-			if err := helper.client.Remove(filepath.Join(s.c.WebdavPath, match.Name())); err != nil {
+			if err := helper.client.Remove(filepath.Join(helper.s.c.WebdavPath, match.Name())); err != nil {
 				return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
 			}
 		}

From 5842ff1b02c10d94cfff56b38e0ed372abb65d00 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Fri, 22 Jul 2022 17:17:21 +0200
Subject: [PATCH 05/32] Created sub modules. Enhanced abstract implementation.

---
 cmd/backup/hooks.go                           |   4 +-
 cmd/backup/lock.go                            |   5 +-
 cmd/backup/notifications.go                   |   8 +-
 cmd/backup/script.go                          | 105 ++++++++++--------
 cmd/backup/{ => storages}/local.go            |  55 +++++----
 cmd/backup/{ => storages}/minio.go            |  64 ++++++-----
 cmd/backup/{ => storages}/ssh.go              |  66 +++++------
 cmd/backup/{helper.go => storages/storage.go} |  26 +++--
 cmd/backup/storages/webdav.go                 | 101 +++++++++++++++++
 cmd/backup/{ => types}/config.go              |  23 ++--
 cmd/backup/{ => types}/stats.go               |   2 +-
 cmd/backup/{ => utilities}/util.go            |  12 +-
 cmd/backup/webdav.go                          |  97 ----------------
 13 files changed, 305 insertions(+), 263 deletions(-)
 rename cmd/backup/{ => storages}/local.go (56%)
 rename cmd/backup/{ => storages}/minio.go (53%)
 rename cmd/backup/{ => storages}/ssh.go (60%)
 rename cmd/backup/{helper.go => storages/storage.go} (60%)
 create mode 100644 cmd/backup/storages/webdav.go
 rename cmd/backup/{ => types}/config.go (82%)
 rename cmd/backup/{ => types}/stats.go (98%)
 rename cmd/backup/{ => utilities}/util.go (88%)
 delete mode 100644 cmd/backup/webdav.go

diff --git a/cmd/backup/hooks.go b/cmd/backup/hooks.go
index ed81679c..b5277eda 100644
--- a/cmd/backup/hooks.go
+++ b/cmd/backup/hooks.go
@@ -6,6 +6,8 @@ package main
 import (
 	"fmt"
 	"sort"
+
+	u "github.com/offen/docker-volume-backup/cmd/backup/utilities"
 )
 
 // hook contains a queued action that can be triggered when the script
@@ -50,7 +52,7 @@ func (s *script) runHooks(err error) error {
 		}
 	}
 	if len(actionErrors) != 0 {
-		return u.Join(actionErrors...)
} return nil } diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go index 2bb5a797..417206fc 100644 --- a/cmd/backup/lock.go +++ b/cmd/backup/lock.go @@ -9,6 +9,7 @@ import ( "time" "github.com/gofrs/flock" + u "github.com/offen/docker-volume-backup/cmd/backup/utilities" ) // lock opens a lockfile at the given location, keeping it locked until the @@ -31,7 +32,7 @@ func (s *script) lock(lockfile string) (func() error, error) { for { acquired, err := fileLock.TryLock() if err != nil { - return noop, fmt.Errorf("lock: error trying lock: %w", err) + return u.Noop, fmt.Errorf("lock: error trying lock: %w", err) } if acquired { if s.encounteredLock { @@ -52,7 +53,7 @@ func (s *script) lock(lockfile string) (func() error, error) { case <-retry.C: continue case <-deadline.C: - return noop, errors.New("lock: timed out waiting for lockfile to become available") + return u.Noop, errors.New("lock: timed out waiting for lockfile to become available") } } } diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go index 80af395d..1bd65b34 100644 --- a/cmd/backup/notifications.go +++ b/cmd/backup/notifications.go @@ -12,6 +12,8 @@ import ( "time" sTypes "github.com/containrrr/shoutrrr/pkg/types" + t "github.com/offen/docker-volume-backup/cmd/backup/types" + u "github.com/offen/docker-volume-backup/cmd/backup/utilities" ) //go:embed notifications.tmpl @@ -20,8 +22,8 @@ var defaultNotifications string // NotificationData data to be passed to the notification templates type NotificationData struct { Error error - Config *Config - Stats *Stats + Config *t.Config + Stats *t.Stats } // notify sends a notification using the given title and body templates. @@ -68,7 +70,7 @@ func (s *script) sendNotification(title, body string) error { } } if len(errs) != 0 { - return fmt.Errorf("sendNotification: error sending message: %w", join(errs...)) + return fmt.Errorf("sendNotification: error sending message: %w", u.Join(errs...)) } return nil } diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 776751cd..9c12da11 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -14,6 +14,10 @@ import ( "text/template" "time" + strg "github.com/offen/docker-volume-backup/cmd/backup/storages" + t "github.com/offen/docker-volume-backup/cmd/backup/types" + u "github.com/offen/docker-volume-backup/cmd/backup/utilities" + "github.com/containrrr/shoutrrr" "github.com/containrrr/shoutrrr/pkg/router" "github.com/docker/docker/api/types" @@ -23,7 +27,6 @@ import ( "github.com/kelseyhightower/envconfig" "github.com/leekchan/timeutil" "github.com/otiai10/copy" - "github.com/pkg/sftp" "github.com/sirupsen/logrus" "golang.org/x/crypto/openpgp" ) @@ -31,24 +34,23 @@ import ( // script holds all the stateful information required to orchestrate a // single backup run. 
type script struct { - cli *client.Client - minioHelper *MinioHelper - webdavHelper *WebdavHelper - sshHelper *SshHelper - localHelper *LocalHelper - sftpClient *sftp.Client - logger *logrus.Logger - sender *router.ServiceRouter - template *template.Template - hooks []hook - hookLevel hookLevel + cli *client.Client + s3Storage *strg.S3Storage + webdavStorage *strg.WebDavStorage + sshStorage *strg.SshStorage + localStorage *strg.LocalStorage + logger *logrus.Logger + sender *router.ServiceRouter + template *template.Template + hooks []hook + hookLevel hookLevel file string - stats *Stats + stats *t.Stats encounteredLock bool - c *Config + c *t.Config } // newScript creates all resources needed for the script to perform actions against @@ -56,19 +58,19 @@ type script struct { // reading from env vars or other configuration sources is expected to happen // in this method. func newScript() (*script, error) { - stdOut, logBuffer := buffer(os.Stdout) + stdOut, logBuffer := u.Buffer(os.Stdout) s := &script{ - c: &Config{}, + c: &t.Config{}, logger: &logrus.Logger{ Out: stdOut, Formatter: new(logrus.TextFormatter), Hooks: make(logrus.LevelHooks), Level: logrus.InfoLevel, }, - stats: &Stats{ + stats: &t.Stats{ StartTime: time.Now(), LogOutput: logBuffer, - Storages: StoragesStats{}, + Storages: t.StoragesStats{}, }, } @@ -82,6 +84,11 @@ func newScript() (*script, error) { return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err) } + s3Config := &t.S3Config{} + if err := envconfig.Process("Aws", s3Config); err != nil { + return nil, fmt.Errorf("newScript: failed to process configuration values for AWS: %w", err) + } + s.file = path.Join("/tmp", s.c.BackupFilename) if s.c.BackupFilenameExpand { s.file = os.ExpandEnv(s.file) @@ -100,19 +107,19 @@ func newScript() (*script, error) { s.cli = cli } - if s.minioHelper, err = newMinioHelper(s); err != nil { + if s.s3Storage, err = strg.InitS3(s3Config); err != nil { return nil, err } - if s.webdavHelper, err = newWebdavHelper(s); err != nil { + if s.webdavStorage, err = strg.InitWebDav(s.c); err != nil { return nil, err } - if s.sshHelper, err = newSshHelper(s); err != nil { + if s.sshStorage, err = strg.InitSSH(s.c); err != nil { return nil, err } - s.localHelper = newLocalhelper(s) + s.localStorage = strg.InitLocal(s.c) if s.c.EmailNotificationRecipient != "" { emailURL := fmt.Sprintf( @@ -185,14 +192,14 @@ func newScript() (*script, error) { // restart everything that has been stopped. 
func (s *script) stopContainers() (func() error, error) { if s.cli == nil { - return noop, nil + return u.Noop, nil } allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{ Quiet: true, }) if err != nil { - return noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err) + return u.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err) } containerLabel := fmt.Sprintf( @@ -208,11 +215,11 @@ func (s *script) stopContainers() (func() error, error) { }) if err != nil { - return noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err) + return u.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err) } if len(containersToStop) == 0 { - return noop, nil + return u.Noop, nil } s.logger.Infof( @@ -237,11 +244,11 @@ func (s *script) stopContainers() (func() error, error) { stopError = fmt.Errorf( "stopContainersAndRun: %d error(s) stopping containers: %w", len(stopErrors), - join(stopErrors...), + u.Join(stopErrors...), ) } - s.stats.Containers = ContainersStats{ + s.stats.Containers = t.ContainersStats{ All: uint(len(allContainers)), ToStop: uint(len(containersToStop)), Stopped: uint(len(stoppedContainers)), @@ -288,7 +295,7 @@ func (s *script) stopContainers() (func() error, error) { return fmt.Errorf( "stopContainersAndRun: %d error(s) restarting containers and services: %w", len(restartErrors), - join(restartErrors...), + u.Join(restartErrors...), ) } s.logger.Infof( @@ -314,7 +321,7 @@ func (s *script) createArchive() error { backupSources = filepath.Join("/tmp", s.c.BackupSources) // copy before compressing guard against a situation where backup folder's content are still growing. s.registerHook(hookLevelPlumbing, func(error) error { - if err := remove(backupSources); err != nil { + if err := u.Remove(backupSources); err != nil { return fmt.Errorf("takeBackup: error removing snapshot: %w", err) } s.logger.Infof("Removed snapshot `%s`.", backupSources) @@ -331,7 +338,7 @@ func (s *script) createArchive() error { tarFile := s.file s.registerHook(hookLevelPlumbing, func(error) error { - if err := remove(tarFile); err != nil { + if err := u.Remove(tarFile); err != nil { return fmt.Errorf("takeBackup: error removing tar file: %w", err) } s.logger.Infof("Removed tar file `%s`.", tarFile) @@ -376,7 +383,7 @@ func (s *script) encryptArchive() error { gpgFile := fmt.Sprintf("%s.gpg", s.file) s.registerHook(hookLevelPlumbing, func(error) error { - if err := remove(gpgFile); err != nil { + if err := u.Remove(gpgFile); err != nil { return fmt.Errorf("encryptBackup: error removing gpg file: %w", err) } s.logger.Infof("Removed GPG file `%s`.", gpgFile) @@ -384,20 +391,20 @@ func (s *script) encryptArchive() error { }) outFile, err := os.Create(gpgFile) - defer outFile.Close() if err != nil { return fmt.Errorf("encryptBackup: error opening out file: %w", err) } + defer outFile.Close() _, name := path.Split(s.file) dst, err := openpgp.SymmetricallyEncrypt(outFile, []byte(s.c.GpgPassphrase), &openpgp.FileHints{ IsBinary: true, FileName: name, }, nil) - defer dst.Close() if err != nil { return fmt.Errorf("encryptBackup: error encrypting backup file: %w", err) } + defer dst.Close() src, err := os.Open(s.file) if err != nil { @@ -421,27 +428,27 @@ func (s *script) copyArchive() error { return fmt.Errorf("copyBackup: unable to stat backup file: %w", err) } else { size := stat.Size() - s.stats.BackupFile = BackupFileStats{ + s.stats.BackupFile = t.BackupFileStats{ 
Size: uint64(size), Name: name, FullPath: s.file, } } - if s.minioHelper != nil { - s.minioHelper.copyArchive(name) + if s.s3Storage != nil { + s.s3Storage.Copy(s.file) } - if s.webdavHelper != nil { - s.webdavHelper.copyArchive(name) + if s.webdavStorage != nil { + s.webdavStorage.Copy(s.file) } - if s.sshHelper != nil { - s.sshHelper.copyArchive(name) + if s.sshStorage != nil { + s.sshStorage.Copy(s.file) } if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - s.localHelper.copyArchive(name) + s.localStorage.Copy(s.file) } return nil } @@ -456,20 +463,20 @@ func (s *script) pruneBackups() error { deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway) - if s.minioHelper != nil { - s.minioHelper.pruneBackups(deadline) + if s.s3Storage != nil { + s.s3Storage.Prune(deadline) } - if s.webdavHelper != nil { - s.webdavHelper.pruneBackups(deadline) + if s.webdavStorage != nil { + s.webdavStorage.Prune(deadline) } - if s.sshHelper != nil { - s.sshHelper.pruneBackups(deadline) + if s.sshStorage != nil { + s.sshStorage.Prune(deadline) } if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - s.localHelper.pruneBackups(deadline) + s.localStorage.Prune(deadline) } return nil diff --git a/cmd/backup/local.go b/cmd/backup/storages/local.go similarity index 56% rename from cmd/backup/local.go rename to cmd/backup/storages/local.go index 34f93043..ef291a55 100644 --- a/cmd/backup/local.go +++ b/cmd/backup/storages/local.go @@ -1,4 +1,4 @@ -package main +package storages import ( "fmt" @@ -6,47 +6,54 @@ import ( "path" "path/filepath" "time" + + t "github.com/offen/docker-volume-backup/cmd/backup/types" + u "github.com/offen/docker-volume-backup/cmd/backup/utilities" ) -type LocalHelper struct { - *AbstractHelper - s *script +type LocalStorage struct { + *GenericStorage + backupArchive string + backupLatestSymlink string } -func newLocalhelper(s *script) *LocalHelper { - a := &AbstractHelper{} - r := &LocalHelper{a, s} - a.Helper = r +func InitLocal(c *t.Config) *LocalStorage { + a := &GenericStorage{} + r := &LocalStorage{a, c.BackupArchive, c.BackupLatestSymlink} + a.Storage = r return r } -func (helper *LocalHelper) copyArchive(name string) error { - if err := copyFile(helper.s.file, path.Join(helper.s.c.BackupArchive, name)); err != nil { +func (lc *LocalStorage) Copy(file string) error { + _, name := path.Split(file) + + if err := u.CopyFile(file, path.Join(lc.backupArchive, name)); err != nil { return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) } - helper.s.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", helper.s.file, helper.s.c.BackupArchive) - if helper.s.c.BackupLatestSymlink != "" { - symlink := path.Join(helper.s.c.BackupArchive, helper.s.c.BackupLatestSymlink) + lc.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, lc.backupArchive) + + if lc.backupLatestSymlink != "" { + symlink := path.Join(lc.backupArchive, lc.backupLatestSymlink) if _, err := os.Lstat(symlink); err == nil { os.Remove(symlink) } if err := os.Symlink(name, symlink); err != nil { return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) } - helper.s.logger.Infof("Created/Updated symlink `%s` for latest backup.", helper.s.c.BackupLatestSymlink) + lc.logger.Infof("Created/Updated symlink `%s` for latest backup.", lc.backupLatestSymlink) } return nil } -func (helper *LocalHelper) pruneBackups(deadline time.Time) error { +func (lc *LocalStorage) Prune(deadline time.Time) (*t.StorageStats, 
error) { globPattern := path.Join( - helper.s.c.BackupArchive, - fmt.Sprintf("%s*", helper.s.c.BackupPruningPrefix), + lc.backupArchive, + fmt.Sprintf("%s*", lc.backupPruningPrefix), ) globMatches, err := filepath.Glob(globPattern) if err != nil { - return fmt.Errorf( + return nil, fmt.Errorf( "pruneBackups: error looking up matching files using pattern %s: %w", globPattern, err, @@ -57,7 +64,7 @@ func (helper *LocalHelper) pruneBackups(deadline time.Time) error { for _, candidate := range globMatches { fi, err := os.Lstat(candidate) if err != nil { - return fmt.Errorf( + return nil, fmt.Errorf( "pruneBackups: error calling Lstat on file %s: %w", candidate, err, @@ -73,7 +80,7 @@ func (helper *LocalHelper) pruneBackups(deadline time.Time) error { for _, candidate := range candidates { fi, err := os.Stat(candidate) if err != nil { - return fmt.Errorf( + return nil, fmt.Errorf( "pruneBackups: error calling stat on file %s: %w", candidate, err, @@ -84,12 +91,12 @@ func (helper *LocalHelper) pruneBackups(deadline time.Time) error { } } - helper.s.stats.Storages.Local = StorageStats{ + stats := t.StorageStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } - doPrune(helper.s, len(matches), len(candidates), "local backup(s)", func() error { + lc.doPrune(len(matches), len(candidates), "local backup(s)", func() error { var removeErrors []error for _, match := range matches { if err := os.Remove(match); err != nil { @@ -100,11 +107,11 @@ func (helper *LocalHelper) pruneBackups(deadline time.Time) error { return fmt.Errorf( "pruneBackups: %d error(s) deleting local files, starting with: %w", len(removeErrors), - join(removeErrors...), + u.Join(removeErrors...), ) } return nil }) - return nil + return &stats, nil } diff --git a/cmd/backup/minio.go b/cmd/backup/storages/minio.go similarity index 53% rename from cmd/backup/minio.go rename to cmd/backup/storages/minio.go index f6ef59a3..617e3344 100644 --- a/cmd/backup/minio.go +++ b/cmd/backup/storages/minio.go @@ -1,46 +1,49 @@ -package main +package storages import ( "context" "errors" "fmt" + "path" "path/filepath" "time" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" + t "github.com/offen/docker-volume-backup/cmd/backup/types" + u "github.com/offen/docker-volume-backup/cmd/backup/utilities" ) -type MinioHelper struct { - *AbstractHelper +type S3Storage struct { + *GenericStorage client *minio.Client - s *script + config *t.S3Config } -func newMinioHelper(s *script) (*MinioHelper, error) { - if s.c.AwsS3BucketName == "" { +func InitS3(c *t.S3Config) (*S3Storage, error) { + if c.AwsS3BucketName == "" { return nil, nil } var creds *credentials.Credentials - if s.c.AwsAccessKeyID != "" && s.c.AwsSecretAccessKey != "" { + if c.AwsAccessKeyID != "" && c.AwsSecretAccessKey != "" { creds = credentials.NewStaticV4( - s.c.AwsAccessKeyID, - s.c.AwsSecretAccessKey, + c.AwsAccessKeyID, + c.AwsSecretAccessKey, "", ) - } else if s.c.AwsIamRoleEndpoint != "" { - creds = credentials.NewIAM(s.c.AwsIamRoleEndpoint) + } else if c.AwsIamRoleEndpoint != "" { + creds = credentials.NewIAM(c.AwsIamRoleEndpoint) } else { return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided") } options := minio.Options{ Creds: creds, - Secure: s.c.AwsEndpointProto == "https", + Secure: c.AwsEndpointProto == "https", } - if s.c.AwsEndpointInsecure { + if c.AwsEndpointInsecure { if !options.Secure { return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https") } @@ -53,33 
+56,34 @@ func newMinioHelper(s *script) (*MinioHelper, error) { options.Transport = transport } - mc, err := minio.New(s.c.AwsEndpoint, &options) + mc, err := minio.New(c.AwsEndpoint, &options) if err != nil { return nil, fmt.Errorf("newScript: error setting up minio client: %w", err) } - a := &AbstractHelper{} - r := &MinioHelper{a, mc, s} - a.Helper = r + a := &GenericStorage{} + r := &S3Storage{a, mc, c} + a.Storage = r return r, nil } -func (helper *MinioHelper) copyArchive(name string) error { - if _, err := helper.client.FPutObject(context.Background(), helper.s.c.AwsS3BucketName, filepath.Join(helper.s.c.AwsS3Path, name), helper.s.file, minio.PutObjectOptions{ +func (s3 *S3Storage) Copy(file string) error { + _, name := path.Split(file) + if _, err := s3.client.FPutObject(context.Background(), s3.config.AwsS3BucketName, filepath.Join(s3.config.AwsS3Path, name), file, minio.PutObjectOptions{ ContentType: "application/tar+gzip", - StorageClass: helper.s.c.AwsStorageClass, + StorageClass: s3.config.AwsStorageClass, }); err != nil { return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err) } - helper.s.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", helper.s.file, helper.s.c.AwsS3BucketName) + s3.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, s3.config.AwsS3BucketName) return nil } -func (helper *MinioHelper) pruneBackups(deadline time.Time) error { - candidates := helper.client.ListObjects(context.Background(), helper.s.c.AwsS3BucketName, minio.ListObjectsOptions{ +func (s3 *S3Storage) Prune(deadline time.Time) (*t.StorageStats, error) { + candidates := s3.client.ListObjects(context.Background(), s3.config.AwsS3BucketName, minio.ListObjectsOptions{ WithMetadata: true, - Prefix: filepath.Join(helper.s.c.AwsS3Path, helper.s.c.BackupPruningPrefix), + Prefix: filepath.Join(s3.config.AwsS3Path, s3.backupPruningPrefix), Recursive: true, }) @@ -88,7 +92,7 @@ func (helper *MinioHelper) pruneBackups(deadline time.Time) error { for candidate := range candidates { lenCandidates++ if candidate.Err != nil { - return fmt.Errorf( + return nil, fmt.Errorf( "pruneBackups: error looking up candidates from remote storage: %w", candidate.Err, ) @@ -98,12 +102,12 @@ func (helper *MinioHelper) pruneBackups(deadline time.Time) error { } } - helper.s.stats.Storages.S3 = StorageStats{ + stats := t.StorageStats{ Total: uint(lenCandidates), Pruned: uint(len(matches)), } - doPrune(helper.s, len(matches), lenCandidates, "remote backup(s)", func() error { + s3.doPrune(len(matches), lenCandidates, "remote backup(s)", func() error { objectsCh := make(chan minio.ObjectInfo) go func() { for _, match := range matches { @@ -111,7 +115,7 @@ func (helper *MinioHelper) pruneBackups(deadline time.Time) error { } close(objectsCh) }() - errChan := helper.client.RemoveObjects(context.Background(), helper.s.c.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) + errChan := s3.client.RemoveObjects(context.Background(), s3.config.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) var removeErrors []error for result := range errChan { if result.Err != nil { @@ -119,10 +123,10 @@ func (helper *MinioHelper) pruneBackups(deadline time.Time) error { } } if len(removeErrors) != 0 { - return join(removeErrors...) + return u.Join(removeErrors...) 
} return nil }) - return nil + return &stats, nil } diff --git a/cmd/backup/ssh.go b/cmd/backup/storages/ssh.go similarity index 60% rename from cmd/backup/ssh.go rename to cmd/backup/storages/ssh.go index f58b2f23..801d0760 100644 --- a/cmd/backup/ssh.go +++ b/cmd/backup/storages/ssh.go @@ -1,4 +1,4 @@ -package main +package storages import ( "errors" @@ -6,40 +6,44 @@ import ( "io" "io/ioutil" "os" + "path" "path/filepath" "strings" "time" + t "github.com/offen/docker-volume-backup/cmd/backup/types" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" ) -type SshHelper struct { - *AbstractHelper - client *ssh.Client - s *script +type SshStorage struct { + *GenericStorage + client *ssh.Client + sftpClient *sftp.Client + sshRemotePath string + sshHostName string } -func newSshHelper(s *script) (*SshHelper, error) { - if s.c.SSHHostName == "" { +func InitSSH(c *t.Config) (*SshStorage, error) { + if c.SSHHostName == "" { return nil, nil } var authMethods []ssh.AuthMethod - if s.c.SSHPassword != "" { - authMethods = append(authMethods, ssh.Password(s.c.SSHPassword)) + if c.SSHPassword != "" { + authMethods = append(authMethods, ssh.Password(c.SSHPassword)) } - if _, err := os.Stat(s.c.SSHIdentityFile); err == nil { - key, err := ioutil.ReadFile(s.c.SSHIdentityFile) + if _, err := os.Stat(c.SSHIdentityFile); err == nil { + key, err := ioutil.ReadFile(c.SSHIdentityFile) if err != nil { return nil, errors.New("newScript: error reading the private key") } var signer ssh.Signer - if s.c.SSHIdentityPassphrase != "" { - signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(s.c.SSHIdentityPassphrase)) + if c.SSHIdentityPassphrase != "" { + signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(c.SSHIdentityPassphrase)) if err != nil { return nil, errors.New("newScript: error parsing the encrypted private key") } @@ -54,11 +58,11 @@ func newSshHelper(s *script) (*SshHelper, error) { } sshClientConfig := &ssh.ClientConfig{ - User: s.c.SSHUser, + User: c.SSHUser, Auth: authMethods, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } - sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", s.c.SSHHostName, s.c.SSHPort), sshClientConfig) + sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", c.SSHHostName, c.SSHPort), sshClientConfig) if err != nil { return nil, fmt.Errorf("newScript: error creating ssh client: %w", err) @@ -69,25 +73,25 @@ func newSshHelper(s *script) (*SshHelper, error) { } sftpClient, err := sftp.NewClient(sshClient) - s.sftpClient = sftpClient if err != nil { return nil, fmt.Errorf("newScript: error creating sftp client: %w", err) } - a := &AbstractHelper{} - r := &SshHelper{a, sshClient, s} - a.Helper = r + a := &GenericStorage{} + r := &SshStorage{a, sshClient, sftpClient, c.SSHRemotePath, c.SSHHostName} + a.Storage = r return r, nil } -func (helper *SshHelper) copyArchive(name string) error { - source, err := os.Open(helper.s.file) +func (sh *SshStorage) Copy(file string) error { + source, err := os.Open(file) + _, name := path.Split(file) if err != nil { return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) } defer source.Close() - destination, err := helper.s.sftpClient.Create(filepath.Join(helper.s.c.SSHRemotePath, name)) + destination, err := sh.sftpClient.Create(filepath.Join(sh.sshRemotePath, name)) if err != nil { return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) } @@ -123,20 +127,20 @@ func (helper *SshHelper) copyArchive(name string) error { } } - helper.s.logger.Infof("Uploaded a copy of backup `%s` to SSH storage 
'%s' at path '%s'.", helper.s.file, helper.s.c.SSHHostName, helper.s.c.SSHRemotePath) + sh.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, sh.sshHostName, sh.sshRemotePath) return nil } -func (helper *SshHelper) pruneBackups(deadline time.Time) error { - candidates, err := helper.s.sftpClient.ReadDir(helper.s.c.SSHRemotePath) +func (sh *SshStorage) Prune(deadline time.Time) (*t.StorageStats, error) { + candidates, err := sh.sftpClient.ReadDir(sh.sshRemotePath) if err != nil { - return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) + return nil, fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) } var matches []string for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), helper.s.c.BackupPruningPrefix) { + if !strings.HasPrefix(candidate.Name(), sh.backupPruningPrefix) { continue } if candidate.ModTime().Before(deadline) { @@ -144,19 +148,19 @@ func (helper *SshHelper) pruneBackups(deadline time.Time) error { } } - helper.s.stats.Storages.SSH = StorageStats{ + stats := t.StorageStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } - doPrune(helper.s, len(matches), len(candidates), "SSH backup(s)", func() error { + sh.doPrune(len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { - if err := helper.s.sftpClient.Remove(filepath.Join(helper.s.c.SSHRemotePath, match)); err != nil { + if err := sh.sftpClient.Remove(filepath.Join(sh.sshRemotePath, match)); err != nil { return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) } } return nil }) - return nil + return &stats, nil } diff --git a/cmd/backup/helper.go b/cmd/backup/storages/storage.go similarity index 60% rename from cmd/backup/helper.go rename to cmd/backup/storages/storage.go index 57d88c85..ef203675 100644 --- a/cmd/backup/helper.go +++ b/cmd/backup/storages/storage.go @@ -1,19 +1,27 @@ -package main +package storages -import "time" +import ( + "time" -type Helper interface { - copyArchive(name string) error - pruneBackups(deadline time.Time) error + t "github.com/offen/docker-volume-backup/cmd/backup/types" + "github.com/sirupsen/logrus" +) + +type Storage interface { + Copy(file string) error + Prune(deadline time.Time) (*t.StorageStats, error) } -type AbstractHelper struct { - Helper +type GenericStorage struct { + Storage + backupRetentionDays int32 + backupPruningPrefix string + logger *logrus.Logger } // doPrune holds general control flow that applies to any kind of storage. // Callers can pass in a thunk that performs the actual deletion of files. 
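The thunk-based layout the comment above describes keeps the retention bookkeeping (counting, logging, the refuse-to-delete-everything guard) in one place while each backend supplies only its own deletion code. A self-contained sketch of the pattern, with illustrative names rather than the code under review:

package main

import "fmt"

// pruneWith runs the shared bookkeeping and delegates deletion to removeAll.
func pruneWith(matches, candidates int, removeAll func() error) error {
	if matches == 0 {
		fmt.Printf("nothing to prune out of %d candidates\n", candidates)
		return nil
	}
	if matches == candidates {
		// Refuse to wipe every existing backup, mirroring the guard above.
		return fmt.Errorf("refusing to delete all %d backups", candidates)
	}
	return removeAll()
}

func main() {
	files := []string{"backup-old.tar.gz"}
	_ = pruneWith(len(files), 3, func() error {
		for _, f := range files {
			fmt.Println("would remove", f) // e.g. os.Remove(f) for local storage
		}
		return nil
	})
}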
-func doPrune(s *script, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { +func (s *GenericStorage) doPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { if lenMatches != 0 && lenMatches != lenCandidates { if err := doRemoveFiles(); err != nil { return err @@ -23,7 +31,7 @@ func doPrune(s *script, lenMatches, lenCandidates int, description string, doRem lenMatches, lenCandidates, description, - s.c.BackupRetentionDays, + s.backupRetentionDays, ) } else if lenMatches != 0 && lenMatches == lenCandidates { s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description) diff --git a/cmd/backup/storages/webdav.go b/cmd/backup/storages/webdav.go new file mode 100644 index 00000000..482ee564 --- /dev/null +++ b/cmd/backup/storages/webdav.go @@ -0,0 +1,101 @@ +package storages + +import ( + "errors" + "fmt" + "io/fs" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" + + t "github.com/offen/docker-volume-backup/cmd/backup/types" + "github.com/studio-b12/gowebdav" +) + +type WebDavStorage struct { + *GenericStorage + client *gowebdav.Client + webdavUrl string + webdavPath string +} + +func InitWebDav(c *t.Config) (*WebDavStorage, error) { + if c.WebdavUrl == "" { + return nil, nil + } + + if c.WebdavUsername == "" || c.WebdavPassword == "" { + return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") + } else { + webdavClient := gowebdav.NewClient(c.WebdavUrl, c.WebdavUsername, c.WebdavPassword) + + if c.WebdavUrlInsecure { + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport") + } + webdavTransport := defaultTransport.Clone() + webdavTransport.TLSClientConfig.InsecureSkipVerify = c.WebdavUrlInsecure + webdavClient.SetTransport(webdavTransport) + } + + a := &GenericStorage{} + r := &WebDavStorage{a, webdavClient, c.WebdavUrl, c.WebdavPath} + a.Storage = r + return r, nil + } +} + +func (wd *WebDavStorage) Copy(file string) error { + bytes, err := os.ReadFile(file) + _, name := path.Split(file) + if err != nil { + return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) + } + if err := wd.client.MkdirAll(wd.webdavPath, 0644); err != nil { + return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", wd.webdavPath, err) + } + if err := wd.client.Write(filepath.Join(wd.webdavPath, name), bytes, 0644); err != nil { + return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) + } + wd.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, wd.webdavUrl, wd.webdavPath) + + return nil +} + +func (wd *WebDavStorage) Prune(deadline time.Time) (*t.StorageStats, error) { + candidates, err := wd.client.ReadDir(wd.webdavPath) + if err != nil { + return nil, fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) + } + var matches []fs.FileInfo + var lenCandidates int + for _, candidate := range candidates { + if !strings.HasPrefix(candidate.Name(), wd.backupPruningPrefix) { + continue + } + lenCandidates++ + if candidate.ModTime().Before(deadline) { + matches = append(matches, candidate) + } + } + + stats := t.StorageStats{ + Total: uint(lenCandidates), + Pruned: uint(len(matches)), + } + + wd.doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { + for _, match := range matches 
{
+			if err := wd.client.Remove(filepath.Join(wd.webdavPath, match.Name())); err != nil {
+				return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
+			}
+		}
+		return nil
+	})
+
+	return &stats, nil
+}
diff --git a/cmd/backup/config.go b/cmd/backup/types/config.go
similarity index 82%
rename from cmd/backup/config.go
rename to cmd/backup/types/config.go
index e12d6826..b4dd1799 100644
--- a/cmd/backup/config.go
+++ b/cmd/backup/types/config.go
@@ -1,7 +1,7 @@
 // Copyright 2022 - Offen Authors
 // SPDX-License-Identifier: MPL-2.0

-package main
+package types

 import (
 	"fmt"
@@ -23,15 +23,6 @@ type Config struct {
 	BackupStopContainerLabel string        `split_words:"true" default:"true"`
 	BackupFromSnapshot       bool          `split_words:"true"`
 	BackupExcludeRegexp      RegexpDecoder `split_words:"true"`
-	AwsS3BucketName          string        `split_words:"true"`
-	AwsS3Path                string        `split_words:"true"`
-	AwsEndpoint              string        `split_words:"true" default:"s3.amazonaws.com"`
-	AwsEndpointProto         string        `split_words:"true" default:"https"`
-	AwsEndpointInsecure      bool          `split_words:"true"`
-	AwsStorageClass          string        `split_words:"true"`
-	AwsAccessKeyID           string        `envconfig:"AWS_ACCESS_KEY_ID"`
-	AwsSecretAccessKey       string        `split_words:"true"`
-	AwsIamRoleEndpoint       string        `split_words:"true"`
 	GpgPassphrase            string        `split_words:"true"`
 	NotificationURLs         []string      `envconfig:"NOTIFICATION_URLS"`
 	NotificationLevel        string        `split_words:"true" default:"error"`
@@ -58,6 +49,18 @@ type Config struct {
 	LockTimeout              time.Duration `split_words:"true" default:"60m"`
 }

+type S3Config struct {
+	AwsS3BucketName     string `split_words:"true"`
+	AwsS3Path           string `split_words:"true"`
+	AwsEndpoint         string `split_words:"true" default:"s3.amazonaws.com"`
+	AwsEndpointProto    string `split_words:"true" default:"https"`
+	AwsEndpointInsecure bool   `split_words:"true"`
+	AwsStorageClass     string `split_words:"true"`
+	AwsAccessKeyID      string `envconfig:"AWS_ACCESS_KEY_ID"`
+	AwsSecretAccessKey  string `split_words:"true"`
+	AwsIamRoleEndpoint  string `split_words:"true"`
+}
+
 type RegexpDecoder struct {
 	Re *regexp.Regexp
 }
diff --git a/cmd/backup/stats.go b/cmd/backup/types/stats.go
similarity index 98%
rename from cmd/backup/stats.go
rename to cmd/backup/types/stats.go
index bf8e46ef..a0ad7994 100644
--- a/cmd/backup/stats.go
+++ b/cmd/backup/types/stats.go
@@ -1,7 +1,7 @@
 // Copyright 2022 - Offen Authors
 // SPDX-License-Identifier: MPL-2.0

-package main
+package types

 import (
 	"bytes"
diff --git a/cmd/backup/util.go b/cmd/backup/utilities/util.go
similarity index 88%
rename from cmd/backup/util.go
rename to cmd/backup/utilities/util.go
index fd80da6a..45bbf01e 100644
--- a/cmd/backup/util.go
+++ b/cmd/backup/utilities/util.go
@@ -1,7 +1,7 @@
 // Copyright 2022 - Offen Authors
 // SPDX-License-Identifier: MPL-2.0

-package main
+package utilities

 import (
 	"bytes"
@@ -12,10 +12,10 @@ import (
 	"strings"
 )

-var noop = func() error { return nil }
+var Noop = func() error { return nil }

 // copy creates a copy of the file located at `src` at `dst`.
-func copyFile(src, dst string) error {
+func CopyFile(src, dst string) error {
 	in, err := os.Open(src)
 	if err != nil {
 		return err
@@ -36,7 +36,7 @@ func copyFile(src, dst string) error {
 }

 // join takes a list of errors and joins them into a single error
-func join(errs ...error) error {
+func Join(errs ...error) error {
 	if len(errs) == 1 {
 		return errs[0]
 	}
@@ -51,7 +51,7 @@ func join(errs ...error) error {
 }

 // remove removes the given file or directory from disk.
-func remove(location string) error { +func Remove(location string) error { fi, err := os.Lstat(location) if err != nil { if os.IsNotExist(err) { @@ -72,7 +72,7 @@ func remove(location string) error { // buffer takes an io.Writer and returns a wrapped version of the // writer that writes to both the original target as well as the returned buffer -func buffer(w io.Writer) (io.Writer, *bytes.Buffer) { +func Buffer(w io.Writer) (io.Writer, *bytes.Buffer) { buffering := &bufferingWriter{buf: bytes.Buffer{}, writer: w} return buffering, &buffering.buf } diff --git a/cmd/backup/webdav.go b/cmd/backup/webdav.go deleted file mode 100644 index 6f739e2d..00000000 --- a/cmd/backup/webdav.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "io/fs" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/studio-b12/gowebdav" -) - -type WebdavHelper struct { - *AbstractHelper - client *gowebdav.Client - s *script -} - -func newWebdavHelper(s *script) (*WebdavHelper, error) { - if s.c.WebdavUrl == "" { - return nil, nil - } - - if s.c.WebdavUsername == "" || s.c.WebdavPassword == "" { - return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") - } else { - webdavClient := gowebdav.NewClient(s.c.WebdavUrl, s.c.WebdavUsername, s.c.WebdavPassword) - - if s.c.WebdavUrlInsecure { - defaultTransport, ok := http.DefaultTransport.(*http.Transport) - if !ok { - return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport") - } - webdavTransport := defaultTransport.Clone() - webdavTransport.TLSClientConfig.InsecureSkipVerify = s.c.WebdavUrlInsecure - webdavClient.SetTransport(webdavTransport) - } - - a := &AbstractHelper{} - r := &WebdavHelper{a, webdavClient, s} - a.Helper = r - return r, nil - } -} - -func (helper *WebdavHelper) copyArchive(name string) error { - bytes, err := os.ReadFile(helper.s.file) - if err != nil { - return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) - } - if err := helper.client.MkdirAll(helper.s.c.WebdavPath, 0644); err != nil { - return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", helper.s.c.WebdavPath, err) - } - if err := helper.client.Write(filepath.Join(helper.s.c.WebdavPath, name), bytes, 0644); err != nil { - return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) - } - helper.s.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", helper.s.file, helper.s.c.WebdavUrl, helper.s.c.WebdavPath) - - return nil -} - -func (helper *WebdavHelper) pruneBackups(deadline time.Time) error { - candidates, err := helper.client.ReadDir(helper.s.c.WebdavPath) - if err != nil { - return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) - } - var matches []fs.FileInfo - var lenCandidates int - for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), helper.s.c.BackupPruningPrefix) { - continue - } - lenCandidates++ - if candidate.ModTime().Before(deadline) { - matches = append(matches, candidate) - } - } - - helper.s.stats.Storages.WebDAV = StorageStats{ - Total: uint(lenCandidates), - Pruned: uint(len(matches)), - } - - doPrune(helper.s, len(matches), lenCandidates, "WebDAV backup(s)", func() error { - for _, match := range matches { - if err := helper.client.Remove(filepath.Join(helper.s.c.WebdavPath, match.Name())); err != nil { - return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", 
err) - } - } - return nil - }) - - return nil -} From 7104600435651f4533e1dd5662551d20a37ae4f0 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Fri, 22 Jul 2022 17:23:10 +0200 Subject: [PATCH 06/32] Fixed config issue --- cmd/backup/script.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 9c12da11..f3793e0f 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -85,7 +85,7 @@ func newScript() (*script, error) { } s3Config := &t.S3Config{} - if err := envconfig.Process("Aws", s3Config); err != nil { + if err := envconfig.Process("", s3Config); err != nil { return nil, fmt.Errorf("newScript: failed to process configuration values for AWS: %w", err) } From 195f61ab059e0c734804631df1fc8088d36e7bf0 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Fri, 22 Jul 2022 18:04:58 +0200 Subject: [PATCH 07/32] Fixed declaration issues. Added config to interface. --- cmd/backup/script.go | 13 ++++--------- cmd/backup/storages/local.go | 23 +++++++++++++---------- cmd/backup/storages/minio.go | 11 ++++++++--- cmd/backup/storages/ssh.go | 23 +++++++++++++---------- cmd/backup/storages/storage.go | 1 + cmd/backup/storages/webdav.go | 25 ++++++++++++++----------- cmd/backup/types/config.go | 21 +++++++++------------ 7 files changed, 62 insertions(+), 55 deletions(-) diff --git a/cmd/backup/script.go b/cmd/backup/script.go index f3793e0f..29cd4f77 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -84,11 +84,6 @@ func newScript() (*script, error) { return nil, fmt.Errorf("newScript: failed to process configuration values: %w", err) } - s3Config := &t.S3Config{} - if err := envconfig.Process("", s3Config); err != nil { - return nil, fmt.Errorf("newScript: failed to process configuration values for AWS: %w", err) - } - s.file = path.Join("/tmp", s.c.BackupFilename) if s.c.BackupFilenameExpand { s.file = os.ExpandEnv(s.file) @@ -107,19 +102,19 @@ func newScript() (*script, error) { s.cli = cli } - if s.s3Storage, err = strg.InitS3(s3Config); err != nil { + if s.s3Storage, err = strg.InitS3(s.c, s.logger); err != nil { return nil, err } - if s.webdavStorage, err = strg.InitWebDav(s.c); err != nil { + if s.webdavStorage, err = strg.InitWebDav(s.c, s.logger); err != nil { return nil, err } - if s.sshStorage, err = strg.InitSSH(s.c); err != nil { + if s.sshStorage, err = strg.InitSSH(s.c, s.logger); err != nil { return nil, err } - s.localStorage = strg.InitLocal(s.c) + s.localStorage = strg.InitLocal(s.c, s.logger) if s.c.EmailNotificationRecipient != "" { emailURL := fmt.Sprintf( diff --git a/cmd/backup/storages/local.go b/cmd/backup/storages/local.go index ef291a55..b562fe06 100644 --- a/cmd/backup/storages/local.go +++ b/cmd/backup/storages/local.go @@ -9,17 +9,20 @@ import ( t "github.com/offen/docker-volume-backup/cmd/backup/types" u "github.com/offen/docker-volume-backup/cmd/backup/utilities" + "github.com/sirupsen/logrus" ) type LocalStorage struct { *GenericStorage - backupArchive string - backupLatestSymlink string } -func InitLocal(c *t.Config) *LocalStorage { +func InitLocal(c *t.Config, l *logrus.Logger) *LocalStorage { a := &GenericStorage{} - r := &LocalStorage{a, c.BackupArchive, c.BackupLatestSymlink} + r := &LocalStorage{a} + a.backupRetentionDays = c.BackupRetentionDays + a.backupPruningPrefix = c.BackupPruningPrefix + a.logger = l + a.config = c a.Storage = r return r } @@ -27,20 +30,20 @@ func InitLocal(c *t.Config) *LocalStorage { 
func (lc *LocalStorage) Copy(file string) error { _, name := path.Split(file) - if err := u.CopyFile(file, path.Join(lc.backupArchive, name)); err != nil { + if err := u.CopyFile(file, path.Join(lc.config.BackupArchive, name)); err != nil { return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) } - lc.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, lc.backupArchive) + lc.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, lc.config.BackupArchive) - if lc.backupLatestSymlink != "" { - symlink := path.Join(lc.backupArchive, lc.backupLatestSymlink) + if lc.config.BackupLatestSymlink != "" { + symlink := path.Join(lc.config.BackupArchive, lc.config.BackupLatestSymlink) if _, err := os.Lstat(symlink); err == nil { os.Remove(symlink) } if err := os.Symlink(name, symlink); err != nil { return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) } - lc.logger.Infof("Created/Updated symlink `%s` for latest backup.", lc.backupLatestSymlink) + lc.logger.Infof("Created/Updated symlink `%s` for latest backup.", lc.config.BackupLatestSymlink) } return nil @@ -48,7 +51,7 @@ func (lc *LocalStorage) Copy(file string) error { func (lc *LocalStorage) Prune(deadline time.Time) (*t.StorageStats, error) { globPattern := path.Join( - lc.backupArchive, + lc.config.BackupArchive, fmt.Sprintf("%s*", lc.backupPruningPrefix), ) globMatches, err := filepath.Glob(globPattern) diff --git a/cmd/backup/storages/minio.go b/cmd/backup/storages/minio.go index 617e3344..0252d17b 100644 --- a/cmd/backup/storages/minio.go +++ b/cmd/backup/storages/minio.go @@ -12,15 +12,15 @@ import ( "github.com/minio/minio-go/v7/pkg/credentials" t "github.com/offen/docker-volume-backup/cmd/backup/types" u "github.com/offen/docker-volume-backup/cmd/backup/utilities" + "github.com/sirupsen/logrus" ) type S3Storage struct { *GenericStorage client *minio.Client - config *t.S3Config } -func InitS3(c *t.S3Config) (*S3Storage, error) { +func InitS3(c *t.Config, l *logrus.Logger) (*S3Storage, error) { if c.AwsS3BucketName == "" { return nil, nil } @@ -62,12 +62,17 @@ func InitS3(c *t.S3Config) (*S3Storage, error) { } a := &GenericStorage{} - r := &S3Storage{a, mc, c} + r := &S3Storage{a, mc} + a.backupRetentionDays = c.BackupRetentionDays + a.backupPruningPrefix = c.BackupPruningPrefix + a.logger = l + a.config = c a.Storage = r return r, nil } func (s3 *S3Storage) Copy(file string) error { + s3.logger.Infof("copyArchive->s3stg: Beginning...") _, name := path.Split(file) if _, err := s3.client.FPutObject(context.Background(), s3.config.AwsS3BucketName, filepath.Join(s3.config.AwsS3Path, name), file, minio.PutObjectOptions{ ContentType: "application/tar+gzip", diff --git a/cmd/backup/storages/ssh.go b/cmd/backup/storages/ssh.go index 801d0760..609b6b64 100644 --- a/cmd/backup/storages/ssh.go +++ b/cmd/backup/storages/ssh.go @@ -13,18 +13,17 @@ import ( t "github.com/offen/docker-volume-backup/cmd/backup/types" "github.com/pkg/sftp" + "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" ) type SshStorage struct { *GenericStorage - client *ssh.Client - sftpClient *sftp.Client - sshRemotePath string - sshHostName string + client *ssh.Client + sftpClient *sftp.Client } -func InitSSH(c *t.Config) (*SshStorage, error) { +func InitSSH(c *t.Config, l *logrus.Logger) (*SshStorage, error) { if c.SSHHostName == "" { return nil, nil } @@ -78,7 +77,11 @@ func InitSSH(c *t.Config) (*SshStorage, error) { } a := &GenericStorage{} - r := &SshStorage{a, sshClient, sftpClient, 
c.SSHRemotePath, c.SSHHostName} + r := &SshStorage{a, sshClient, sftpClient} + a.backupRetentionDays = c.BackupRetentionDays + a.backupPruningPrefix = c.BackupPruningPrefix + a.logger = l + a.config = c a.Storage = r return r, nil } @@ -91,7 +94,7 @@ func (sh *SshStorage) Copy(file string) error { } defer source.Close() - destination, err := sh.sftpClient.Create(filepath.Join(sh.sshRemotePath, name)) + destination, err := sh.sftpClient.Create(filepath.Join(sh.config.SSHRemotePath, name)) if err != nil { return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) } @@ -127,13 +130,13 @@ func (sh *SshStorage) Copy(file string) error { } } - sh.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, sh.sshHostName, sh.sshRemotePath) + sh.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, sh.config.SSHHostName, sh.config.SSHRemotePath) return nil } func (sh *SshStorage) Prune(deadline time.Time) (*t.StorageStats, error) { - candidates, err := sh.sftpClient.ReadDir(sh.sshRemotePath) + candidates, err := sh.sftpClient.ReadDir(sh.config.SSHRemotePath) if err != nil { return nil, fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) } @@ -155,7 +158,7 @@ func (sh *SshStorage) Prune(deadline time.Time) (*t.StorageStats, error) { sh.doPrune(len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { - if err := sh.sftpClient.Remove(filepath.Join(sh.sshRemotePath, match)); err != nil { + if err := sh.sftpClient.Remove(filepath.Join(sh.config.SSHRemotePath, match)); err != nil { return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) } } diff --git a/cmd/backup/storages/storage.go b/cmd/backup/storages/storage.go index ef203675..0867df3e 100644 --- a/cmd/backup/storages/storage.go +++ b/cmd/backup/storages/storage.go @@ -17,6 +17,7 @@ type GenericStorage struct { backupRetentionDays int32 backupPruningPrefix string logger *logrus.Logger + config *t.Config } // doPrune holds general control flow that applies to any kind of storage. 
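The wiring used by all of the Init* constructors here, where GenericStorage embeds the Storage interface, each concrete type embeds *GenericStorage, and the constructor closes the loop, is Go's closest substitute for an abstract base class. A runnable toy version of that back-reference, with invented names:

package main

import "fmt"

type storage interface {
	copy(file string) error
}

// generic carries shared state and behavior for all backends.
type generic struct {
	storage // back-reference to the concrete implementation
}

func (g *generic) copyAndLog(file string) error {
	// Dispatches to whichever concrete type was wired in.
	if err := g.storage.copy(file); err != nil {
		return err
	}
	fmt.Println("copied", file)
	return nil
}

type local struct{ *generic }

func (l *local) copy(file string) error {
	fmt.Println("local copy of", file)
	return nil
}

func main() {
	g := &generic{}
	l := &local{g}
	g.storage = l // the back-reference the Init* constructors set via a.Storage = r
	_ = g.copyAndLog("backup.tar.gz")
}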
diff --git a/cmd/backup/storages/webdav.go b/cmd/backup/storages/webdav.go index 482ee564..5ef4a12f 100644 --- a/cmd/backup/storages/webdav.go +++ b/cmd/backup/storages/webdav.go @@ -12,17 +12,16 @@ import ( "time" t "github.com/offen/docker-volume-backup/cmd/backup/types" + "github.com/sirupsen/logrus" "github.com/studio-b12/gowebdav" ) type WebDavStorage struct { *GenericStorage - client *gowebdav.Client - webdavUrl string - webdavPath string + client *gowebdav.Client } -func InitWebDav(c *t.Config) (*WebDavStorage, error) { +func InitWebDav(c *t.Config, l *logrus.Logger) (*WebDavStorage, error) { if c.WebdavUrl == "" { return nil, nil } @@ -43,7 +42,11 @@ func InitWebDav(c *t.Config) (*WebDavStorage, error) { } a := &GenericStorage{} - r := &WebDavStorage{a, webdavClient, c.WebdavUrl, c.WebdavPath} + r := &WebDavStorage{a, webdavClient} + a.backupRetentionDays = c.BackupRetentionDays + a.backupPruningPrefix = c.BackupPruningPrefix + a.logger = l + a.config = c a.Storage = r return r, nil } @@ -55,19 +58,19 @@ func (wd *WebDavStorage) Copy(file string) error { if err != nil { return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) } - if err := wd.client.MkdirAll(wd.webdavPath, 0644); err != nil { - return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", wd.webdavPath, err) + if err := wd.client.MkdirAll(wd.config.WebdavPath, 0644); err != nil { + return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", wd.config.WebdavPath, err) } - if err := wd.client.Write(filepath.Join(wd.webdavPath, name), bytes, 0644); err != nil { + if err := wd.client.Write(filepath.Join(wd.config.WebdavPath, name), bytes, 0644); err != nil { return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) } - wd.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, wd.webdavUrl, wd.webdavPath) + wd.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, wd.config.WebdavUrl, wd.config.WebdavPath) return nil } func (wd *WebDavStorage) Prune(deadline time.Time) (*t.StorageStats, error) { - candidates, err := wd.client.ReadDir(wd.webdavPath) + candidates, err := wd.client.ReadDir(wd.config.WebdavPath) if err != nil { return nil, fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) } @@ -90,7 +93,7 @@ func (wd *WebDavStorage) Prune(deadline time.Time) (*t.StorageStats, error) { wd.doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { for _, match := range matches { - if err := wd.client.Remove(filepath.Join(wd.webdavPath, match.Name())); err != nil { + if err := wd.client.Remove(filepath.Join(wd.config.WebdavPath, match.Name())); err != nil { return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err) } } diff --git a/cmd/backup/types/config.go b/cmd/backup/types/config.go index b4dd1799..ff531e19 100644 --- a/cmd/backup/types/config.go +++ b/cmd/backup/types/config.go @@ -12,6 +12,15 @@ import ( // Config holds all configuration values that are expected to be set // by users. 
type Config struct { + AwsS3BucketName string `split_words:"true"` + AwsS3Path string `split_words:"true"` + AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"` + AwsEndpointProto string `split_words:"true" default:"https"` + AwsEndpointInsecure bool `split_words:"true"` + AwsStorageClass string `split_words:"true"` + AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"` + AwsSecretAccessKey string `split_words:"true"` + AwsIamRoleEndpoint string `split_words:"true"` BackupSources string `split_words:"true" default:"/backup"` BackupFilename string `split_words:"true" default:"backup-%Y-%m-%dT%H-%M-%S.tar.gz"` BackupFilenameExpand bool `split_words:"true"` @@ -49,18 +58,6 @@ type Config struct { LockTimeout time.Duration `split_words:"true" default:"60m"` } -type S3Config struct { - AwsS3BucketName string `split_words:"true"` - AwsS3Path string `split_words:"true"` - AwsEndpoint string `split_words:"true" default:"s3.amazonaws.com"` - AwsEndpointProto string `split_words:"true" default:"https"` - AwsEndpointInsecure bool `split_words:"true"` - AwsStorageClass string `split_words:"true"` - AwsAccessKeyID string `envconfig:"AWS_ACCESS_KEY_ID"` - AwsSecretAccessKey string `split_words:"true"` - AwsIamRoleEndpoint string `split_words:"true"` -} - type RegexpDecoder struct { Re *regexp.Regexp } From d28b1edab22dcfe7108dd672fe55b2ec2c1ff03b Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Fri, 22 Jul 2022 19:42:58 +0200 Subject: [PATCH 08/32] Added StorageProviders to unify all backends. --- cmd/backup/script.go | 61 +++++------------------ cmd/backup/storages/providers.go | 83 ++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 48 deletions(-) create mode 100644 cmd/backup/storages/providers.go diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 29cd4f77..6f493601 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -34,16 +34,13 @@ import ( // script holds all the stateful information required to orchestrate a // single backup run. 
type script struct { - cli *client.Client - s3Storage *strg.S3Storage - webdavStorage *strg.WebDavStorage - sshStorage *strg.SshStorage - localStorage *strg.LocalStorage - logger *logrus.Logger - sender *router.ServiceRouter - template *template.Template - hooks []hook - hookLevel hookLevel + cli *client.Client + providers *strg.StorageProviders + logger *logrus.Logger + sender *router.ServiceRouter + template *template.Template + hooks []hook + hookLevel hookLevel file string stats *t.Stats @@ -102,20 +99,11 @@ func newScript() (*script, error) { s.cli = cli } - if s.s3Storage, err = strg.InitS3(s.c, s.logger); err != nil { + s.providers = &strg.StorageProviders{} + if err = s.providers.InitAll(s.c, s.logger); err != nil { return nil, err } - if s.webdavStorage, err = strg.InitWebDav(s.c, s.logger); err != nil { - return nil, err - } - - if s.sshStorage, err = strg.InitSSH(s.c, s.logger); err != nil { - return nil, err - } - - s.localStorage = strg.InitLocal(s.c, s.logger) - if s.c.EmailNotificationRecipient != "" { emailURL := fmt.Sprintf( "smtp://%s:%s@%s:%d/?from=%s&to=%s", @@ -430,21 +418,10 @@ func (s *script) copyArchive() error { } } - if s.s3Storage != nil { - s.s3Storage.Copy(s.file) - } - - if s.webdavStorage != nil { - s.webdavStorage.Copy(s.file) + if err := s.providers.CopyAll(s.file); err != nil { + return fmt.Errorf("copyBackup: error for at least one storage provider: %w", err) } - if s.sshStorage != nil { - s.sshStorage.Copy(s.file) - } - - if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - s.localStorage.Copy(s.file) - } return nil } @@ -458,20 +435,8 @@ func (s *script) pruneBackups() error { deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway) - if s.s3Storage != nil { - s.s3Storage.Prune(deadline) - } - - if s.webdavStorage != nil { - s.webdavStorage.Prune(deadline) - } - - if s.sshStorage != nil { - s.sshStorage.Prune(deadline) - } - - if _, err := os.Stat(s.c.BackupArchive); !os.IsNotExist(err) { - s.localStorage.Prune(deadline) + if err := s.providers.PruneAll(deadline); err != nil { + return fmt.Errorf("pruneBackup: error for at least one storage provider: %w", err) } return nil diff --git a/cmd/backup/storages/providers.go b/cmd/backup/storages/providers.go new file mode 100644 index 00000000..e0b1f72f --- /dev/null +++ b/cmd/backup/storages/providers.go @@ -0,0 +1,83 @@ +package storages + +import ( + "os" + "time" + + t "github.com/offen/docker-volume-backup/cmd/backup/types" + "github.com/sirupsen/logrus" +) + +type StorageProviders struct { + Local *LocalStorage + S3 *S3Storage + SSH *SshStorage + WebDav *WebDavStorage +} + +func (sp *StorageProviders) InitAll(c *t.Config, l *logrus.Logger) error { + var err error + if sp.S3, err = InitS3(c, l); err != nil { + return err + } + + if sp.WebDav, err = InitWebDav(c, l); err != nil { + return err + } + + if sp.SSH, err = InitSSH(c, l); err != nil { + return err + } + + sp.Local = InitLocal(c, l) + + return nil +} + +func (sp *StorageProviders) CopyAll(file string) error { + if sp.S3 != nil { + if err := sp.S3.Copy(file); err != nil { + return err + } + } + + if sp.WebDav != nil { + if err := sp.WebDav.Copy(file); err != nil { + return err + } + } + + if sp.SSH != nil { + if err := sp.SSH.Copy(file); err != nil { + return err + } + } + + if _, err := os.Stat(sp.Local.config.BackupArchive); !os.IsNotExist(err) { + if err := sp.Local.Copy(file); err != nil { + return err + } + } + + return nil +} + +func (sp *StorageProviders) PruneAll(deadline time.Time) 
error { + if sp.S3 != nil { + sp.S3.Prune(deadline) + } + + if sp.WebDav != nil { + sp.WebDav.Prune(deadline) + } + + if sp.SSH != nil { + sp.SSH.Prune(deadline) + } + + if _, err := os.Stat(sp.Local.config.BackupArchive); !os.IsNotExist(err) { + sp.Local.Prune(deadline) + } + + return nil +} From d60ed9d2297c13d079601cdb2037df92af5a0284 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Fri, 22 Jul 2022 20:33:18 +0200 Subject: [PATCH 09/32] Cleanup, optimizations, comments. --- cmd/backup/script.go | 4 ++-- cmd/backup/storages/local.go | 30 ++++++++++++++---------------- cmd/backup/storages/minio.go | 29 +++++++++++++---------------- cmd/backup/storages/providers.go | 30 +++++++++++++++++------------- cmd/backup/storages/ssh.go | 32 +++++++++++++++----------------- cmd/backup/storages/storage.go | 24 ++++++++++++------------ cmd/backup/storages/webdav.go | 30 ++++++++++++++---------------- 7 files changed, 87 insertions(+), 92 deletions(-) diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 6f493601..f8b429ea 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -35,7 +35,7 @@ import ( // single backup run. type script struct { cli *client.Client - providers *strg.StorageProviders + providers *strg.StoragePool logger *logrus.Logger sender *router.ServiceRouter template *template.Template @@ -99,7 +99,7 @@ func newScript() (*script, error) { s.cli = cli } - s.providers = &strg.StorageProviders{} + s.providers = &strg.StoragePool{} if err = s.providers.InitAll(s.c, s.logger); err != nil { return nil, err } diff --git a/cmd/backup/storages/local.go b/cmd/backup/storages/local.go index b562fe06..53ce025a 100644 --- a/cmd/backup/storages/local.go +++ b/cmd/backup/storages/local.go @@ -16,43 +16,41 @@ type LocalStorage struct { *GenericStorage } +// Specific init procedure for the local storage provider. func InitLocal(c *t.Config, l *logrus.Logger) *LocalStorage { - a := &GenericStorage{} + a := &GenericStorage{&LocalStorage{}, l, c} r := &LocalStorage{a} - a.backupRetentionDays = c.BackupRetentionDays - a.backupPruningPrefix = c.BackupPruningPrefix - a.logger = l - a.config = c - a.Storage = r return r } -func (lc *LocalStorage) Copy(file string) error { +// Specific copy function for the local storage provider. 
+func (stg *LocalStorage) copy(file string) error { _, name := path.Split(file) - if err := u.CopyFile(file, path.Join(lc.config.BackupArchive, name)); err != nil { + if err := u.CopyFile(file, path.Join(stg.config.BackupArchive, name)); err != nil { return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) } - lc.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, lc.config.BackupArchive) + stg.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, stg.config.BackupArchive) - if lc.config.BackupLatestSymlink != "" { - symlink := path.Join(lc.config.BackupArchive, lc.config.BackupLatestSymlink) + if stg.config.BackupLatestSymlink != "" { + symlink := path.Join(stg.config.BackupArchive, stg.config.BackupLatestSymlink) if _, err := os.Lstat(symlink); err == nil { os.Remove(symlink) } if err := os.Symlink(name, symlink); err != nil { return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) } - lc.logger.Infof("Created/Updated symlink `%s` for latest backup.", lc.config.BackupLatestSymlink) + stg.logger.Infof("Created/Updated symlink `%s` for latest backup.", stg.config.BackupLatestSymlink) } return nil } -func (lc *LocalStorage) Prune(deadline time.Time) (*t.StorageStats, error) { +// Specific prune function for the local storage provider. +func (stg *LocalStorage) prune(deadline time.Time) (*t.StorageStats, error) { globPattern := path.Join( - lc.config.BackupArchive, - fmt.Sprintf("%s*", lc.backupPruningPrefix), + stg.config.BackupArchive, + fmt.Sprintf("%s*", stg.config.BackupPruningPrefix), ) globMatches, err := filepath.Glob(globPattern) if err != nil { @@ -99,7 +97,7 @@ func (lc *LocalStorage) Prune(deadline time.Time) (*t.StorageStats, error) { Pruned: uint(len(matches)), } - lc.doPrune(len(matches), len(candidates), "local backup(s)", func() error { + stg.doPrune(len(matches), len(candidates), "local backup(s)", func() error { var removeErrors []error for _, match := range matches { if err := os.Remove(match); err != nil { diff --git a/cmd/backup/storages/minio.go b/cmd/backup/storages/minio.go index 0252d17b..338c5b7a 100644 --- a/cmd/backup/storages/minio.go +++ b/cmd/backup/storages/minio.go @@ -20,6 +20,7 @@ type S3Storage struct { client *minio.Client } +// Specific init procedure for the S3/Minio storage provider. func InitS3(c *t.Config, l *logrus.Logger) (*S3Storage, error) { if c.AwsS3BucketName == "" { return nil, nil @@ -61,34 +62,30 @@ func InitS3(c *t.Config, l *logrus.Logger) (*S3Storage, error) { return nil, fmt.Errorf("newScript: error setting up minio client: %w", err) } - a := &GenericStorage{} + a := &GenericStorage{&S3Storage{}, l, c} r := &S3Storage{a, mc} - a.backupRetentionDays = c.BackupRetentionDays - a.backupPruningPrefix = c.BackupPruningPrefix - a.logger = l - a.config = c - a.Storage = r return r, nil } -func (s3 *S3Storage) Copy(file string) error { - s3.logger.Infof("copyArchive->s3stg: Beginning...") +// Specific copy function for the S3/Minio storage provider. 
+func (stg *S3Storage) copy(file string) error { _, name := path.Split(file) - if _, err := s3.client.FPutObject(context.Background(), s3.config.AwsS3BucketName, filepath.Join(s3.config.AwsS3Path, name), file, minio.PutObjectOptions{ + if _, err := stg.client.FPutObject(context.Background(), stg.config.AwsS3BucketName, filepath.Join(stg.config.AwsS3Path, name), file, minio.PutObjectOptions{ ContentType: "application/tar+gzip", - StorageClass: s3.config.AwsStorageClass, + StorageClass: stg.config.AwsStorageClass, }); err != nil { return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err) } - s3.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, s3.config.AwsS3BucketName) + stg.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.config.AwsS3BucketName) return nil } -func (s3 *S3Storage) Prune(deadline time.Time) (*t.StorageStats, error) { - candidates := s3.client.ListObjects(context.Background(), s3.config.AwsS3BucketName, minio.ListObjectsOptions{ +// Specific prune function for the S3/Minio storage provider. +func (stg *S3Storage) prune(deadline time.Time) (*t.StorageStats, error) { + candidates := stg.client.ListObjects(context.Background(), stg.config.AwsS3BucketName, minio.ListObjectsOptions{ WithMetadata: true, - Prefix: filepath.Join(s3.config.AwsS3Path, s3.backupPruningPrefix), + Prefix: filepath.Join(stg.config.AwsS3Path, stg.config.BackupPruningPrefix), Recursive: true, }) @@ -112,7 +109,7 @@ func (s3 *S3Storage) Prune(deadline time.Time) (*t.StorageStats, error) { Pruned: uint(len(matches)), } - s3.doPrune(len(matches), lenCandidates, "remote backup(s)", func() error { + stg.doPrune(len(matches), lenCandidates, "remote backup(s)", func() error { objectsCh := make(chan minio.ObjectInfo) go func() { for _, match := range matches { @@ -120,7 +117,7 @@ func (s3 *S3Storage) Prune(deadline time.Time) (*t.StorageStats, error) { } close(objectsCh) }() - errChan := s3.client.RemoveObjects(context.Background(), s3.config.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) + errChan := stg.client.RemoveObjects(context.Background(), stg.config.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{}) var removeErrors []error for result := range errChan { if result.Err != nil { diff --git a/cmd/backup/storages/providers.go b/cmd/backup/storages/providers.go index e0b1f72f..ecb61d7e 100644 --- a/cmd/backup/storages/providers.go +++ b/cmd/backup/storages/providers.go @@ -8,14 +8,16 @@ import ( "github.com/sirupsen/logrus" ) -type StorageProviders struct { +// A pool or collection of all implemented storage provider types. +type StoragePool struct { Local *LocalStorage S3 *S3Storage - SSH *SshStorage + SSH *SSHStorage WebDav *WebDavStorage } -func (sp *StorageProviders) InitAll(c *t.Config, l *logrus.Logger) error { +// Init procedure for all available storage providers. +func (sp *StoragePool) InitAll(c *t.Config, l *logrus.Logger) error { var err error if sp.S3, err = InitS3(c, l); err != nil { return err @@ -34,27 +36,28 @@ func (sp *StorageProviders) InitAll(c *t.Config, l *logrus.Logger) error { return nil } -func (sp *StorageProviders) CopyAll(file string) error { +// Copy function for all available storage providers. 
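The S3 pruning above relies on minio-go's channel-based batch deletion: a goroutine streams the matched objects into a channel while the caller drains the returned error channel, so neither side blocks on the other. A stripped-down sketch of that producer/consumer exchange, without the minio client:

package main

import "fmt"

// removeAll mirrors the shape of the RemoveObjects exchange above: one
// goroutine feeds names into a channel while the caller collects errors
// from a second channel until it is closed.
func removeAll(names []string) []error {
	in := make(chan string)
	go func() {
		for _, n := range names {
			in <- n
		}
		close(in)
	}()
	errs := make(chan error)
	go func() {
		for n := range in {
			fmt.Println("removing", n) // the real code delegates this to the minio client
		}
		close(errs)
	}()
	var out []error
	for err := range errs {
		out = append(out, err)
	}
	return out
}

func main() {
	_ = removeAll([]string{"backup-1.tar.gz", "backup-2.tar.gz"})
}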
+func (sp *StoragePool) CopyAll(file string) error { if sp.S3 != nil { - if err := sp.S3.Copy(file); err != nil { + if err := sp.S3.copy(file); err != nil { return err } } if sp.WebDav != nil { - if err := sp.WebDav.Copy(file); err != nil { + if err := sp.WebDav.copy(file); err != nil { return err } } if sp.SSH != nil { - if err := sp.SSH.Copy(file); err != nil { + if err := sp.SSH.copy(file); err != nil { return err } } if _, err := os.Stat(sp.Local.config.BackupArchive); !os.IsNotExist(err) { - if err := sp.Local.Copy(file); err != nil { + if err := sp.Local.copy(file); err != nil { return err } } @@ -62,21 +65,22 @@ func (sp *StorageProviders) CopyAll(file string) error { return nil } -func (sp *StorageProviders) PruneAll(deadline time.Time) error { +// Prune function for all available storage providers. +func (sp *StoragePool) PruneAll(deadline time.Time) error { if sp.S3 != nil { - sp.S3.Prune(deadline) + sp.S3.prune(deadline) } if sp.WebDav != nil { - sp.WebDav.Prune(deadline) + sp.WebDav.prune(deadline) } if sp.SSH != nil { - sp.SSH.Prune(deadline) + sp.SSH.prune(deadline) } if _, err := os.Stat(sp.Local.config.BackupArchive); !os.IsNotExist(err) { - sp.Local.Prune(deadline) + sp.Local.prune(deadline) } return nil diff --git a/cmd/backup/storages/ssh.go b/cmd/backup/storages/ssh.go index 609b6b64..13577ff5 100644 --- a/cmd/backup/storages/ssh.go +++ b/cmd/backup/storages/ssh.go @@ -17,13 +17,14 @@ import ( "golang.org/x/crypto/ssh" ) -type SshStorage struct { +type SSHStorage struct { *GenericStorage client *ssh.Client sftpClient *sftp.Client } -func InitSSH(c *t.Config, l *logrus.Logger) (*SshStorage, error) { +// Specific init procedure for the SSH storage provider. +func InitSSH(c *t.Config, l *logrus.Logger) (*SSHStorage, error) { if c.SSHHostName == "" { return nil, nil } @@ -76,17 +77,13 @@ func InitSSH(c *t.Config, l *logrus.Logger) (*SshStorage, error) { return nil, fmt.Errorf("newScript: error creating sftp client: %w", err) } - a := &GenericStorage{} - r := &SshStorage{a, sshClient, sftpClient} - a.backupRetentionDays = c.BackupRetentionDays - a.backupPruningPrefix = c.BackupPruningPrefix - a.logger = l - a.config = c - a.Storage = r + a := &GenericStorage{&SSHStorage{}, l, c} + r := &SSHStorage{a, sshClient, sftpClient} return r, nil } -func (sh *SshStorage) Copy(file string) error { +// Specific copy function for the SSH storage provider. +func (stg *SSHStorage) copy(file string) error { source, err := os.Open(file) _, name := path.Split(file) if err != nil { @@ -94,7 +91,7 @@ func (sh *SshStorage) Copy(file string) error { } defer source.Close() - destination, err := sh.sftpClient.Create(filepath.Join(sh.config.SSHRemotePath, name)) + destination, err := stg.sftpClient.Create(filepath.Join(stg.config.SSHRemotePath, name)) if err != nil { return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) } @@ -130,20 +127,21 @@ func (sh *SshStorage) Copy(file string) error { } } - sh.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, sh.config.SSHHostName, sh.config.SSHRemotePath) + stg.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.config.SSHHostName, stg.config.SSHRemotePath) return nil } -func (sh *SshStorage) Prune(deadline time.Time) (*t.StorageStats, error) { - candidates, err := sh.sftpClient.ReadDir(sh.config.SSHRemotePath) +// Specific prune function for the SSH storage provider. 
diff --git a/cmd/backup/storages/ssh.go b/cmd/backup/storages/ssh.go
index 609b6b64..13577ff5 100644
--- a/cmd/backup/storages/ssh.go
+++ b/cmd/backup/storages/ssh.go
@@ -17,13 +17,14 @@ import (
     "golang.org/x/crypto/ssh"
 )
 
-type SshStorage struct {
+type SSHStorage struct {
     *GenericStorage
     client     *ssh.Client
     sftpClient *sftp.Client
 }
 
-func InitSSH(c *t.Config, l *logrus.Logger) (*SshStorage, error) {
+// Specific init procedure for the SSH storage provider.
+func InitSSH(c *t.Config, l *logrus.Logger) (*SSHStorage, error) {
     if c.SSHHostName == "" {
         return nil, nil
     }
@@ -76,17 +77,13 @@ func InitSSH(c *t.Config, l *logrus.Logger) (*SshStorage, error) {
         return nil, fmt.Errorf("newScript: error creating sftp client: %w", err)
     }
 
-    a := &GenericStorage{}
-    r := &SshStorage{a, sshClient, sftpClient}
-    a.backupRetentionDays = c.BackupRetentionDays
-    a.backupPruningPrefix = c.BackupPruningPrefix
-    a.logger = l
-    a.config = c
-    a.Storage = r
+    a := &GenericStorage{&SSHStorage{}, l, c}
+    r := &SSHStorage{a, sshClient, sftpClient}
     return r, nil
 }
 
-func (sh *SshStorage) Copy(file string) error {
+// Specific copy function for the SSH storage provider.
+func (stg *SSHStorage) copy(file string) error {
     source, err := os.Open(file)
     _, name := path.Split(file)
     if err != nil {
@@ -94,7 +91,7 @@ func (stg *SSHStorage) copy(file string) error {
     }
     defer source.Close()
 
-    destination, err := sh.sftpClient.Create(filepath.Join(sh.config.SSHRemotePath, name))
+    destination, err := stg.sftpClient.Create(filepath.Join(stg.config.SSHRemotePath, name))
     if err != nil {
         return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err)
     }
@@ -130,20 +127,21 @@ func (stg *SSHStorage) copy(file string) error {
         }
     }
 
-    sh.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, sh.config.SSHHostName, sh.config.SSHRemotePath)
+    stg.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.config.SSHHostName, stg.config.SSHRemotePath)
 
     return nil
 }
 
-func (sh *SshStorage) Prune(deadline time.Time) (*t.StorageStats, error) {
-    candidates, err := sh.sftpClient.ReadDir(sh.config.SSHRemotePath)
+// Specific prune function for the SSH storage provider.
+func (stg *SSHStorage) prune(deadline time.Time) (*t.StorageStats, error) {
+    candidates, err := stg.sftpClient.ReadDir(stg.config.SSHRemotePath)
     if err != nil {
         return nil, fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
     }
 
     var matches []string
     for _, candidate := range candidates {
-        if !strings.HasPrefix(candidate.Name(), sh.backupPruningPrefix) {
+        if !strings.HasPrefix(candidate.Name(), stg.config.BackupPruningPrefix) {
             continue
         }
         if candidate.ModTime().Before(deadline) {
@@ -156,9 +154,9 @@ func (stg *SSHStorage) prune(deadline time.Time) (*t.StorageStats, error) {
         Pruned: uint(len(matches)),
     }
 
-    sh.doPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
+    stg.doPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
         for _, match := range matches {
-            if err := sh.sftpClient.Remove(filepath.Join(sh.config.SSHRemotePath, match)); err != nil {
+            if err := stg.sftpClient.Remove(filepath.Join(stg.config.SSHRemotePath, match)); err != nil {
                 return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err)
             }
         }
diff --git a/cmd/backup/storages/storage.go b/cmd/backup/storages/storage.go
index 0867df3e..09122e4f 100644
--- a/cmd/backup/storages/storage.go
+++ b/cmd/backup/storages/storage.go
@@ -7,38 +7,38 @@ import (
     "github.com/sirupsen/logrus"
 )
 
+// Interface for defining functions which all storage providers support.
 type Storage interface {
-    Copy(file string) error
-    Prune(deadline time.Time) (*t.StorageStats, error)
+    copy(file string) error
+    prune(deadline time.Time) (*t.StorageStats, error)
 }
 
+// Generic type of storage. Everything here are common properties of all storage types.
 type GenericStorage struct {
     Storage
-    backupRetentionDays int32
-    backupPruningPrefix string
-    logger              *logrus.Logger
-    config              *t.Config
+    logger *logrus.Logger
+    config *t.Config
 }
 
 // doPrune holds general control flow that applies to any kind of storage.
 // Callers can pass in a thunk that performs the actual deletion of files.
-func (s *GenericStorage) doPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
+func (stg *GenericStorage) doPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
     if lenMatches != 0 && lenMatches != lenCandidates {
         if err := doRemoveFiles(); err != nil {
             return err
         }
-        s.logger.Infof(
+        stg.logger.Infof(
             "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
             lenMatches,
             lenCandidates,
             description,
-            s.backupRetentionDays,
+            stg.config.BackupRetentionDays,
         )
     } else if lenMatches != 0 && lenMatches == lenCandidates {
-        s.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
-        s.logger.Warn("Refusing to do so, please check your configuration.")
+        stg.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
+        stg.logger.Warn("Refusing to do so, please check your configuration.")
     } else {
-        s.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
+        stg.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
     }
     return nil
 }
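The doPrune seam above is a small template method: the shared guard rails (refusing to delete every existing backup, uniform logging) live in GenericStorage, while each backend hands in a closure that performs the actual deletion. Boiled down to its control flow, the idea looks like this hypothetical, stripped-down sketch; note it tightens the real method's "refuse and warn" branch into an error for brevity:

package main

import "fmt"

// doPrune decides whether deletion should run; the thunk knows how.
func doPrune(matches, candidates int, remove func() error) error {
	switch {
	case matches != 0 && matches == candidates:
		// Deleting every existing backup is almost always a misconfiguration.
		return fmt.Errorf("refusing to delete all %d backups", candidates)
	case matches != 0:
		return remove() // backend-specific deletion
	default:
		return nil // nothing has aged out yet
	}
}

func main() {
	err := doPrune(2, 5, func() error {
		fmt.Println("removing 2 of 5 backups")
		return nil
	})
	fmt.Println("err:", err)
}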
diff --git a/cmd/backup/storages/webdav.go b/cmd/backup/storages/webdav.go
index 5ef4a12f..8b5851c9 100644
--- a/cmd/backup/storages/webdav.go
+++ b/cmd/backup/storages/webdav.go
@@ -21,6 +21,7 @@ type WebDavStorage struct {
     client *gowebdav.Client
 }
 
+// Specific init procedure for the WebDav storage provider.
 func InitWebDav(c *t.Config, l *logrus.Logger) (*WebDavStorage, error) {
     if c.WebdavUrl == "" {
         return nil, nil
@@ -41,43 +42,40 @@ func InitWebDav(c *t.Config, l *logrus.Logger) (*WebDavStorage, error) {
             webdavClient.SetTransport(webdavTransport)
         }
 
-        a := &GenericStorage{}
+        a := &GenericStorage{&WebDavStorage{}, l, c}
         r := &WebDavStorage{a, webdavClient}
-        a.backupRetentionDays = c.BackupRetentionDays
-        a.backupPruningPrefix = c.BackupPruningPrefix
-        a.logger = l
-        a.config = c
-        a.Storage = r
         return r, nil
     }
 }
 
-func (wd *WebDavStorage) Copy(file string) error {
+// Specific copy function for the WebDav storage provider.
+func (stg *WebDavStorage) copy(file string) error {
     bytes, err := os.ReadFile(file)
     _, name := path.Split(file)
     if err != nil {
         return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
     }
-    if err := wd.client.MkdirAll(wd.config.WebdavPath, 0644); err != nil {
-        return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", wd.config.WebdavPath, err)
+    if err := stg.client.MkdirAll(stg.config.WebdavPath, 0644); err != nil {
+        return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", stg.config.WebdavPath, err)
     }
-    if err := wd.client.Write(filepath.Join(wd.config.WebdavPath, name), bytes, 0644); err != nil {
+    if err := stg.client.Write(filepath.Join(stg.config.WebdavPath, name), bytes, 0644); err != nil {
         return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
     }
-    wd.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, wd.config.WebdavUrl, wd.config.WebdavPath)
+    stg.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.config.WebdavUrl, stg.config.WebdavPath)
 
     return nil
 }
 
-func (wd *WebDavStorage) Prune(deadline time.Time) (*t.StorageStats, error) {
-    candidates, err := wd.client.ReadDir(wd.config.WebdavPath)
+// Specific prune function for the WebDav storage provider.
+func (stg *WebDavStorage) prune(deadline time.Time) (*t.StorageStats, error) {
+    candidates, err := stg.client.ReadDir(stg.config.WebdavPath)
     if err != nil {
         return nil, fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
     }
 
     var matches []fs.FileInfo
     var lenCandidates int
     for _, candidate := range candidates {
-        if !strings.HasPrefix(candidate.Name(), wd.backupPruningPrefix) {
+        if !strings.HasPrefix(candidate.Name(), stg.config.BackupPruningPrefix) {
             continue
         }
         lenCandidates++
@@ -91,9 +89,9 @@ func (stg *WebDavStorage) prune(deadline time.Time) (*t.StorageStats, error) {
         Pruned: uint(len(matches)),
     }
 
-    wd.doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
+    stg.doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
         for _, match := range matches {
-            if err := wd.client.Remove(filepath.Join(wd.config.WebdavPath, match.Name())); err != nil {
+            if err := stg.client.Remove(filepath.Join(stg.config.WebdavPath, match.Name())); err != nil {
                 return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
             }
         }

From 2c0e56bfa5b44aafcb468263f3a334302f875c33 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Sun, 24 Jul 2022 15:41:31 +0200
Subject: [PATCH 10/32] Applied discussed changes. See description.

Moved modules to internal packages.
Replaced StoragePool with slice.
Moved conditional for init of storage backends back to script.
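The core idea of patch 10, replacing the nil-checked StoragePool struct with a flat slice, removes a per-provider branch from every call site: only configured backends get appended, so the copy and prune loops need no nil checks. In outline, under the interface shape this patch introduces (the registration calls are placeholders):

package main

import (
	"fmt"
	"time"
)

// Storage is the shape every backend satisfies after patch 10
// (renamed to Backend later in the series).
type Storage interface {
	Copy(file string) error
	Prune(deadline time.Time) error
}

func runAll(pool []Storage, file string, deadline time.Time) error {
	// pool only ever contains configured backends, appended at init time,
	// e.g. pool = append(pool, s3Backend) when a bucket name is set.
	for _, b := range pool {
		if err := b.Copy(file); err != nil {
			return fmt.Errorf("copy failed: %w", err)
		}
		if err := b.Prune(deadline); err != nil {
			return fmt.Errorf("prune failed: %w", err)
		}
	}
	return nil
}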
---
 .gitignore                                  |  3 +
 cmd/backup/hooks.go                         |  2 +-
 cmd/backup/lock.go                          |  2 +-
 cmd/backup/notifications.go                 |  4 +-
 cmd/backup/script.go                        | 64 ++++++++++----
 cmd/backup/storages/providers.go            | 87 -------------------
 .../storage/local}/local.go                 | 52 ++++++-----
 .../minio.go => internal/storage/s3/s3.go   | 51 ++++++-----
 .../storages => internal/storage/ssh}/ssh.go | 46 +++++-----
 .../storages => internal/storage}/storage.go | 28 +++---
 .../storage/webdav}/webdav.go               | 50 ++++++-----
 {cmd/backup => internal}/types/config.go    |  0
 {cmd/backup => internal}/types/stats.go     |  0
 {cmd/backup => internal}/utilities/util.go  |  0
 14 files changed, 179 insertions(+), 210 deletions(-)
 create mode 100644 .gitignore
 delete mode 100644 cmd/backup/storages/providers.go
 rename {cmd/backup/storages => internal/storage/local}/local.go (61%)
 rename cmd/backup/storages/minio.go => internal/storage/s3/s3.go (70%)
 rename {cmd/backup/storages => internal/storage/ssh}/ssh.go (76%)
 rename {cmd/backup/storages => internal/storage}/storage.go (61%)
 rename {cmd/backup/storages => internal/storage/webdav}/webdav.go (61%)
 rename {cmd/backup => internal}/types/config.go (100%)
 rename {cmd/backup => internal}/types/stats.go (100%)
 rename {cmd/backup => internal}/utilities/util.go (100%)

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..460ad511
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+
+.env
+cmd/backup/backup
diff --git a/cmd/backup/hooks.go b/cmd/backup/hooks.go
index b5277eda..96f2edc5 100644
--- a/cmd/backup/hooks.go
+++ b/cmd/backup/hooks.go
@@ -7,7 +7,7 @@ import (
     "fmt"
     "sort"
 
-    u "github.com/offen/docker-volume-backup/cmd/backup/utilities"
+    u "github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 // hook contains a queued action that can be trigger them when the script
diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go
index 417206fc..dbb7d835 100644
--- a/cmd/backup/lock.go
+++ b/cmd/backup/lock.go
@@ -9,7 +9,7 @@ import (
     "time"
 
     "github.com/gofrs/flock"
-    u "github.com/offen/docker-volume-backup/cmd/backup/utilities"
+    u "github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 // lock opens a lockfile at the given location, keeping it locked until the
diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go
index 1bd65b34..52fd5140 100644
--- a/cmd/backup/notifications.go
+++ b/cmd/backup/notifications.go
@@ -12,8 +12,8 @@ import (
     "time"
 
     sTypes "github.com/containrrr/shoutrrr/pkg/types"
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
-    u "github.com/offen/docker-volume-backup/cmd/backup/utilities"
+    t "github.com/offen/docker-volume-backup/internal/types"
+    u "github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 //go:embed notifications.tmpl
diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index f8b429ea..74cd5793 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -14,9 +14,13 @@ import (
     "text/template"
     "time"
 
-    strg "github.com/offen/docker-volume-backup/cmd/backup/storages"
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
-    u "github.com/offen/docker-volume-backup/cmd/backup/utilities"
+    strg "github.com/offen/docker-volume-backup/internal/storage"
+    "github.com/offen/docker-volume-backup/internal/storage/local"
+    "github.com/offen/docker-volume-backup/internal/storage/s3"
+    "github.com/offen/docker-volume-backup/internal/storage/ssh"
+    "github.com/offen/docker-volume-backup/internal/storage/webdav"
+    t "github.com/offen/docker-volume-backup/internal/types"
+    u "github.com/offen/docker-volume-backup/internal/utilities"
 
     "github.com/containrrr/shoutrrr"
     "github.com/containrrr/shoutrrr/pkg/router"
@@ -34,13 +38,13 @@ import (
 // script holds all the stateful information required to orchestrate a
 // single backup run.
 type script struct {
-    cli       *client.Client
-    providers *strg.StoragePool
-    logger    *logrus.Logger
-    sender    *router.ServiceRouter
-    template  *template.Template
-    hooks     []hook
-    hookLevel hookLevel
+    cli         *client.Client
+    storagePool []*strg.StorageBackend
+    logger      *logrus.Logger
+    sender      *router.ServiceRouter
+    template    *template.Template
+    hooks       []hook
+    hookLevel   hookLevel
 
     file  string
     stats *t.Stats
@@ -99,11 +103,33 @@ func newScript() (*script, error) {
         s.cli = cli
     }
 
-    s.providers = &strg.StoragePool{}
-    if err = s.providers.InitAll(s.c, s.logger); err != nil {
-        return nil, err
+    if s.c.AwsS3BucketName != "" {
+        if s3Backend, err := s3.InitS3(s.c, s.logger, s.stats); err != nil {
+            return nil, err
+        } else {
+            s.storagePool = append(s.storagePool, s3Backend)
+        }
+    }
+
+    if s.c.WebdavUrl != "" {
+        if webdavBackend, err := webdav.InitWebDav(s.c, s.logger, s.stats); err != nil {
+            return nil, err
+        } else {
+            s.storagePool = append(s.storagePool, webdavBackend)
+        }
     }
 
+    if s.c.SSHHostName != "" {
+        if sshBackend, err := ssh.InitSSH(s.c, s.logger, s.stats); err != nil {
+            return nil, err
+        } else {
+            s.storagePool = append(s.storagePool, sshBackend)
+        }
+    }
+
+    localBackend := local.InitLocal(s.c, s.logger, s.stats)
+    s.storagePool = append(s.storagePool, localBackend)
+
     if s.c.EmailNotificationRecipient != "" {
         emailURL := fmt.Sprintf(
             "smtp://%s:%s@%s:%d/?from=%s&to=%s",
@@ -418,8 +444,10 @@ func (s *script) copyArchive() error {
         }
     }
 
-    if err := s.providers.CopyAll(s.file); err != nil {
-        return fmt.Errorf("copyBackup: error for at least one storage provider: %w", err)
+    for _, backend := range s.storagePool {
+        if err := backend.Copy(s.file); err != nil {
+            return err
+        }
     }
 
     return nil
@@ -435,8 +463,10 @@ func (s *script) pruneBackups() error {
 
     deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
 
-    if err := s.providers.PruneAll(deadline); err != nil {
-        return fmt.Errorf("pruneBackup: error for at least one storage provider: %w", err)
+    for _, backend := range s.storagePool {
+        if err := backend.Prune(deadline); err != nil {
+            return err
+        }
     }
 
     return nil
diff --git a/cmd/backup/storages/providers.go b/cmd/backup/storages/providers.go
deleted file mode 100644
index ecb61d7e..00000000
--- a/cmd/backup/storages/providers.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package storages
-
-import (
-    "os"
-    "time"
-
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
-    "github.com/sirupsen/logrus"
-)
-
-// A pool or collection of all implemented storage provider types.
-type StoragePool struct {
-    Local  *LocalStorage
-    S3     *S3Storage
-    SSH    *SSHStorage
-    WebDav *WebDavStorage
-}
-
-// Init procedure for all available storage providers.
-func (sp *StoragePool) InitAll(c *t.Config, l *logrus.Logger) error {
-    var err error
-    if sp.S3, err = InitS3(c, l); err != nil {
-        return err
-    }
-
-    if sp.WebDav, err = InitWebDav(c, l); err != nil {
-        return err
-    }
-
-    if sp.SSH, err = InitSSH(c, l); err != nil {
-        return err
-    }
-
-    sp.Local = InitLocal(c, l)
-
-    return nil
-}
-
-// Copy function for all available storage providers.
-func (sp *StoragePool) CopyAll(file string) error {
-    if sp.S3 != nil {
-        if err := sp.S3.copy(file); err != nil {
-            return err
-        }
-    }
-
-    if sp.WebDav != nil {
-        if err := sp.WebDav.copy(file); err != nil {
-            return err
-        }
-    }
-
-    if sp.SSH != nil {
-        if err := sp.SSH.copy(file); err != nil {
-            return err
-        }
-    }
-
-    if _, err := os.Stat(sp.Local.config.BackupArchive); !os.IsNotExist(err) {
-        if err := sp.Local.copy(file); err != nil {
-            return err
-        }
-    }
-
-    return nil
-}
-
-// Prune function for all available storage providers.
-func (sp *StoragePool) PruneAll(deadline time.Time) error {
-    if sp.S3 != nil {
-        sp.S3.prune(deadline)
-    }
-
-    if sp.WebDav != nil {
-        sp.WebDav.prune(deadline)
-    }
-
-    if sp.SSH != nil {
-        sp.SSH.prune(deadline)
-    }
-
-    if _, err := os.Stat(sp.Local.config.BackupArchive); !os.IsNotExist(err) {
-        sp.Local.prune(deadline)
-    }
-
-    return nil
-}
diff --git a/cmd/backup/storages/local.go b/internal/storage/local/local.go
similarity index 61%
rename from cmd/backup/storages/local.go
rename to internal/storage/local/local.go
index 53ce025a..1a2e7906 100644
--- a/cmd/backup/storages/local.go
+++ b/internal/storage/local/local.go
@@ -1,4 +1,4 @@
-package storages
+package local
 
 import (
     "fmt"
@@ -7,54 +7,62 @@ import (
     "path/filepath"
     "time"
 
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
-    u "github.com/offen/docker-volume-backup/cmd/backup/utilities"
+    strg "github.com/offen/docker-volume-backup/internal/storage"
+    t "github.com/offen/docker-volume-backup/internal/types"
+    u "github.com/offen/docker-volume-backup/internal/utilities"
     "github.com/sirupsen/logrus"
 )
 
 type LocalStorage struct {
-    *GenericStorage
+    *strg.StorageBackend
 }
 
 // Specific init procedure for the local storage provider.
-func InitLocal(c *t.Config, l *logrus.Logger) *LocalStorage {
-    a := &GenericStorage{&LocalStorage{}, l, c}
+func InitLocal(c *t.Config, l *logrus.Logger, s *t.Stats) *strg.StorageBackend {
+    a := &strg.StorageBackend{
+        Storage: &LocalStorage{},
+        Name:    "Local",
+        Logger:  l,
+        Config:  c,
+        Stats:   s,
+    }
     r := &LocalStorage{a}
-    return r
+    a.Storage = r
+    return a
 }
 
 // Specific copy function for the local storage provider.
-func (stg *LocalStorage) copy(file string) error {
+func (stg *LocalStorage) Copy(file string) error {
     _, name := path.Split(file)
 
-    if err := u.CopyFile(file, path.Join(stg.config.BackupArchive, name)); err != nil {
+    if err := u.CopyFile(file, path.Join(stg.Config.BackupArchive, name)); err != nil {
         return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
     }
-    stg.logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, stg.config.BackupArchive)
+    stg.Logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, stg.Config.BackupArchive)
 
-    if stg.config.BackupLatestSymlink != "" {
-        symlink := path.Join(stg.config.BackupArchive, stg.config.BackupLatestSymlink)
+    if stg.Config.BackupLatestSymlink != "" {
+        symlink := path.Join(stg.Config.BackupArchive, stg.Config.BackupLatestSymlink)
         if _, err := os.Lstat(symlink); err == nil {
             os.Remove(symlink)
         }
         if err := os.Symlink(name, symlink); err != nil {
             return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
         }
-        stg.logger.Infof("Created/Updated symlink `%s` for latest backup.", stg.config.BackupLatestSymlink)
+        stg.Logger.Infof("Created/Updated symlink `%s` for latest backup.", stg.Config.BackupLatestSymlink)
     }
 
     return nil
 }
 
 // Specific prune function for the local storage provider.
-func (stg *LocalStorage) prune(deadline time.Time) (*t.StorageStats, error) {
+func (stg *LocalStorage) Prune(deadline time.Time) error {
     globPattern := path.Join(
-        stg.config.BackupArchive,
-        fmt.Sprintf("%s*", stg.config.BackupPruningPrefix),
+        stg.Config.BackupArchive,
+        fmt.Sprintf("%s*", stg.Config.BackupPruningPrefix),
     )
     globMatches, err := filepath.Glob(globPattern)
     if err != nil {
-        return nil, fmt.Errorf(
+        return fmt.Errorf(
             "pruneBackups: error looking up matching files using pattern %s: %w",
             globPattern,
             err,
@@ -65,7 +73,7 @@ func (stg *LocalStorage) Prune(deadline time.Time) error {
         fi, err := os.Lstat(candidate)
         if err != nil {
-            return nil, fmt.Errorf(
+            return fmt.Errorf(
                 "pruneBackups: error calling Lstat on file %s: %w",
                 candidate,
                 err,
@@ -81,7 +89,7 @@ func (stg *LocalStorage) Prune(deadline time.Time) error {
         fi, err := os.Stat(candidate)
         if err != nil {
-            return nil, fmt.Errorf(
+            return fmt.Errorf(
                 "pruneBackups: error calling stat on file %s: %w",
                 candidate,
                 err,
@@ -92,12 +100,12 @@ func (stg *LocalStorage) Prune(deadline time.Time) error {
         }
     }
 
-    stats := t.StorageStats{
+    stg.Stats.Storages.Local = t.StorageStats{
         Total:  uint(len(candidates)),
         Pruned: uint(len(matches)),
     }
 
-    stg.doPrune(len(matches), len(candidates), "local backup(s)", func() error {
+    stg.DoPrune(len(matches), len(candidates), "local backup(s)", func() error {
         var removeErrors []error
         for _, match := range matches {
             if err := os.Remove(match); err != nil {
@@ -114,5 +122,5 @@ func (stg *LocalStorage) Prune(deadline time.Time) error {
         return nil
     })
 
-    return &stats, nil
+    return nil
 }
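The local prune path is the only one that has to worry about the "latest" symlink: it globs for the pruning prefix, then uses Lstat (which does not follow links) to drop symlinks from the candidate list before checking ages with Stat. Collapsed into a single pass, the filtering reads like this hypothetical sketch; the directory and prefix are placeholders, and the patch itself keeps the Lstat and Stat passes separate:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// listPruneCandidates globs for backups and skips symlinks, so a
// "latest" pointer never counts as a prunable file.
func listPruneCandidates(dir, prefix string, deadline time.Time) ([]string, error) {
	globMatches, err := filepath.Glob(filepath.Join(dir, prefix+"*"))
	if err != nil {
		return nil, err
	}
	var matches []string
	for _, candidate := range globMatches {
		fi, err := os.Lstat(candidate) // Lstat inspects the link itself
		if err != nil {
			return nil, err
		}
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			continue // never prune the "latest" symlink
		}
		if fi.ModTime().Before(deadline) {
			matches = append(matches, candidate)
		}
	}
	return matches, nil
}

func main() {
	m, err := listPruneCandidates("/archive", "backup-", time.Now().AddDate(0, 0, -7))
	fmt.Println(m, err)
}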
diff --git a/cmd/backup/storages/minio.go b/internal/storage/s3/s3.go
similarity index 70%
rename from cmd/backup/storages/minio.go
rename to internal/storage/s3/s3.go
index 338c5b7a..3271ad69 100644
--- a/cmd/backup/storages/minio.go
+++ b/internal/storage/s3/s3.go
@@ -1,4 +1,4 @@
-package storages
+package s3
 
 import (
     "context"
@@ -10,22 +10,19 @@ import (
     "github.com/minio/minio-go/v7"
     "github.com/minio/minio-go/v7/pkg/credentials"
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
-    u "github.com/offen/docker-volume-backup/cmd/backup/utilities"
+    strg "github.com/offen/docker-volume-backup/internal/storage"
+    t "github.com/offen/docker-volume-backup/internal/types"
+    u "github.com/offen/docker-volume-backup/internal/utilities"
     "github.com/sirupsen/logrus"
 )
 
 type S3Storage struct {
-    *GenericStorage
+    *strg.StorageBackend
     client *minio.Client
 }
 
 // Specific init procedure for the S3/Minio storage provider.
-func InitS3(c *t.Config, l *logrus.Logger) (*S3Storage, error) {
-    if c.AwsS3BucketName == "" {
-        return nil, nil
-    }
-
+func InitS3(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) {
     var creds *credentials.Credentials
     if c.AwsAccessKeyID != "" && c.AwsSecretAccessKey != "" {
         creds = credentials.NewStaticV4(
@@ -62,30 +59,38 @@ func InitS3(c *t.Config, l *logrus.Logger) (*S3Storage, error) {
         return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
     }
 
-    a := &GenericStorage{&S3Storage{}, l, c}
+    a := &strg.StorageBackend{
+        Storage: &S3Storage{},
+        Name:    "S3",
+        Logger:  l,
+        Config:  c,
+        Stats:   s,
+    }
     r := &S3Storage{a, mc}
-    return r, nil
+    a.Storage = r
+    return a, nil
 }
 
 // Specific copy function for the S3/Minio storage provider.
-func (stg *S3Storage) copy(file string) error {
+func (stg *S3Storage) Copy(file string) error {
     _, name := path.Split(file)
-    if _, err := stg.client.FPutObject(context.Background(), stg.config.AwsS3BucketName, filepath.Join(stg.config.AwsS3Path, name), file, minio.PutObjectOptions{
+
+    if _, err := stg.client.FPutObject(context.Background(), stg.Config.AwsS3BucketName, filepath.Join(stg.Config.AwsS3Path, name), file, minio.PutObjectOptions{
         ContentType:  "application/tar+gzip",
-        StorageClass: stg.config.AwsStorageClass,
+        StorageClass: stg.Config.AwsStorageClass,
     }); err != nil {
         return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
     }
-    stg.logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.config.AwsS3BucketName)
+    stg.Logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.Config.AwsS3BucketName)
 
     return nil
 }
 
 // Specific prune function for the S3/Minio storage provider.
-func (stg *S3Storage) prune(deadline time.Time) (*t.StorageStats, error) {
-    candidates := stg.client.ListObjects(context.Background(), stg.config.AwsS3BucketName, minio.ListObjectsOptions{
+func (stg *S3Storage) Prune(deadline time.Time) error {
+    candidates := stg.client.ListObjects(context.Background(), stg.Config.AwsS3BucketName, minio.ListObjectsOptions{
         WithMetadata: true,
-        Prefix:       filepath.Join(stg.config.AwsS3Path, stg.config.BackupPruningPrefix),
+        Prefix:       filepath.Join(stg.Config.AwsS3Path, stg.Config.BackupPruningPrefix),
         Recursive:    true,
     })
 
@@ -94,7 +99,7 @@ func (stg *S3Storage) Prune(deadline time.Time) error {
     for candidate := range candidates {
         lenCandidates++
         if candidate.Err != nil {
-            return nil, fmt.Errorf(
+            return fmt.Errorf(
                 "pruneBackups: error looking up candidates from remote storage: %w",
                 candidate.Err,
             )
@@ -104,12 +109,12 @@ func (stg *S3Storage) Prune(deadline time.Time) error {
         }
     }
 
-    stats := t.StorageStats{
+    stg.Stats.Storages.S3 = t.StorageStats{
         Total:  uint(lenCandidates),
         Pruned: uint(len(matches)),
     }
 
-    stg.doPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
+    stg.DoPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
         objectsCh := make(chan minio.ObjectInfo)
         go func() {
             for _, match := range matches {
@@ -117,7 +122,7 @@ func (stg *S3Storage) Prune(deadline time.Time) error {
             }
             close(objectsCh)
         }()
-        errChan := stg.client.RemoveObjects(context.Background(), stg.config.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
+        errChan := stg.client.RemoveObjects(context.Background(), stg.Config.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
         var removeErrors []error
         for result := range errChan {
             if result.Err != nil {
@@ -130,5 +135,5 @@ func (stg *S3Storage) Prune(deadline time.Time) error {
         return nil
     })
 
-    return &stats, nil
+    return nil
 }
diff --git a/cmd/backup/storages/ssh.go b/internal/storage/ssh/ssh.go
similarity index 76%
rename from cmd/backup/storages/ssh.go
rename to internal/storage/ssh/ssh.go
index 13577ff5..37d3b0e0 100644
--- a/cmd/backup/storages/ssh.go
+++ b/internal/storage/ssh/ssh.go
@@ -1,4 +1,4 @@
-package storages
+package ssh
 
 import (
     "errors"
@@ -11,24 +11,21 @@ import (
     "strings"
     "time"
 
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
+    strg "github.com/offen/docker-volume-backup/internal/storage"
+    t "github.com/offen/docker-volume-backup/internal/types"
     "github.com/pkg/sftp"
     "github.com/sirupsen/logrus"
     "golang.org/x/crypto/ssh"
 )
 
 type SSHStorage struct {
-    *GenericStorage
+    *strg.StorageBackend
     client     *ssh.Client
     sftpClient *sftp.Client
 }
 
 // Specific init procedure for the SSH storage provider.
-func InitSSH(c *t.Config, l *logrus.Logger) (*SSHStorage, error) {
-    if c.SSHHostName == "" {
-        return nil, nil
-    }
-
+func InitSSH(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) {
     var authMethods []ssh.AuthMethod
 
     if c.SSHPassword != "" {
@@ -77,13 +74,20 @@ func InitSSH(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) {
         return nil, fmt.Errorf("newScript: error creating sftp client: %w", err)
     }
 
-    a := &GenericStorage{&SSHStorage{}, l, c}
+    a := &strg.StorageBackend{
+        Storage: &SSHStorage{},
+        Name:    "SSH",
+        Logger:  l,
+        Config:  c,
+        Stats:   s,
+    }
     r := &SSHStorage{a, sshClient, sftpClient}
-    return r, nil
+    a.Storage = r
+    return a, nil
 }
 
 // Specific copy function for the SSH storage provider.
-func (stg *SSHStorage) copy(file string) error {
+func (stg *SSHStorage) Copy(file string) error {
     source, err := os.Open(file)
     _, name := path.Split(file)
     if err != nil {
@@ -91,7 +95,7 @@ func (stg *SSHStorage) Copy(file string) error {
     }
     defer source.Close()
 
-    destination, err := stg.sftpClient.Create(filepath.Join(stg.config.SSHRemotePath, name))
+    destination, err := stg.sftpClient.Create(filepath.Join(stg.Config.SSHRemotePath, name))
     if err != nil {
         return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err)
     }
@@ -127,21 +131,21 @@ func (stg *SSHStorage) Copy(file string) error {
         }
     }
 
-    stg.logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.config.SSHHostName, stg.config.SSHRemotePath)
+    stg.Logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.Config.SSHHostName, stg.Config.SSHRemotePath)
 
     return nil
 }
 
 // Specific prune function for the SSH storage provider.
-func (stg *SSHStorage) prune(deadline time.Time) (*t.StorageStats, error) {
-    candidates, err := stg.sftpClient.ReadDir(stg.config.SSHRemotePath)
+func (stg *SSHStorage) Prune(deadline time.Time) error {
+    candidates, err := stg.sftpClient.ReadDir(stg.Config.SSHRemotePath)
     if err != nil {
-        return nil, fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
+        return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
     }
 
     var matches []string
     for _, candidate := range candidates {
-        if !strings.HasPrefix(candidate.Name(), stg.config.BackupPruningPrefix) {
+        if !strings.HasPrefix(candidate.Name(), stg.Config.BackupPruningPrefix) {
             continue
         }
         if candidate.ModTime().Before(deadline) {
@@ -149,19 +153,19 @@ func (stg *SSHStorage) Prune(deadline time.Time) error {
         }
     }
 
-    stats := t.StorageStats{
+    stg.Stats.Storages.SSH = t.StorageStats{
         Total:  uint(len(candidates)),
         Pruned: uint(len(matches)),
     }
 
-    stg.doPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
+    stg.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
         for _, match := range matches {
-            if err := stg.sftpClient.Remove(filepath.Join(stg.config.SSHRemotePath, match)); err != nil {
+            if err := stg.sftpClient.Remove(filepath.Join(stg.Config.SSHRemotePath, match)); err != nil {
                 return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err)
             }
         }
         return nil
     })
 
-    return &stats, nil
+    return nil
 }
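Worth pausing on the construction idiom used by every Init function in this series: the generic StorageBackend embeds the Storage interface, the concrete backend embeds *StorageBackend, and since each half needs a reference to the other, the wiring happens in two steps (build the generic half, build the concrete half around it, then close the loop). A minimal standalone sketch of the pattern, with toy names:

package main

import "fmt"

type Storage interface {
	Copy(file string) error
}

// StorageBackend holds shared state and dispatches to the concrete
// type through the embedded interface.
type StorageBackend struct {
	Storage        // filled in once the concrete value exists
	Name    string
}

type localStorage struct {
	*StorageBackend
}

func (l *localStorage) Copy(file string) error {
	fmt.Println("copying", file, "via", l.Name) // reaches Name via embedding
	return nil
}

func main() {
	a := &StorageBackend{Name: "Local"}
	r := &localStorage{a}
	a.Storage = r // close the loop: generic half now dispatches to concrete half
	a.Copy("/tmp/backup.tar.gz")
}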
diff --git a/cmd/backup/storages/storage.go b/internal/storage/storage.go
similarity index 61%
rename from cmd/backup/storages/storage.go
rename to internal/storage/storage.go
index 09122e4f..24ff4fd6 100644
--- a/cmd/backup/storages/storage.go
+++ b/internal/storage/storage.go
@@ -1,44 +1,46 @@
-package storages
+package storage
 
 import (
     "time"
 
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
+    t "github.com/offen/docker-volume-backup/internal/types"
     "github.com/sirupsen/logrus"
 )
 
 // Interface for defining functions which all storage providers support.
 type Storage interface {
-    copy(file string) error
-    prune(deadline time.Time) (*t.StorageStats, error)
+    Copy(file string) error
+    Prune(deadline time.Time) error
 }
 
 // Generic type of storage. Everything here are common properties of all storage types.
-type GenericStorage struct {
+type StorageBackend struct {
     Storage
-    logger *logrus.Logger
-    config *t.Config
+    Name   string
+    Logger *logrus.Logger
+    Config *t.Config
+    Stats  *t.Stats
 }
 
 // doPrune holds general control flow that applies to any kind of storage.
 // Callers can pass in a thunk that performs the actual deletion of files.
-func (stg *GenericStorage) doPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
+func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
     if lenMatches != 0 && lenMatches != lenCandidates {
         if err := doRemoveFiles(); err != nil {
             return err
         }
-        stg.logger.Infof(
+        stg.Logger.Infof(
             "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
             lenMatches,
             lenCandidates,
             description,
-            stg.config.BackupRetentionDays,
+            stg.Config.BackupRetentionDays,
         )
     } else if lenMatches != 0 && lenMatches == lenCandidates {
-        stg.logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
-        stg.logger.Warn("Refusing to do so, please check your configuration.")
+        stg.Logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description)
+        stg.Logger.Warn("Refusing to do so, please check your configuration.")
     } else {
-        stg.logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
+        stg.Logger.Infof("None of %d existing %s were pruned.", lenCandidates, description)
     }
     return nil
 }
diff --git a/cmd/backup/storages/webdav.go b/internal/storage/webdav/webdav.go
similarity index 61%
rename from cmd/backup/storages/webdav.go
rename to internal/storage/webdav/webdav.go
index 8b5851c9..6a078d81 100644
--- a/cmd/backup/storages/webdav.go
+++ b/internal/storage/webdav/webdav.go
@@ -1,4 +1,4 @@
-package storages
+package webdav
 
 import (
     "errors"
@@ -11,22 +11,19 @@ import (
     "strings"
     "time"
 
-    t "github.com/offen/docker-volume-backup/cmd/backup/types"
+    strg "github.com/offen/docker-volume-backup/internal/storage"
+    t "github.com/offen/docker-volume-backup/internal/types"
     "github.com/sirupsen/logrus"
     "github.com/studio-b12/gowebdav"
 )
 
 type WebDavStorage struct {
-    *GenericStorage
+    *strg.StorageBackend
     client *gowebdav.Client
 }
 
 // Specific init procedure for the WebDav storage provider.
-func InitWebDav(c *t.Config, l *logrus.Logger) (*WebDavStorage, error) {
-    if c.WebdavUrl == "" {
-        return nil, nil
-    }
-
+func InitWebDav(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) {
     if c.WebdavUsername == "" || c.WebdavPassword == "" {
         return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
     } else {
@@ -42,40 +39,47 @@ func InitWebDav(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) {
             webdavClient.SetTransport(webdavTransport)
         }
 
-        a := &GenericStorage{&WebDavStorage{}, l, c}
+        a := &strg.StorageBackend{
+            Storage: &WebDavStorage{},
+            Name:    "WebDav",
+            Logger:  l,
+            Config:  c,
+            Stats:   s,
+        }
         r := &WebDavStorage{a, webdavClient}
-        return r, nil
+        a.Storage = r
+        return a, nil
     }
 }
 
 // Specific copy function for the WebDav storage provider.
-func (stg *WebDavStorage) copy(file string) error {
+func (stg *WebDavStorage) Copy(file string) error {
     bytes, err := os.ReadFile(file)
     _, name := path.Split(file)
     if err != nil {
         return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
     }
-    if err := stg.client.MkdirAll(stg.config.WebdavPath, 0644); err != nil {
-        return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", stg.config.WebdavPath, err)
+    if err := stg.client.MkdirAll(stg.Config.WebdavPath, 0644); err != nil {
+        return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", stg.Config.WebdavPath, err)
     }
-    if err := stg.client.Write(filepath.Join(stg.config.WebdavPath, name), bytes, 0644); err != nil {
+    if err := stg.client.Write(filepath.Join(stg.Config.WebdavPath, name), bytes, 0644); err != nil {
         return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
     }
-    stg.logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.config.WebdavUrl, stg.config.WebdavPath)
+    stg.Logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.Config.WebdavUrl, stg.Config.WebdavPath)
 
     return nil
 }
 
 // Specific prune function for the WebDav storage provider.
-func (stg *WebDavStorage) prune(deadline time.Time) (*t.StorageStats, error) {
-    candidates, err := stg.client.ReadDir(stg.config.WebdavPath)
+func (stg *WebDavStorage) Prune(deadline time.Time) error {
+    candidates, err := stg.client.ReadDir(stg.Config.WebdavPath)
     if err != nil {
-        return nil, fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
+        return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err)
     }
 
     var matches []fs.FileInfo
     var lenCandidates int
     for _, candidate := range candidates {
-        if !strings.HasPrefix(candidate.Name(), stg.config.BackupPruningPrefix) {
+        if !strings.HasPrefix(candidate.Name(), stg.Config.BackupPruningPrefix) {
             continue
         }
         lenCandidates++
@@ -91,9 +89,9 @@ func (stg *WebDavStorage) Prune(deadline time.Time) error {
         }
     }
 
-    stats := t.StorageStats{
+    stg.Stats.Storages.WebDAV = t.StorageStats{
         Total:  uint(lenCandidates),
         Pruned: uint(len(matches)),
     }
 
-    stg.doPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
+    stg.DoPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error {
         for _, match := range matches {
-            if err := stg.client.Remove(filepath.Join(stg.config.WebdavPath, match.Name())); err != nil {
+            if err := stg.client.Remove(filepath.Join(stg.Config.WebdavPath, match.Name())); err != nil {
                 return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err)
             }
         }
         return nil
     })
 
-    return &stats, nil
+    return nil
 }
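The WebDAV init path clones http.DefaultTransport before relaxing TLS verification, which keeps proxy settings, timeouts and connection pooling intact instead of building a transport from scratch. A self-contained sketch of that switch follows; the nil guard on TLSClientConfig is an addition of this sketch, since a cloned default transport may carry a nil TLS config depending on the Go version:

package main

import (
	"crypto/tls"
	"errors"
	"net/http"
)

// insecureTransport returns a copy of the default transport that
// skips certificate verification, mirroring a WEBDAV_URL_INSECURE switch.
func insecureTransport() (*http.Transport, error) {
	defaultTransport, ok := http.DefaultTransport.(*http.Transport)
	if !ok {
		return nil, errors.New("unexpected type for http.DefaultTransport")
	}
	t := defaultTransport.Clone()
	if t.TLSClientConfig == nil {
		t.TLSClientConfig = &tls.Config{}
	}
	t.TLSClientConfig.InsecureSkipVerify = true // accept self-signed certs
	return t, nil
}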
diff --git a/cmd/backup/types/config.go b/internal/types/config.go
similarity index 100%
rename from cmd/backup/types/config.go
rename to internal/types/config.go
diff --git a/cmd/backup/types/stats.go b/internal/types/stats.go
similarity index 100%
rename from cmd/backup/types/stats.go
rename to internal/types/stats.go
diff --git a/cmd/backup/utilities/util.go b/internal/utilities/util.go
similarity index 100%
rename from cmd/backup/utilities/util.go
rename to internal/utilities/util.go

From 6df4ef05ec7f3a6e6660ccc2688e05b34caa25ae Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Sun, 24 Jul 2022 15:53:39 +0200
Subject: [PATCH 11/32] Fix docker build issue

---
 Dockerfile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Dockerfile b/Dockerfile
index 8d649681..dbfeac63 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,6 +7,7 @@ WORKDIR /app
 COPY go.mod go.sum ./
 RUN go mod download
 COPY cmd/backup ./cmd/backup/
+COPY internal ./internal
 WORKDIR /app/cmd/backup
 RUN go build -o backup .

From 09477099f55bf637876b3c33469630019c3414b6 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Sun, 24 Jul 2022 16:02:11 +0200
Subject: [PATCH 12/32] Fixed accidentally removed local copy condition.

---
 internal/storage/local/local.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
index 1a2e7906..4570e85a 100644
--- a/internal/storage/local/local.go
+++ b/internal/storage/local/local.go
@@ -33,6 +33,10 @@ func InitLocal(c *t.Config, l *logrus.Logger, s *t.Stats) *strg.StorageBackend {
 
 // Specific copy function for the local storage provider.
 func (stg *LocalStorage) Copy(file string) error {
+    if _, err := os.Stat(stg.Config.BackupArchive); os.IsNotExist(err) {
+        return nil
+    }
+
     _, name := path.Split(file)
 
     if err := u.CopyFile(file, path.Join(stg.Config.BackupArchive, name)); err != nil {

From 068a1243f1d0b5db5b396e1801964f91f3684f1c Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Sun, 24 Jul 2022 18:34:07 +0200
Subject: [PATCH 13/32] Delete .gitignore

---
 .gitignore | 3 ---
 1 file changed, 3 deletions(-)
 delete mode 100644 .gitignore

diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 460ad511..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-
-.env
-cmd/backup/backup

From 2d85858da9989a178648d7b1d8fc45ec264ad8f9 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Sun, 24 Jul 2022 19:30:11 +0200
Subject: [PATCH 14/32] Renaming/changes according to review

Renamed Init functions and interface.
Replaced config object with specific config values.
Init func returns interface instead of struct.
Removed custom import names where possible.

---
 cmd/backup/hooks.go               |  4 +-
 cmd/backup/lock.go                |  6 +--
 cmd/backup/notifications.go       |  4 +-
 cmd/backup/script.go              | 39 ++++++++-------
 internal/storage/local/local.go   | 64 +++++++++++-----------
 internal/storage/s3/s3.go         | 83 +++++++++++++++++--------------
 internal/storage/ssh/ssh.go       | 74 +++++++++++++++------------
 internal/storage/storage.go       | 19 +++----
 internal/storage/webdav/webdav.go | 66 ++++++++++++------------
 9 files changed, 194 insertions(+), 165 deletions(-)

diff --git a/cmd/backup/hooks.go b/cmd/backup/hooks.go
index 96f2edc5..92ade4b1 100644
--- a/cmd/backup/hooks.go
+++ b/cmd/backup/hooks.go
@@ -7,7 +7,7 @@ import (
     "fmt"
     "sort"
 
-    u "github.com/offen/docker-volume-backup/internal/utilities"
+    utilites "github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 // hook contains a queued action that can be trigger them when the script
@@ -52,7 +52,7 @@ func (s *script) runHooks(err error) error {
         }
     }
     if len(actionErrors) != 0 {
-        return u.Join(actionErrors...)
+        return utilites.Join(actionErrors...)
     }
     return nil
 }
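The runHooks and sendNotification paths both collect per-action errors into a slice and fold them into one via the utilities Join helper, whose implementation is not shown in this series. Assuming it behaves like a conventional error joiner, the idea can be sketched with the standard library's errors.Join (Go 1.20+):

package main

import (
	"errors"
	"fmt"
)

// joinErrors folds a slice of errors into a single error value,
// in the spirit of the utilities helper used above.
func joinErrors(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	return errors.Join(errs...)
}

func main() {
	err := joinErrors([]error{
		errors.New("hook one failed"),
		errors.New("hook two failed"),
	})
	fmt.Println(err) // both messages, newline-separated
}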
diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go
index dbb7d835..5c684885 100644
--- a/cmd/backup/lock.go
+++ b/cmd/backup/lock.go
@@ -9,7 +9,7 @@ import (
     "time"
 
     "github.com/gofrs/flock"
-    u "github.com/offen/docker-volume-backup/internal/utilities"
+    utilites "github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 // lock opens a lockfile at the given location, keeping it locked until the
@@ -32,7 +32,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
     for {
         acquired, err := fileLock.TryLock()
         if err != nil {
-            return u.Noop, fmt.Errorf("lock: error trying lock: %w", err)
+            return utilites.Noop, fmt.Errorf("lock: error trying lock: %w", err)
         }
         if acquired {
             if s.encounteredLock {
@@ -53,7 +53,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
         case <-retry.C:
             continue
         case <-deadline.C:
-            return u.Noop, errors.New("lock: timed out waiting for lockfile to become available")
+            return utilites.Noop, errors.New("lock: timed out waiting for lockfile to become available")
         }
     }
 }
diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go
index 52fd5140..a5fb5b3a 100644
--- a/cmd/backup/notifications.go
+++ b/cmd/backup/notifications.go
@@ -13,7 +13,7 @@ import (
 
     sTypes "github.com/containrrr/shoutrrr/pkg/types"
     t "github.com/offen/docker-volume-backup/internal/types"
-    u "github.com/offen/docker-volume-backup/internal/utilities"
+    utilites "github.com/offen/docker-volume-backup/internal/utilities"
 )
 
 //go:embed notifications.tmpl
@@ -70,7 +70,7 @@ func (s *script) sendNotification(title, body string) error {
         }
     }
     if len(errs) != 0 {
-        return fmt.Errorf("sendNotification: error sending message: %w", u.Join(errs...))
+        return fmt.Errorf("sendNotification: error sending message: %w", utilites.Join(errs...))
     }
     return nil
 }
diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index 74cd5793..085b8fd7 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -14,13 +14,13 @@ import (
     "text/template"
     "time"
 
-    strg "github.com/offen/docker-volume-backup/internal/storage"
+    "github.com/offen/docker-volume-backup/internal/storage"
     "github.com/offen/docker-volume-backup/internal/storage/local"
     "github.com/offen/docker-volume-backup/internal/storage/s3"
     "github.com/offen/docker-volume-backup/internal/storage/ssh"
     "github.com/offen/docker-volume-backup/internal/storage/webdav"
     t "github.com/offen/docker-volume-backup/internal/types"
-    u "github.com/offen/docker-volume-backup/internal/utilities"
+    utilites "github.com/offen/docker-volume-backup/internal/utilities"
 
     "github.com/containrrr/shoutrrr"
     "github.com/containrrr/shoutrrr/pkg/router"
@@ -39,7 +39,7 @@ import (
 // single backup run.
 type script struct {
     cli         *client.Client
-    storagePool []*strg.StorageBackend
+    storagePool []storage.Backend
     logger      *logrus.Logger
     sender      *router.ServiceRouter
     template    *template.Template
@@ -59,7 +59,7 @@ type script struct {
 // reading from env vars or other configuration sources is expected to happen
 // in this method.
 func newScript() (*script, error) {
-    stdOut, logBuffer := u.Buffer(os.Stdout)
+    stdOut, logBuffer := utilites.Buffer(os.Stdout)
     s := &script{
         c: &t.Config{},
         logger: &logrus.Logger{
@@ -104,7 +104,8 @@ func newScript() (*script, error) {
     if s.c.AwsS3BucketName != "" {
-        if s3Backend, err := s3.InitS3(s.c, s.logger, s.stats); err != nil {
+        if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
+            s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, s.logger, s.stats); err != nil {
             return nil, err
         } else {
             s.storagePool = append(s.storagePool, s3Backend)
@@ -112,7 +113,8 @@ func newScript() (*script, error) {
     }
 
     if s.c.WebdavUrl != "" {
-        if webdavBackend, err := webdav.InitWebDav(s.c, s.logger, s.stats); err != nil {
+        if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
+            s.c.WebdavUrlInsecure, s.logger, s.stats); err != nil {
             return nil, err
         } else {
             s.storagePool = append(s.storagePool, webdavBackend)
@@ -120,14 +122,15 @@ func newScript() (*script, error) {
     }
 
     if s.c.SSHHostName != "" {
-        if sshBackend, err := ssh.InitSSH(s.c, s.logger, s.stats); err != nil {
+        if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
+            s.c.SSHIdentityPassphrase, s.c.WebdavPath, s.logger, s.stats); err != nil {
             return nil, err
         } else {
             s.storagePool = append(s.storagePool, sshBackend)
         }
     }
 
-    localBackend := local.InitLocal(s.c, s.logger, s.stats)
+    localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, s.logger, s.stats)
     s.storagePool = append(s.storagePool, localBackend)
 
     if s.c.EmailNotificationRecipient != "" {
         emailURL := fmt.Sprintf(
             "smtp://%s:%s@%s:%d/?from=%s&to=%s",
@@ -201,14 +204,14 @@ func newScript() (*script, error) {
 // restart everything that has been stopped.
 func (s *script) stopContainers() (func() error, error) {
     if s.cli == nil {
-        return u.Noop, nil
+        return utilites.Noop, nil
     }
 
     allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
         Quiet: true,
     })
     if err != nil {
-        return u.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
+        return utilites.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
     }
 
     containerLabel := fmt.Sprintf(
@@ -224,11 +227,11 @@ func (s *script) stopContainers() (func() error, error) {
     })
 
     if err != nil {
-        return u.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
+        return utilites.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
     }
 
     if len(containersToStop) == 0 {
-        return u.Noop, nil
+        return utilites.Noop, nil
     }
 
     s.logger.Infof(
@@ -253,7 +256,7 @@ func (s *script) stopContainers() (func() error, error) {
         stopError = fmt.Errorf(
             "stopContainersAndRun: %d error(s) stopping containers: %w",
             len(stopErrors),
-            u.Join(stopErrors...),
+            utilites.Join(stopErrors...),
         )
     }
 
@@ -304,7 +307,7 @@ func (s *script) stopContainers() (func() error, error) {
         return fmt.Errorf(
             "stopContainersAndRun: %d error(s) restarting containers and services: %w",
             len(restartErrors),
-            u.Join(restartErrors...),
+            utilites.Join(restartErrors...),
         )
     }
     s.logger.Infof(
@@ -330,7 +333,7 @@ func (s *script) createArchive() error {
     backupSources = filepath.Join("/tmp", s.c.BackupSources)
     // copy before compressing guard against a situation where backup folder's content are still growing.
     s.registerHook(hookLevelPlumbing, func(error) error {
-        if err := u.Remove(backupSources); err != nil {
+        if err := utilites.Remove(backupSources); err != nil {
             return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
         }
         s.logger.Infof("Removed snapshot `%s`.", backupSources)
@@ -347,7 +350,7 @@ func (s *script) createArchive() error {
     tarFile := s.file
     s.registerHook(hookLevelPlumbing, func(error) error {
-        if err := u.Remove(tarFile); err != nil {
+        if err := utilites.Remove(tarFile); err != nil {
             return fmt.Errorf("takeBackup: error removing tar file: %w", err)
         }
         s.logger.Infof("Removed tar file `%s`.", tarFile)
@@ -392,7 +395,7 @@ func (s *script) encryptArchive() error {
     gpgFile := fmt.Sprintf("%s.gpg", s.file)
     s.registerHook(hookLevelPlumbing, func(error) error {
-        if err := u.Remove(gpgFile); err != nil {
+        if err := utilites.Remove(gpgFile); err != nil {
             return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
         }
         s.logger.Infof("Removed GPG file `%s`.", gpgFile)
@@ -464,7 +467,7 @@ func (s *script) pruneBackups() error {
     deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)
 
     for _, backend := range s.storagePool {
-        if err := backend.Prune(deadline); err != nil {
+        if err := backend.Prune(deadline, s.c.BackupPruningPrefix); err != nil {
             return err
         }
     }
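With patch 14, each NewStorageBackend constructor returns the storage.Backend interface rather than a concrete struct, so the only guarantee the pool relies on is the interface contract. A compile-time assertion is the usual way to keep that contract honest as signatures evolve (as Prune does here, gaining a pruningPrefix parameter). A self-contained sketch with toy types:

package main

import "time"

type Backend interface {
	Copy(file string) error
	Prune(deadline time.Time, pruningPrefix string) error
}

type localStorage struct{ /* fields omitted */ }

func (l *localStorage) Copy(file string) error { return nil }
func (l *localStorage) Prune(deadline time.Time, pruningPrefix string) error { return nil }

// Compile-time assertion: the build breaks here if localStorage ever
// drifts from the Backend interface, instead of at a distant call site.
var _ Backend = (*localStorage)(nil)

func main() {}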
diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
index 4570e85a..889e8561 100644
--- a/internal/storage/local/local.go
+++ b/internal/storage/local/local.go
@@ -7,62 +7,66 @@ import (
     "path/filepath"
     "time"
 
-    strg "github.com/offen/docker-volume-backup/internal/storage"
-    t "github.com/offen/docker-volume-backup/internal/types"
-    u "github.com/offen/docker-volume-backup/internal/utilities"
+    "github.com/offen/docker-volume-backup/internal/storage"
+    "github.com/offen/docker-volume-backup/internal/types"
+    utilites "github.com/offen/docker-volume-backup/internal/utilities"
     "github.com/sirupsen/logrus"
 )
 
-type LocalStorage struct {
-    *strg.StorageBackend
+type localStorage struct {
+    *storage.StorageBackend
+    latestSymlink string
 }
 
-// Specific init procedure for the local storage provider.
-func InitLocal(c *t.Config, l *logrus.Logger, s *t.Stats) *strg.StorageBackend {
-    a := &strg.StorageBackend{
-        Storage: &LocalStorage{},
-        Name:    "Local",
-        Logger:  l,
-        Config:  c,
-        Stats:   s,
+// NewStorageBackend creates and initializes a new local storage backend.
+func NewStorageBackend(archivePath string, latestSymlink string, l *logrus.Logger, s *types.Stats) storage.Backend {
+    strgBackend := &storage.StorageBackend{
+        Backend:         &localStorage{},
+        Name:            "Local",
+        DestinationPath: archivePath,
+        Logger:          l,
+        Stats:           s,
     }
-    r := &LocalStorage{a}
-    a.Storage = r
-    return a
+    localBackend := &localStorage{
+        StorageBackend: strgBackend,
+        latestSymlink:  latestSymlink,
+    }
+    strgBackend.Backend = localBackend
+    return strgBackend
 }
 
-// Specific copy function for the local storage provider.
-func (stg *LocalStorage) Copy(file string) error {
-    if _, err := os.Stat(stg.Config.BackupArchive); os.IsNotExist(err) {
+// Copy copies the given file to the local storage backend.
+func (stg *localStorage) Copy(file string) error {
+    if _, err := os.Stat(stg.DestinationPath); os.IsNotExist(err) {
         return nil
     }
 
     _, name := path.Split(file)
 
-    if err := u.CopyFile(file, path.Join(stg.Config.BackupArchive, name)); err != nil {
+    if err := utilites.CopyFile(file, path.Join(stg.DestinationPath, name)); err != nil {
         return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
     }
-    stg.Logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, stg.Config.BackupArchive)
+    stg.Logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath)
 
-    if stg.Config.BackupLatestSymlink != "" {
-        symlink := path.Join(stg.Config.BackupArchive, stg.Config.BackupLatestSymlink)
+    if stg.latestSymlink != "" {
+        symlink := path.Join(stg.DestinationPath, stg.latestSymlink)
         if _, err := os.Lstat(symlink); err == nil {
             os.Remove(symlink)
         }
         if err := os.Symlink(name, symlink); err != nil {
             return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
         }
-        stg.Logger.Infof("Created/Updated symlink `%s` for latest backup.", stg.Config.BackupLatestSymlink)
+        stg.Logger.Infof("Created/Updated symlink `%s` for latest backup.", stg.latestSymlink)
     }
 
     return nil
 }
 
-// Specific prune function for the local storage provider.
-func (stg *LocalStorage) Prune(deadline time.Time) error {
+// Prune rotates away backups according to the configuration and provided deadline for the local storage backend.
+func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error {
     globPattern := path.Join(
-        stg.Config.BackupArchive,
-        fmt.Sprintf("%s*", stg.Config.BackupPruningPrefix),
+        stg.DestinationPath,
+        fmt.Sprintf("%s*", pruningPrefix),
     )
     globMatches, err := filepath.Glob(globPattern)
     if err != nil {
         return fmt.Errorf(
@@ -104,7 +108,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error {
         }
     }
 
-    stg.Stats.Storages.Local = t.StorageStats{
+    stg.Stats.Storages.Local = types.StorageStats{
         Total:  uint(len(candidates)),
         Pruned: uint(len(matches)),
     }
@@ -120,7 +124,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error {
             return fmt.Errorf(
                 "pruneBackups: %d error(s) deleting local files, starting with: %w",
                 len(removeErrors),
-                u.Join(removeErrors...),
+                utilites.Join(removeErrors...),
             )
         }
         return nil
diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go
index 3271ad69..336bb8c7 100644
--- a/internal/storage/s3/s3.go
+++ b/internal/storage/s3/s3.go
@@ -10,22 +10,25 @@ import (
 
     "github.com/minio/minio-go/v7"
     "github.com/minio/minio-go/v7/pkg/credentials"
-    strg "github.com/offen/docker-volume-backup/internal/storage"
-    t "github.com/offen/docker-volume-backup/internal/types"
-    u "github.com/offen/docker-volume-backup/internal/utilities"
+    "github.com/offen/docker-volume-backup/internal/storage"
+    "github.com/offen/docker-volume-backup/internal/types"
+    utilites "github.com/offen/docker-volume-backup/internal/utilities"
     "github.com/sirupsen/logrus"
 )
 
-type S3Storage struct {
-    *strg.StorageBackend
-    client *minio.Client
+type s3Storage struct {
+    *storage.StorageBackend
+    client       *minio.Client
+    bucket       string
+    storageClass string
 }
 
-// Specific init procedure for the S3/Minio storage provider.
-func InitS3(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) {
+// NewStorageBackend creates and initializes a new S3/Minio storage backend.
+func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool,
+    remotePath string, bucket string, storageClass string, l *logrus.Logger, s *types.Stats) (storage.Backend, error) {
+
     var creds *credentials.Credentials
-    if c.AwsAccessKeyID != "" && c.AwsSecretAccessKey != "" {
+    if accessKeyId != "" && secretAccessKey != "" {
         creds = credentials.NewStaticV4(
-            c.AwsAccessKeyID,
-            c.AwsSecretAccessKey,
+            accessKeyId,
+            secretAccessKey,
             "",
         )
-    } else if c.AwsIamRoleEndpoint != "" {
-        creds = credentials.NewIAM(c.AwsIamRoleEndpoint)
+    } else if iamRoleEndpoint != "" {
+        creds = credentials.NewIAM(iamRoleEndpoint)
     } else {
         return nil, errors.New("newScript: AWS_S3_BUCKET_NAME is defined, but no credentials were provided")
     }
 
     options := minio.Options{
         Creds:  creds,
-        Secure: c.AwsEndpointProto == "https",
+        Secure: endpointProto == "https",
     }
 
-    if c.AwsEndpointInsecure {
+    if endpointInsecure {
         if !options.Secure {
             return nil, errors.New("newScript: AWS_ENDPOINT_INSECURE = true is only meaningful for https")
         }
@@ -54,43 +58,48 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string,
         options.Transport = transport
     }
 
-    mc, err := minio.New(c.AwsEndpoint, &options)
+    mc, err := minio.New(endpoint, &options)
     if err != nil {
         return nil, fmt.Errorf("newScript: error setting up minio client: %w", err)
     }
 
-    a := &strg.StorageBackend{
-        Storage: &S3Storage{},
-        Name:    "S3",
-        Logger:  l,
-        Config:  c,
-        Stats:   s,
+    strgBackend := &storage.StorageBackend{
+        Backend:         &s3Storage{},
+        Name:            "S3",
+        DestinationPath: remotePath,
+        Logger:          l,
+        Stats:           s,
+    }
+    sshBackend := &s3Storage{
+        StorageBackend: strgBackend,
+        client:         mc,
+        bucket:         bucket,
+        storageClass:   storageClass,
     }
-    r := &S3Storage{a, mc}
-    a.Storage = r
-    return a, nil
+    strgBackend.Backend = sshBackend
+    return strgBackend, nil
 }
 
 // Copy copies the given file to the S3/Minio storage backend.
-func (stg *S3Storage) Copy(file string) error {
+func (stg *s3Storage) Copy(file string) error {
     _, name := path.Split(file)
 
-    if _, err := stg.client.FPutObject(context.Background(), stg.Config.AwsS3BucketName, filepath.Join(stg.Config.AwsS3Path, name), file, minio.PutObjectOptions{
+    if _, err := stg.client.FPutObject(context.Background(), stg.bucket, filepath.Join(stg.DestinationPath, name), file, minio.PutObjectOptions{
         ContentType:  "application/tar+gzip",
-        StorageClass: stg.Config.AwsStorageClass,
+        StorageClass: stg.storageClass,
     }); err != nil {
         return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err)
     }
-    stg.Logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.Config.AwsS3BucketName)
+    stg.Logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket)
 
     return nil
 }
 
-// Specific prune function for the S3/Minio storage provider.
-func (stg *S3Storage) Prune(deadline time.Time) error {
-    candidates := stg.client.ListObjects(context.Background(), stg.Config.AwsS3BucketName, minio.ListObjectsOptions{
+// Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend.
+func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error {
+    candidates := stg.client.ListObjects(context.Background(), stg.bucket, minio.ListObjectsOptions{
         WithMetadata: true,
-        Prefix:       filepath.Join(stg.Config.AwsS3Path, stg.Config.BackupPruningPrefix),
+        Prefix:       filepath.Join(stg.DestinationPath, pruningPrefix),
         Recursive:    true,
     })
 
@@ -94,7 +99,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error {
     for candidate := range candidates {
         lenCandidates++
         if candidate.Err != nil {
             return fmt.Errorf(
                 "pruneBackups: error looking up candidates from remote storage: %w",
                 candidate.Err,
             )
@@ -104,12 +109,12 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error {
         }
     }
 
-    stg.Stats.Storages.S3 = t.StorageStats{
+    stg.Stats.Storages.S3 = types.StorageStats{
         Total:  uint(lenCandidates),
         Pruned: uint(len(matches)),
     }
 
     stg.DoPrune(len(matches), lenCandidates, "remote backup(s)", func() error {
         objectsCh := make(chan minio.ObjectInfo)
         go func() {
             for _, match := range matches {
@@ -117,7 +122,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error {
             }
             close(objectsCh)
         }()
-        errChan := stg.client.RemoveObjects(context.Background(), stg.Config.AwsS3BucketName, objectsCh, minio.RemoveObjectsOptions{})
+        errChan := stg.client.RemoveObjects(context.Background(), stg.bucket, objectsCh, minio.RemoveObjectsOptions{})
         var removeErrors []error
         for result := range errChan {
             if result.Err != nil {
@@ -130,5 +135,5 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error {
         if len(removeErrors) != 0 {
-            return u.Join(removeErrors...)
+            return utilites.Join(removeErrors...)
         }
         return nil
     })
 
     return nil
 }
diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go
index 37d3b0e0..13ac51eb 100644
--- a/internal/storage/ssh/ssh.go
+++ b/internal/storage/ssh/ssh.go
@@ -11,24 +11,27 @@ import (
     "strings"
     "time"
 
-    strg "github.com/offen/docker-volume-backup/internal/storage"
-    t "github.com/offen/docker-volume-backup/internal/types"
+    "github.com/offen/docker-volume-backup/internal/storage"
+    "github.com/offen/docker-volume-backup/internal/types"
     "github.com/pkg/sftp"
     "github.com/sirupsen/logrus"
     "golang.org/x/crypto/ssh"
 )
 
-type SSHStorage struct {
-    *strg.StorageBackend
+type sshStorage struct {
+    *storage.StorageBackend
     client     *ssh.Client
     sftpClient *sftp.Client
+    hostName   string
 }
 
-// Specific init procedure for the SSH storage provider.
-func InitSSH(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) {
+// NewStorageBackend creates and initializes a new SSH storage backend.
+func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string,
+    l *logrus.Logger, s *types.Stats) (storage.Backend, error) {
+
     var authMethods []ssh.AuthMethod
 
-    if c.SSHPassword != "" {
-        authMethods = append(authMethods, ssh.Password(c.SSHPassword))
+    if password != "" {
+        authMethods = append(authMethods, ssh.Password(password))
     }
 
-    if _, err := os.Stat(c.SSHIdentityFile); err == nil {
-        key, err := ioutil.ReadFile(c.SSHIdentityFile)
+    if _, err := os.Stat(identityFile); err == nil {
+        key, err := ioutil.ReadFile(identityFile)
         if err != nil {
             return nil, errors.New("newScript: error reading the private key")
         }
 
         var signer ssh.Signer
-        if c.SSHIdentityPassphrase != "" {
-            signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(c.SSHIdentityPassphrase))
+        if identityPassphrase != "" {
+            signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(identityPassphrase))
             if err != nil {
                 return nil, errors.New("newScript: error parsing the encrypted private key")
             }
@@ -55,11 +58,11 @@ func NewStorageBackend(hostName string, port string, user string, password string,
     }
 
     sshClientConfig := &ssh.ClientConfig{
-        User:            c.SSHUser,
+        User:            user,
         Auth:            authMethods,
         HostKeyCallback: ssh.InsecureIgnoreHostKey(),
     }
-    sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", c.SSHHostName, c.SSHPort), sshClientConfig)
+    sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", hostName, port), sshClientConfig)
 
     if err != nil {
         return nil, fmt.Errorf("newScript: error creating ssh client: %w", err)
@@ -74,20 +77,25 @@ func NewStorageBackend(hostName string, port string, user string, password string,
         return nil, fmt.Errorf("newScript: error creating sftp client: %w", err)
     }
 
-    strgBackend := &storage.StorageBackend{
-        Storage: &SSHStorage{},
-        Name:    "SSH",
-        Logger:  l,
-        Config:  c,
-        Stats:   s,
+    strgBackend := &storage.StorageBackend{
+        Backend:         &sshStorage{},
+        Name:            "SSH",
+        DestinationPath: remotePath,
+        Logger:          l,
+        Stats:           s,
+    }
+    sshBackend := &sshStorage{
+        StorageBackend: strgBackend,
+        client:         sshClient,
+        sftpClient:     sftpClient,
+        hostName:       hostName,
     }
-    r := &SSHStorage{a, sshClient, sftpClient}
-    a.Storage = r
-    return a, nil
+    strgBackend.Backend = sshBackend
+    return strgBackend, nil
 }
 
 // Copy copies the given file to the SSH storage backend.
-func (stg *SSHStorage) Copy(file string) error {
+func (stg *sshStorage) Copy(file string) error {
     source, err := os.Open(file)
     _, name := path.Split(file)
     if err != nil {
@@ -91,7 +95,7 @@ func (stg *sshStorage) Copy(file string) error {
     }
     defer source.Close()
 
-    destination, err := stg.sftpClient.Create(filepath.Join(stg.Config.SSHRemotePath, name))
+    destination, err := stg.sftpClient.Create(filepath.Join(stg.DestinationPath, name))
     if err != nil {
         return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err)
     }
@@ -127,21 +131,21 @@ func (stg *sshStorage) Copy(file string) error {
         }
     }
 
-    stg.Logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.Config.SSHHostName, stg.Config.SSHRemotePath)
+    stg.Logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath)
 
     return nil
 }
 
-// Specific prune function for the SSH storage provider.
-func (stg *SSHStorage) Prune(deadline time.Time) error {
-    candidates, err := stg.sftpClient.ReadDir(stg.Config.SSHRemotePath)
+// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend.
+func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error {
+    candidates, err := stg.sftpClient.ReadDir(stg.DestinationPath)
     if err != nil {
         return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err)
     }
 
     var matches []string
     for _, candidate := range candidates {
-        if !strings.HasPrefix(candidate.Name(), stg.Config.BackupPruningPrefix) {
+        if !strings.HasPrefix(candidate.Name(), pruningPrefix) {
             continue
         }
         if candidate.ModTime().Before(deadline) {
@@ -149,19 +153,19 @@ func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error {
         }
     }
 
-    stg.Stats.Storages.SSH = t.StorageStats{
+    stg.Stats.Storages.SSH = types.StorageStats{
         Total:  uint(len(candidates)),
         Pruned: uint(len(matches)),
     }
 
     stg.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error {
         for _, match := range matches {
-            if err := stg.sftpClient.Remove(filepath.Join(stg.Config.SSHRemotePath, match)); err != nil {
+            if err := stg.sftpClient.Remove(filepath.Join(stg.DestinationPath, match)); err != nil {
                 return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err)
             }
         }
         return nil
     })
 
     return nil
 }
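The SSH constructor builds its auth method list with a simple precedence: an optional password first, then an identity file that may itself be passphrase-protected. Extracted into a standalone helper, the same flow looks like this sketch (file paths are placeholders; unlike the patch, it uses os.ReadFile rather than the deprecated ioutil.ReadFile):

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

// buildAuthMethods mirrors the init flow above: password auth when set,
// plus public-key auth when an identity file is readable.
func buildAuthMethods(password, identityFile, passphrase string) ([]ssh.AuthMethod, error) {
	var methods []ssh.AuthMethod
	if password != "" {
		methods = append(methods, ssh.Password(password))
	}
	if key, err := os.ReadFile(identityFile); err == nil {
		var signer ssh.Signer
		if passphrase != "" {
			signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(passphrase))
		} else {
			signer, err = ssh.ParsePrivateKey(key)
		}
		if err != nil {
			return nil, fmt.Errorf("parsing private key: %w", err)
		}
		methods = append(methods, ssh.PublicKeys(signer))
	}
	return methods, nil
}

func main() {
	m, err := buildAuthMethods("", "/root/.ssh/id_rsa", "")
	fmt.Println(len(m), err)
}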
-func (stg *SSHStorage) Prune(deadline time.Time) error { - candidates, err := stg.sftpClient.ReadDir(stg.Config.SSHRemotePath) +// Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend. +func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error { + candidates, err := stg.sftpClient.ReadDir(stg.DestinationPath) if err != nil { return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) } var matches []string for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), stg.Config.BackupPruningPrefix) { + if !strings.HasPrefix(candidate.Name(), pruningPrefix) { continue } if candidate.ModTime().Before(deadline) { @@ -153,14 +161,14 @@ func (stg *SSHStorage) Prune(deadline time.Time) error { } } - stg.Stats.Storages.SSH = t.StorageStats{ + stg.Stats.Storages.SSH = types.StorageStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } stg.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { - if err := stg.sftpClient.Remove(filepath.Join(stg.Config.SSHRemotePath, match)); err != nil { + if err := stg.sftpClient.Remove(filepath.Join(stg.DestinationPath, match)); err != nil { return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) } } diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 24ff4fd6..cd6060f2 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -8,21 +8,22 @@ import ( ) // Interface for defining functions which all storage providers support. -type Storage interface { +type Backend interface { Copy(file string) error - Prune(deadline time.Time) error + Prune(deadline time.Time, pruningPrefix string) error } // Generic type of storage. Everything here are common properties of all storage types. type StorageBackend struct { - Storage - Name string - Logger *logrus.Logger - Config *t.Config - Stats *t.Stats + Backend + Name string + DestinationPath string + RetentionDays int + Logger *logrus.Logger + Stats *t.Stats } -// doPrune holds general control flow that applies to any kind of storage. +// DoPrune holds general control flow that applies to any kind of storage. // Callers can pass in a thunk that performs the actual deletion of files. 
func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { if lenMatches != 0 && lenMatches != lenCandidates { @@ -34,7 +35,7 @@ func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description st lenMatches, lenCandidates, description, - stg.Config.BackupRetentionDays, + stg.RetentionDays, ) } else if lenMatches != 0 && lenMatches == lenCandidates { stg.Logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description) diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index 6a078d81..3f7448a1 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -11,75 +11,79 @@ import ( "strings" "time" - strg "github.com/offen/docker-volume-backup/internal/storage" - t "github.com/offen/docker-volume-backup/internal/types" + "github.com/offen/docker-volume-backup/internal/storage" + "github.com/offen/docker-volume-backup/internal/types" "github.com/sirupsen/logrus" "github.com/studio-b12/gowebdav" ) -type WebDavStorage struct { - *strg.StorageBackend +type webDavStorage struct { + *storage.StorageBackend client *gowebdav.Client + url string } -// Specific init procedure for the WebDav storage provider. -func InitWebDav(c *t.Config, l *logrus.Logger, s *t.Stats) (*strg.StorageBackend, error) { - if c.WebdavUsername == "" || c.WebdavPassword == "" { +// NewStorageBackend creates and initializes a new WebDav storage backend. +func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool, l *logrus.Logger, s *types.Stats) (storage.Backend, error) { + if username == "" || password == "" { return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") } else { - webdavClient := gowebdav.NewClient(c.WebdavUrl, c.WebdavUsername, c.WebdavPassword) + webdavClient := gowebdav.NewClient(url, username, password) - if c.WebdavUrlInsecure { + if urlInsecure { defaultTransport, ok := http.DefaultTransport.(*http.Transport) if !ok { return nil, errors.New("newScript: unexpected error when asserting type for http.DefaultTransport") } webdavTransport := defaultTransport.Clone() - webdavTransport.TLSClientConfig.InsecureSkipVerify = c.WebdavUrlInsecure + webdavTransport.TLSClientConfig.InsecureSkipVerify = urlInsecure webdavClient.SetTransport(webdavTransport) } - a := &strg.StorageBackend{ - Storage: &WebDavStorage{}, - Name: "WebDav", - Logger: l, - Config: c, - Stats: s, + strgBackend := &storage.StorageBackend{ + Backend: &webDavStorage{}, + Name: "WebDav", + DestinationPath: remotePath, + Logger: l, + Stats: s, } - r := &WebDavStorage{a, webdavClient} - a.Storage = r - return a, nil + webdavBackend := &webDavStorage{ + StorageBackend: strgBackend, + client: webdavClient, + } + strgBackend.Backend = webdavBackend + return strgBackend, nil } } -// Specific copy function for the WebDav storage provider. -func (stg *WebDavStorage) Copy(file string) error { +// Copy copies the given file to the WebDav storage backend. 
+func (stg *webDavStorage) Copy(file string) error { bytes, err := os.ReadFile(file) _, name := path.Split(file) if err != nil { return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) } - if err := stg.client.MkdirAll(stg.Config.WebdavPath, 0644); err != nil { - return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", stg.Config.WebdavPath, err) + if err := stg.client.MkdirAll(stg.DestinationPath, 0644); err != nil { + return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", stg.DestinationPath, err) } - if err := stg.client.Write(filepath.Join(stg.Config.WebdavPath, name), bytes, 0644); err != nil { + if err := stg.client.Write(filepath.Join(stg.DestinationPath, name), bytes, 0644); err != nil { return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) } - stg.Logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.Config.WebdavUrl, stg.Config.WebdavPath) + stg.Logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) return nil } -// Specific prune function for the WebDav storage provider. -func (stg *WebDavStorage) Prune(deadline time.Time) error { - candidates, err := stg.client.ReadDir(stg.Config.WebdavPath) +// Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend. +func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) error { + candidates, err := stg.client.ReadDir(stg.DestinationPath) if err != nil { return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) } var matches []fs.FileInfo var lenCandidates int for _, candidate := range candidates { - if !strings.HasPrefix(candidate.Name(), stg.Config.BackupPruningPrefix) { + if !strings.HasPrefix(candidate.Name(), pruningPrefix) { continue } lenCandidates++ @@ -88,14 +92,14 @@ func (stg *WebDavStorage) Prune(deadline time.Time) error { } } - stg.Stats.Storages.WebDAV = t.StorageStats{ + stg.Stats.Storages.WebDAV = types.StorageStats{ Total: uint(lenCandidates), Pruned: uint(len(matches)), } stg.DoPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { for _, match := range matches { - if err := stg.client.Remove(filepath.Join(stg.Config.WebdavPath, match.Name())); err != nil { + if err := stg.client.Remove(filepath.Join(stg.DestinationPath, match.Name())); err != nil { return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err) } } From ce432017784b76e4c9826b233494961ac27958ea Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Sun, 24 Jul 2022 19:50:52 +0200 Subject: [PATCH 15/32] Fixed auto-complete error. 
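The one-line fix that follows swaps s.c.WebdavPath for s.c.SSHRemotePath in the SSH constructor call. With seven adjacent string parameters of identical type, a mis-completed argument like this compiles without complaint. A hypothetical sketch (not something this series implements) of how a typed options struct would make such call sites self-describing:

// Hypothetical alternative, for illustration only: named fields make a
// mixed-up argument stand out at the call site instead of compiling silently.
type SSHConfig struct {
	HostName           string
	Port               string
	User               string
	Password           string
	IdentityFile       string
	IdentityPassphrase string
	RemotePath         string
}

// A caller would then write something like:
//
//	ssh.NewStorageBackend(SSHConfig{
//		HostName:   s.c.SSHHostName,
//		RemotePath: s.c.SSHRemotePath, // a WebdavPath here would read as obviously wrong
//		// ...
//	}, s.logger, s.stats)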
---
 cmd/backup/script.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index 085b8fd7..913b7bcf 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -123,7 +123,7 @@ func newScript() (*script, error) {
 	if s.c.SSHHostName != "" {
 		if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
-			s.c.SSHIdentityPassphrase, s.c.WebdavPath, s.logger, s.stats); err != nil {
+			s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, s.logger, s.stats); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, sshBackend)

From 01e40fd097e182f0ee221e9cc75d36782e1623bb Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Mon, 25 Jul 2022 18:39:22 +0200
Subject: [PATCH 16/32] Combined copy instructions into one layer.

---
 .dockerignore | 7 +++++++
 Dockerfile    | 4 +---
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/.dockerignore b/.dockerignore
index 9daeafb9..d9026017 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1 +1,8 @@
 test
+.circleci
+.github
+docs
+.editorconfig
+Dockerfile
+LICENSE
+README.md
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index dbfeac63..193034b2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,10 +4,8 @@ FROM golang:1.18-alpine as builder

 WORKDIR /app

-COPY go.mod go.sum ./
+COPY . .
 RUN go mod download
-COPY cmd/backup ./cmd/backup/
-COPY internal ./internal

 WORKDIR /app/cmd/backup
 RUN go build -o backup .

From 76826bb9dd264a1327fa6ea5279d13c4930fbe11 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Mon, 25 Jul 2022 18:57:25 +0200
Subject: [PATCH 17/32] Added logging func for storages.

---
 cmd/backup/script.go              | 19 +++++++++++++++----
 internal/storage/local/local.go   |  9 ++++-----
 internal/storage/s3/s3.go         |  7 +++----
 internal/storage/ssh/ssh.go       |  7 +++----
 internal/storage/storage.go       | 19 +++++++++++++------
 internal/storage/webdav/webdav.go |  9 +++++----
 6 files changed, 43 insertions(+), 27 deletions(-)

diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index 913b7bcf..3b3435c5 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -103,9 +103,20 @@ func newScript() (*script, error) {
 		s.cli = cli
 	}

+	logFunc := func(logType storage.LogType, msg string, params ...interface{}) {
+		switch logType {
+		case storage.INFO:
+			s.logger.Infof(msg, params)
+		case storage.WARNING:
+			s.logger.Warnf(msg, params)
+		case storage.ERROR:
+			s.logger.Errorf(msg, params)
+		}
+	}
+
 	if s.c.AwsS3BucketName != "" {
 		if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
-			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, s.logger, s.stats); err != nil {
+			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc, s.stats); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, s3Backend)
@@ -114,7 +125,7 @@ func newScript() (*script, error) {

 	if s.c.WebdavUrl != "" {
 		if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
-			s.c.WebdavUrlInsecure, s.logger, s.stats); err != nil {
+			s.c.WebdavUrlInsecure, logFunc, s.stats); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, webdavBackend)
@@ -123,14 +134,14 @@ func newScript() (*script, error) {

 	if
s.c.SSHHostName != "" { if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile, - s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, s.logger, s.stats); err != nil { + s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc, s.stats); err != nil { return nil, err } else { s.storagePool = append(s.storagePool, sshBackend) } } - localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, s.logger, s.stats) + localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc, s.stats) s.storagePool = append(s.storagePool, localBackend) if s.c.EmailNotificationRecipient != "" { diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index 889e8561..32ddfd64 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -10,7 +10,6 @@ import ( "github.com/offen/docker-volume-backup/internal/storage" "github.com/offen/docker-volume-backup/internal/types" utilites "github.com/offen/docker-volume-backup/internal/utilities" - "github.com/sirupsen/logrus" ) type localStorage struct { @@ -19,12 +18,12 @@ type localStorage struct { } // NewStorageBackend creates and initializes a new local storage backend. -func NewStorageBackend(archivePath string, latestSymlink string, l *logrus.Logger, s *types.Stats) storage.Backend { +func NewStorageBackend(archivePath string, latestSymlink string, logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) storage.Backend { strgBackend := &storage.StorageBackend{ Backend: &localStorage{}, Name: "Local", DestinationPath: archivePath, - Logger: l, + Log: logFunc, Stats: s, } localBackend := &localStorage{ @@ -46,7 +45,7 @@ func (stg *localStorage) Copy(file string) error { if err := utilites.CopyFile(file, path.Join(stg.DestinationPath, name)); err != nil { return fmt.Errorf("copyBackup: error copying file to local archive: %w", err) } - stg.Logger.Infof("Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath) + stg.Log(storage.INFO, "Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath) if stg.latestSymlink != "" { symlink := path.Join(stg.DestinationPath, stg.latestSymlink) @@ -56,7 +55,7 @@ func (stg *localStorage) Copy(file string) error { if err := os.Symlink(name, symlink); err != nil { return fmt.Errorf("copyBackup: error creating latest symlink: %w", err) } - stg.Logger.Infof("Created/Updated symlink `%s` for latest backup.", stg.latestSymlink) + stg.Log(storage.INFO, "Created/Updated symlink `%s` for latest backup.", stg.latestSymlink) } return nil diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index 336bb8c7..4ad1d2bf 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -13,7 +13,6 @@ import ( "github.com/offen/docker-volume-backup/internal/storage" "github.com/offen/docker-volume-backup/internal/types" utilites "github.com/offen/docker-volume-backup/internal/utilities" - "github.com/sirupsen/logrus" ) type s3Storage struct { @@ -25,7 +24,7 @@ type s3Storage struct { // NewStorageBackend creates and initializes a new S3/Minio storage backend. 
func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool, - remotePath string, bucket string, storageClass string, l *logrus.Logger, s *types.Stats) (storage.Backend, error) { + remotePath string, bucket string, storageClass string, logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) (storage.Backend, error) { var creds *credentials.Credentials if accessKeyId != "" && secretAccessKey != "" { @@ -67,7 +66,7 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri Backend: &s3Storage{}, Name: "S3", DestinationPath: remotePath, - Logger: l, + Log: logFunc, Stats: s, } sshBackend := &s3Storage{ @@ -90,7 +89,7 @@ func (stg *s3Storage) Copy(file string) error { }); err != nil { return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err) } - stg.Logger.Infof("Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) + stg.Log(storage.INFO, "Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) return nil } diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index 13ac51eb..be71e6e3 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -14,7 +14,6 @@ import ( "github.com/offen/docker-volume-backup/internal/storage" "github.com/offen/docker-volume-backup/internal/types" "github.com/pkg/sftp" - "github.com/sirupsen/logrus" "golang.org/x/crypto/ssh" ) @@ -27,7 +26,7 @@ type sshStorage struct { // NewStorageBackend creates and initializes a new SSH storage backend. func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string, - l *logrus.Logger, s *types.Stats) (storage.Backend, error) { + logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) (storage.Backend, error) { var authMethods []ssh.AuthMethod @@ -81,7 +80,7 @@ func NewStorageBackend(hostName string, port string, user string, password strin Backend: &sshStorage{}, Name: "SSH", DestinationPath: remotePath, - Logger: l, + Log: logFunc, Stats: s, } sshBackend := &sshStorage{ @@ -139,7 +138,7 @@ func (stg *sshStorage) Copy(file string) error { } } - stg.Logger.Infof("Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath) + stg.Log(storage.INFO, "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath) return nil } diff --git a/internal/storage/storage.go b/internal/storage/storage.go index cd6060f2..7fba6541 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -4,7 +4,6 @@ import ( "time" t "github.com/offen/docker-volume-backup/internal/types" - "github.com/sirupsen/logrus" ) // Interface for defining functions which all storage providers support. @@ -19,10 +18,18 @@ type StorageBackend struct { Name string DestinationPath string RetentionDays int - Logger *logrus.Logger + Log func(logType LogType, msg string, params ...interface{}) Stats *t.Stats } +type LogType string + +const ( + INFO LogType = "INFO" + WARNING LogType = "WARNING" + ERROR LogType = "ERROR" +) + // DoPrune holds general control flow that applies to any kind of storage. // Callers can pass in a thunk that performs the actual deletion of files. 
func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { @@ -30,7 +37,7 @@ func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description st if err := doRemoveFiles(); err != nil { return err } - stg.Logger.Infof( + stg.Log(INFO, "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", lenMatches, lenCandidates, @@ -38,10 +45,10 @@ func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description st stg.RetentionDays, ) } else if lenMatches != 0 && lenMatches == lenCandidates { - stg.Logger.Warnf("The current configuration would delete all %d existing %s.", lenMatches, description) - stg.Logger.Warn("Refusing to do so, please check your configuration.") + stg.Log(WARNING, "The current configuration would delete all %d existing %s.", lenMatches, description) + stg.Log(WARNING, "Refusing to do so, please check your configuration.") } else { - stg.Logger.Infof("None of %d existing %s were pruned.", lenCandidates, description) + stg.Log(INFO, "None of %d existing %s were pruned.", lenCandidates, description) } return nil } diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index 3f7448a1..c17ae18c 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -13,7 +13,6 @@ import ( "github.com/offen/docker-volume-backup/internal/storage" "github.com/offen/docker-volume-backup/internal/types" - "github.com/sirupsen/logrus" "github.com/studio-b12/gowebdav" ) @@ -24,7 +23,9 @@ type webDavStorage struct { } // NewStorageBackend creates and initializes a new WebDav storage backend. -func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool, l *logrus.Logger, s *types.Stats) (storage.Backend, error) { +func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool, + logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) (storage.Backend, error) { + if username == "" || password == "" { return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") } else { @@ -44,7 +45,7 @@ func NewStorageBackend(url string, remotePath string, username string, password Backend: &webDavStorage{}, Name: "WebDav", DestinationPath: remotePath, - Logger: l, + Log: logFunc, Stats: s, } webdavBackend := &webDavStorage{ @@ -69,7 +70,7 @@ func (stg *webDavStorage) Copy(file string) error { if err := stg.client.Write(filepath.Join(stg.DestinationPath, name), bytes, 0644); err != nil { return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err) } - stg.Logger.Infof("Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) + stg.Log(storage.INFO, "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) return nil } From d0a4ade1cf1516c95964676198523ec633db3bca Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 25 Jul 2022 19:33:07 +0200 Subject: [PATCH 18/32] Introduced logging func for errors too. 
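This patch turns the log callback into an error constructor as well: ERROR invocations now build and return an error instead of writing to the log, so the storage code below can log and propagate in a single `return stg.Log(storage.ERROR, ...)` statement. It also fixes a latent bug from the previous patch, where `params` was handed to logrus as one slice argument instead of being spread with `...`. A self-contained sketch of the pattern, using the standard library logger instead of logrus so it runs on its own:

package main

import (
	"errors"
	"fmt"
	"log"
)

type LogType string

const (
	INFO    LogType = "INFO"
	WARNING LogType = "WARNING"
	ERROR   LogType = "ERROR"
)

// logFunc mirrors the LogFuncDef shape introduced below: the context (the
// backend name) is prepended to the format arguments, and the ERROR branch
// returns a formatted error rather than logging it.
func logFunc(logType LogType, context string, msg string, params ...interface{}) error {
	allParams := append([]interface{}{context}, params...)
	if logType == ERROR {
		// fmt.Errorf keeps %w semantics, so errors.Is/errors.As still
		// work on whatever error the storage backend wrapped.
		return fmt.Errorf("[%s] "+msg, allParams...)
	}
	log.Printf(string(logType)+" [%s] "+msg, allParams...)
	return nil
}

func main() {
	logFunc(INFO, "SSH", "Uploaded a copy of backup `%s`.", "/tmp/backup.tar.gz")
	err := logFunc(ERROR, "SSH", "Copy: Error creating file on SSH storage! %w", errors.New("permission denied"))
	fmt.Println(err) // [SSH] Copy: Error creating file on SSH storage! permission denied
}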
---
 cmd/backup/script.go              | 17 +++++++++++++----
 internal/storage/local/local.go   | 28 +++++++++++++++-------------
 internal/storage/s3/s3.go         | 15 +++++++--------
 internal/storage/ssh/ssh.go       | 26 +++++++++++++-------------
 internal/storage/storage.go       | 12 +++++++-----
 internal/storage/webdav/webdav.go | 15 +++++++--------
 6 files changed, 62 insertions(+), 51 deletions(-)

diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index 3b3435c5..f817775e 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -103,14 +103,23 @@ func newScript() (*script, error) {
 		s.cli = cli
 	}

-	logFunc := func(logType storage.LogType, msg string, params ...interface{}) {
+	logFunc := func(logType storage.LogType, context string, msg string, params ...interface{}) error {
+		var allParams []interface{}
+		allParams = append(allParams, context)
+		allParams = append(allParams, params...)
+
 		switch logType {
 		case storage.INFO:
-			s.logger.Infof(msg, params)
+			s.logger.Infof("[%s] "+msg, allParams...)
+			return nil
 		case storage.WARNING:
-			s.logger.Warnf(msg, params)
+			s.logger.Warnf("[%s] "+msg, allParams...)
+			return nil
 		case storage.ERROR:
-			s.logger.Errorf(msg, params)
+			return fmt.Errorf("[%s] "+msg, allParams...)
+		default:
+			s.logger.Warnf("[%s] "+msg, allParams...)
+			return nil
 		}
 	}

diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
index 32ddfd64..712bdcf0 100644
--- a/internal/storage/local/local.go
+++ b/internal/storage/local/local.go
@@ -18,7 +18,9 @@ type localStorage struct {
 }

 // NewStorageBackend creates and initializes a new local storage backend.
-func NewStorageBackend(archivePath string, latestSymlink string, logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) storage.Backend {
+func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.LogFuncDef,
+	s *types.Stats) storage.Backend {
+
 	strgBackend := &storage.StorageBackend{
 		Backend:         &localStorage{},
 		Name:            "Local",
@@ -43,9 +45,9 @@ func (stg *localStorage) Copy(file string) error {
 	_, name := path.Split(file)

 	if err := utilites.CopyFile(file, path.Join(stg.DestinationPath, name)); err != nil {
-		return fmt.Errorf("copyBackup: error copying file to local archive: %w", err)
+		return stg.Log(storage.ERROR, stg.Name, "copyBackup: error copying file to local archive: %w", err)
 	}
-	stg.Log(storage.INFO, "Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath)
+	stg.Log(storage.INFO, stg.Name, "Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath)

 	if stg.latestSymlink != "" {
 		symlink := path.Join(stg.DestinationPath, stg.latestSymlink)
@@ -53,9 +55,9 @@ func (stg *localStorage) Copy(file string) error {
 			os.Remove(symlink)
 		}
 		if err := os.Symlink(name, symlink); err != nil {
-			return fmt.Errorf("copyBackup: error creating latest symlink: %w", err)
+			return stg.Log(storage.ERROR, stg.Name, "Copy: error creating latest symlink! %w", err)
 		}
-		stg.Log(storage.INFO, "Created/Updated symlink `%s` for latest backup.", stg.latestSymlink)
+		stg.Log(storage.INFO, stg.Name, "Created/Updated symlink `%s` for latest backup.", stg.latestSymlink)
 	}

 	return nil
@@ -69,8 +71,8 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error {
 	)
 	globMatches, err := filepath.Glob(globPattern)
 	if err != nil {
-		return fmt.Errorf(
-			"pruneBackups: error looking up matching files using pattern %s: %w",
+		return stg.Log(storage.ERROR, stg.Name,
+			"Prune: Error looking up matching files using pattern %s! 
%w", globPattern, err, ) @@ -80,8 +82,8 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error { for _, candidate := range globMatches { fi, err := os.Lstat(candidate) if err != nil { - return fmt.Errorf( - "pruneBackups: error calling Lstat on file %s: %w", + return stg.Log(storage.ERROR, stg.Name, + "Prune: Error calling Lstat on file %s! %w", candidate, err, ) @@ -96,8 +98,8 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error { for _, candidate := range candidates { fi, err := os.Stat(candidate) if err != nil { - return fmt.Errorf( - "pruneBackups: error calling stat on file %s: %w", + return stg.Log(storage.ERROR, stg.Name, + "Prune: Error calling stat on file %s! %w", candidate, err, ) @@ -120,8 +122,8 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error { } } if len(removeErrors) != 0 { - return fmt.Errorf( - "pruneBackups: %d error(s) deleting local files, starting with: %w", + return stg.Log(storage.ERROR, stg.Name, + "Prune: %d error(s) deleting local files, starting with: %w", len(removeErrors), utilites.Join(removeErrors...), ) diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index 4ad1d2bf..26fe02f5 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -3,7 +3,6 @@ package s3 import ( "context" "errors" - "fmt" "path" "path/filepath" "time" @@ -24,7 +23,7 @@ type s3Storage struct { // NewStorageBackend creates and initializes a new S3/Minio storage backend. func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool, - remotePath string, bucket string, storageClass string, logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) (storage.Backend, error) { + remotePath string, bucket string, storageClass string, logFunc storage.LogFuncDef, s *types.Stats) (storage.Backend, error) { var creds *credentials.Credentials if accessKeyId != "" && secretAccessKey != "" { @@ -51,7 +50,7 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri transport, err := minio.DefaultTransport(true) if err != nil { - return nil, fmt.Errorf("newScript: failed to create default minio transport") + return nil, logFunc(storage.ERROR, "S3", "NewScript: failed to create default minio transport") } transport.TLSClientConfig.InsecureSkipVerify = true options.Transport = transport @@ -59,7 +58,7 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri mc, err := minio.New(endpoint, &options) if err != nil { - return nil, fmt.Errorf("newScript: error setting up minio client: %w", err) + return nil, logFunc(storage.ERROR, "S3", "NewScript: error setting up minio client: %w", err) } strgBackend := &storage.StorageBackend{ @@ -87,9 +86,9 @@ func (stg *s3Storage) Copy(file string) error { ContentType: "application/tar+gzip", StorageClass: stg.storageClass, }); err != nil { - return fmt.Errorf("copyBackup: error uploading backup to remote storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading backup to remote storage! 
%w", err) } - stg.Log(storage.INFO, "Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) + stg.Log(storage.INFO, stg.Name, "Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) return nil } @@ -107,8 +106,8 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error { for candidate := range candidates { lenCandidates++ if candidate.Err != nil { - return fmt.Errorf( - "pruneBackups: error looking up candidates from remote storage: %w", + return stg.Log(storage.ERROR, stg.Name, + "Prune: Error looking up candidates from remote storage! %w", candidate.Err, ) } diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index be71e6e3..b60d1e89 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -26,7 +26,7 @@ type sshStorage struct { // NewStorageBackend creates and initializes a new SSH storage backend. func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string, - logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) (storage.Backend, error) { + logFunc storage.LogFuncDef, s *types.Stats) (storage.Backend, error) { var authMethods []ssh.AuthMethod @@ -64,7 +64,7 @@ func NewStorageBackend(hostName string, port string, user string, password strin sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", hostName, port), sshClientConfig) if err != nil { - return nil, fmt.Errorf("newScript: error creating ssh client: %w", err) + return nil, logFunc(storage.ERROR, "SSH", "NewScript: Error creating ssh client! %w", err) } _, _, err = sshClient.SendRequest("keepalive", false, nil) if err != nil { @@ -73,7 +73,7 @@ func NewStorageBackend(hostName string, port string, user string, password strin sftpClient, err := sftp.NewClient(sshClient) if err != nil { - return nil, fmt.Errorf("newScript: error creating sftp client: %w", err) + return nil, logFunc(storage.ERROR, "SSH", "NewScript: error creating sftp client! %w", err) } strgBackend := &storage.StorageBackend{ @@ -98,13 +98,13 @@ func (stg *sshStorage) Copy(file string) error { source, err := os.Open(file) _, name := path.Split(file) if err != nil { - return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Copy: Error reading the file to be uploaded! %w", err) } defer source.Close() destination, err := stg.sftpClient.Create(filepath.Join(stg.DestinationPath, name)) if err != nil { - return fmt.Errorf("copyBackup: error creating file on SSH storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Copy: Error creating file on SSH storage! %w", err) } defer destination.Close() @@ -114,31 +114,31 @@ func (stg *sshStorage) Copy(file string) error { if err == io.EOF { tot, err := destination.Write(chunk[:num]) if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to SSH storage! %w", err) } if tot != len(chunk[:num]) { - return fmt.Errorf("sshClient: failed to write stream") + return stg.Log(storage.ERROR, stg.Name, "sshClient: failed to write stream") } break } if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to SSH storage! 
%w", err) } tot, err := destination.Write(chunk[:num]) if err != nil { - return fmt.Errorf("copyBackup: error uploading the file to SSH storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to SSH storage! %w", err) } if tot != len(chunk[:num]) { - return fmt.Errorf("sshClient: failed to write stream") + return stg.Log(storage.ERROR, stg.Name, "sshClient: failed to write stream") } } - stg.Log(storage.INFO, "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath) + stg.Log(storage.INFO, stg.Name, "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath) return nil } @@ -147,7 +147,7 @@ func (stg *sshStorage) Copy(file string) error { func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error { candidates, err := stg.sftpClient.ReadDir(stg.DestinationPath) if err != nil { - return fmt.Errorf("pruneBackups: error reading directory from SSH storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Prune: Error reading directory from SSH storage! %w", err) } var matches []string @@ -168,7 +168,7 @@ func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error { stg.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { if err := stg.sftpClient.Remove(filepath.Join(stg.DestinationPath, match)); err != nil { - return fmt.Errorf("pruneBackups: error removing file from SSH storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Prune: Error removing file from SSH storage! %w", err) } } return nil diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 7fba6541..fbf1d533 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -18,7 +18,7 @@ type StorageBackend struct { Name string DestinationPath string RetentionDays int - Log func(logType LogType, msg string, params ...interface{}) + Log LogFuncDef Stats *t.Stats } @@ -30,6 +30,8 @@ const ( ERROR LogType = "ERROR" ) +type LogFuncDef func(logType LogType, context string, msg string, params ...interface{}) error + // DoPrune holds general control flow that applies to any kind of storage. // Callers can pass in a thunk that performs the actual deletion of files. 
func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error {
@@ -37,7 +39,7 @@ func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description st
 		if err := doRemoveFiles(); err != nil {
 			return err
 		}
-		stg.Log(INFO,
+		stg.Log(INFO, stg.Name,
 			"Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.",
 			lenMatches,
 			lenCandidates,
@@ -45,10 +47,10 @@ func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description st
 			stg.RetentionDays,
 		)
 	} else if lenMatches != 0 && lenMatches == lenCandidates {
-		stg.Log(WARNING, "The current configuration would delete all %d existing %s.", lenMatches, description)
-		stg.Log(WARNING, "Refusing to do so, please check your configuration.")
+		stg.Log(WARNING, stg.Name, "The current configuration would delete all %d existing %s.", lenMatches, description)
+		stg.Log(WARNING, stg.Name, "Refusing to do so, please check your configuration.")
 	} else {
-		stg.Log(INFO, "None of %d existing %s were pruned.", lenCandidates, description)
+		stg.Log(INFO, stg.Name, "None of %d existing %s were pruned.", lenCandidates, description)
 	}
 	return nil
 }
diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go
index c17ae18c..c51ba441 100644
--- a/internal/storage/webdav/webdav.go
+++ b/internal/storage/webdav/webdav.go
@@ -2,7 +2,6 @@ package webdav

 import (
 	"errors"
-	"fmt"
 	"io/fs"
 	"net/http"
 	"os"
@@ -24,7 +23,7 @@ type webDavStorage struct {

 // NewStorageBackend creates and initializes a new WebDav storage backend.
 func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool,
-	logFunc func(logType storage.LogType, msg string, params ...interface{}), s *types.Stats) (storage.Backend, error) {
+	logFunc storage.LogFuncDef, s *types.Stats) (storage.Backend, error) {

 	if username == "" || password == "" {
 		return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided")
 	} else {
@@ -62,15 +61,15 @@ func (stg *webDavStorage) Copy(file string) error {
 	bytes, err := os.ReadFile(file)
 	_, name := path.Split(file)
 	if err != nil {
-		return fmt.Errorf("copyBackup: error reading the file to be uploaded: %w", err)
+		return stg.Log(storage.ERROR, stg.Name, "Copy: Error reading the file to be uploaded! %w", err)
 	}
 	if err := stg.client.MkdirAll(stg.DestinationPath, 0644); err != nil {
-		return fmt.Errorf("copyBackup: error creating directory '%s' on WebDAV server: %w", stg.DestinationPath, err)
+		return stg.Log(storage.ERROR, stg.Name, "Copy: Error creating directory '%s' on WebDAV server! %w", stg.DestinationPath, err)
 	}
 	if err := stg.client.Write(filepath.Join(stg.DestinationPath, name), bytes, 0644); err != nil {
-		return fmt.Errorf("copyBackup: error uploading the file to WebDAV server: %w", err)
+		return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to WebDAV server! 
%w", err) } - stg.Log(storage.INFO, "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) + stg.Log(storage.INFO, stg.Name, "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) return nil } @@ -79,7 +78,7 @@ func (stg *webDavStorage) Copy(file string) error { func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) error { candidates, err := stg.client.ReadDir(stg.DestinationPath) if err != nil { - return fmt.Errorf("pruneBackups: error looking up candidates from remote storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Prune: Error looking up candidates from remote storage! %w", err) } var matches []fs.FileInfo var lenCandidates int @@ -101,7 +100,7 @@ func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) error stg.DoPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { for _, match := range matches { if err := stg.client.Remove(filepath.Join(stg.DestinationPath, match.Name())); err != nil { - return fmt.Errorf("pruneBackups: error removing file from WebDAV storage: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Prune: Error removing file from WebDAV storage! %w", err) } } return nil From fbc7a4b0eaa2b93524ae4e21a119bd59c9492d7a Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 25 Jul 2022 19:44:56 +0200 Subject: [PATCH 19/32] Missed an error message --- internal/storage/local/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index 712bdcf0..00a3da1d 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -45,7 +45,7 @@ func (stg *localStorage) Copy(file string) error { _, name := path.Split(file) if err := utilites.CopyFile(file, path.Join(stg.DestinationPath, name)); err != nil { - return stg.Log(storage.ERROR, stg.Name, "copyBackup: error copying file to local archive: %w", err) + return stg.Log(storage.ERROR, stg.Name, "Copy: Error copying file to local archive! %w", err) } stg.Log(storage.INFO, stg.Name, "Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath) From b34cc4a4900208956ec7bdbf67549cc090c5754c Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Fri, 29 Jul 2022 16:07:16 +0200 Subject: [PATCH 20/32] Moved config back to main. Optimized prune stats handling. 
---
 {internal/types => cmd/backup}/config.go |  2 +-
 cmd/backup/notifications.go              |  2 +-
 cmd/backup/script.go                     | 13 +++++++------
 internal/storage/local/local.go          |  7 +++----
 internal/storage/s3/s3.go                |  6 +++---
 internal/storage/ssh/ssh.go              |  6 +++---
 internal/storage/storage.go              |  2 +-
 internal/storage/webdav/webdav.go        |  6 +++---
 8 files changed, 22 insertions(+), 22 deletions(-)
 rename {internal/types => cmd/backup}/config.go (99%)

diff --git a/internal/types/config.go b/cmd/backup/config.go
similarity index 99%
rename from internal/types/config.go
rename to cmd/backup/config.go
index ff531e19..9326d435 100644
--- a/internal/types/config.go
+++ b/cmd/backup/config.go
@@ -1,7 +1,7 @@
 // Copyright 2022 - Offen Authors
 // SPDX-License-Identifier: MPL-2.0

-package types
+package main

 import (
 	"fmt"
diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go
index a5fb5b3a..5a5abacd 100644
--- a/cmd/backup/notifications.go
+++ b/cmd/backup/notifications.go
@@ -22,7 +22,7 @@ var defaultNotifications string
 // NotificationData data to be passed to the notification templates
 type NotificationData struct {
 	Error  error
-	Config *t.Config
+	Config *Config
 	Stats  *t.Stats
 }

diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index f817775e..e0be23f0 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -51,7 +51,7 @@ type script struct {

 	encounteredLock bool

-	c *t.Config
+	c *Config
 }

 // newScript creates all resources needed for the script to perform actions against
@@ -61,7 +61,7 @@ type script struct {
 func newScript() (*script, error) {
 	stdOut, logBuffer := utilites.Buffer(os.Stdout)
 	s := &script{
-		c: &t.Config{},
+		c: &Config{},
 		logger: &logrus.Logger{
 			Out:       stdOut,
 			Formatter: new(logrus.TextFormatter),
@@ -125,7 +125,7 @@ func newScript() (*script, error) {
 	if s.c.AwsS3BucketName != "" {
 		if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
-			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc, s.stats); err != nil {
+			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc, &s.stats.Storages.S3); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, s3Backend)
@@ -134,7 +134,7 @@ func newScript() (*script, error) {

 	if s.c.WebdavUrl != "" {
 		if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
-			s.c.WebdavUrlInsecure, logFunc, s.stats); err != nil {
+			s.c.WebdavUrlInsecure, logFunc, &s.stats.Storages.WebDAV); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, webdavBackend)
@@ -143,14 +143,14 @@ func newScript() (*script, error) {

 	if s.c.SSHHostName != "" {
 		if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
-			s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc, s.stats); err != nil {
+			s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc, &s.stats.Storages.SSH); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, sshBackend)
 		}
 	}

-	localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc, s.stats)
+	localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc, &s.stats.Storages.Local)
 	s.storagePool = append(s.storagePool, localBackend)

 	if s.c.EmailNotificationRecipient != "" {
@@ -490,6 +490,7 @@ func (s *script)
pruneBackups() error { if err := backend.Prune(deadline, s.c.BackupPruningPrefix); err != nil { return err } + } return nil diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index 00a3da1d..a862fe0a 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -18,15 +18,14 @@ type localStorage struct { } // NewStorageBackend creates and initializes a new local storage backend. -func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.LogFuncDef, - s *types.Stats) storage.Backend { +func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.LogFuncDef, stats *types.StorageStats) storage.Backend { strgBackend := &storage.StorageBackend{ Backend: &localStorage{}, Name: "Local", DestinationPath: archivePath, Log: logFunc, - Stats: s, + Stats: stats, } localBackend := &localStorage{ StorageBackend: strgBackend, @@ -109,7 +108,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error { } } - stg.Stats.Storages.Local = types.StorageStats{ + stg.Stats = &types.StorageStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index 26fe02f5..771cf0a0 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -23,7 +23,7 @@ type s3Storage struct { // NewStorageBackend creates and initializes a new S3/Minio storage backend. func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool, - remotePath string, bucket string, storageClass string, logFunc storage.LogFuncDef, s *types.Stats) (storage.Backend, error) { + remotePath string, bucket string, storageClass string, logFunc storage.LogFuncDef, stats *types.StorageStats) (storage.Backend, error) { var creds *credentials.Credentials if accessKeyId != "" && secretAccessKey != "" { @@ -66,7 +66,7 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri Name: "S3", DestinationPath: remotePath, Log: logFunc, - Stats: s, + Stats: stats, } sshBackend := &s3Storage{ StorageBackend: strgBackend, @@ -116,7 +116,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error { } } - stg.Stats.Storages.S3 = types.StorageStats{ + stg.Stats = &types.StorageStats{ Total: uint(lenCandidates), Pruned: uint(len(matches)), } diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index b60d1e89..0935b0be 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -26,7 +26,7 @@ type sshStorage struct { // NewStorageBackend creates and initializes a new SSH storage backend. 
func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string, - logFunc storage.LogFuncDef, s *types.Stats) (storage.Backend, error) { + logFunc storage.LogFuncDef, stats *types.StorageStats) (storage.Backend, error) { var authMethods []ssh.AuthMethod @@ -81,7 +81,7 @@ func NewStorageBackend(hostName string, port string, user string, password strin Name: "SSH", DestinationPath: remotePath, Log: logFunc, - Stats: s, + Stats: stats, } sshBackend := &sshStorage{ StorageBackend: strgBackend, @@ -160,7 +160,7 @@ func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error { } } - stg.Stats.Storages.SSH = types.StorageStats{ + stg.Stats = &types.StorageStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } diff --git a/internal/storage/storage.go b/internal/storage/storage.go index fbf1d533..79d2312a 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -19,7 +19,7 @@ type StorageBackend struct { DestinationPath string RetentionDays int Log LogFuncDef - Stats *t.Stats + Stats *t.StorageStats } type LogType string diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index c51ba441..0434cf7b 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -23,7 +23,7 @@ type webDavStorage struct { // NewStorageBackend creates and initializes a new WebDav storage backend. func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool, - logFunc storage.LogFuncDef, s *types.Stats) (storage.Backend, error) { + logFunc storage.LogFuncDef, stats *types.StorageStats) (storage.Backend, error) { if username == "" || password == "" { return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") @@ -45,7 +45,7 @@ func NewStorageBackend(url string, remotePath string, username string, password Name: "WebDav", DestinationPath: remotePath, Log: logFunc, - Stats: s, + Stats: stats, } webdavBackend := &webDavStorage{ StorageBackend: strgBackend, @@ -92,7 +92,7 @@ func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) error } } - stg.Stats.Storages.WebDAV = types.StorageStats{ + stg.Stats = &types.StorageStats{ Total: uint(lenCandidates), Pruned: uint(len(matches)), } From b0fd3c70e403b255eaebfe0c30c3db3ac2f23ef5 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Fri, 5 Aug 2022 14:26:44 +0200 Subject: [PATCH 21/32] Move stats back to main package --- cmd/backup/notifications.go | 3 +-- cmd/backup/script.go | 28 ++++++++++++++----------- {internal/types => cmd/backup}/stats.go | 12 ++--------- internal/storage/local/local.go | 20 ++++++++++-------- internal/storage/s3/s3.go | 16 +++++++------- internal/storage/ssh/ssh.go | 16 +++++++------- internal/storage/storage.go | 11 ++++++---- internal/storage/webdav/webdav.go | 18 +++++++++------- 8 files changed, 65 insertions(+), 59 deletions(-) rename {internal/types => cmd/backup}/stats.go (77%) diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go index 5a5abacd..dd0e8932 100644 --- a/cmd/backup/notifications.go +++ b/cmd/backup/notifications.go @@ -12,7 +12,6 @@ import ( "time" sTypes "github.com/containrrr/shoutrrr/pkg/types" - t "github.com/offen/docker-volume-backup/internal/types" utilites "github.com/offen/docker-volume-backup/internal/utilities" ) @@ -23,7 +22,7 @@ var defaultNotifications string type NotificationData struct { 
Error  error
 	Config *Config
-	Stats  *t.Stats
+	Stats  *Stats
 }

 // notify sends a notification using the given title and body templates.
diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index e0be23f0..7c156fab 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -19,7 +19,6 @@ import (
 	"github.com/offen/docker-volume-backup/internal/storage/s3"
 	"github.com/offen/docker-volume-backup/internal/storage/ssh"
 	"github.com/offen/docker-volume-backup/internal/storage/webdav"
-	t "github.com/offen/docker-volume-backup/internal/types"
 	utilites "github.com/offen/docker-volume-backup/internal/utilities"

 	"github.com/containrrr/shoutrrr"
@@ -47,7 +46,7 @@ type script struct {
 	hookLevel hookLevel
 	file      string

-	stats *t.Stats
+	stats *Stats

 	encounteredLock bool

@@ -68,10 +67,10 @@ func newScript() (*script, error) {
 			Hooks:     make(logrus.LevelHooks),
 			Level:     logrus.InfoLevel,
 		},
-		stats: &t.Stats{
+		stats: &Stats{
 			StartTime: time.Now(),
 			LogOutput: logBuffer,
-			Storages:  t.StoragesStats{},
+			Storages:  map[string]StorageStats{},
 		},
 	}

@@ -125,7 +124,7 @@ func newScript() (*script, error) {
 	if s.c.AwsS3BucketName != "" {
 		if s3Backend, err := s3.NewStorageBackend(s.c.AwsEndpoint, s.c.AwsAccessKeyID, s.c.AwsSecretAccessKey, s.c.AwsIamRoleEndpoint,
-			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc, &s.stats.Storages.S3); err != nil {
+			s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, s3Backend)
@@ -134,7 +133,7 @@ func newScript() (*script, error) {

 	if s.c.WebdavUrl != "" {
 		if webdavBackend, err := webdav.NewStorageBackend(s.c.WebdavUrl, s.c.WebdavPath, s.c.WebdavUsername, s.c.WebdavPassword,
-			s.c.WebdavUrlInsecure, logFunc, &s.stats.Storages.WebDAV); err != nil {
+			s.c.WebdavUrlInsecure, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, webdavBackend)
@@ -143,14 +142,14 @@ func newScript() (*script, error) {

 	if s.c.SSHHostName != "" {
 		if sshBackend, err := ssh.NewStorageBackend(s.c.SSHHostName, s.c.SSHPort, s.c.SSHUser, s.c.SSHPassword, s.c.SSHIdentityFile,
-			s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc, &s.stats.Storages.SSH); err != nil {
+			s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc); err != nil {
 			return nil, err
 		} else {
 			s.storagePool = append(s.storagePool, sshBackend)
 		}
 	}

-	localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc, &s.stats.Storages.Local)
+	localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc)
 	s.storagePool = append(s.storagePool, localBackend)

 	if s.c.EmailNotificationRecipient != "" {
@@ -280,7 +279,7 @@ func (s *script) stopContainers() (func() error, error) {
 		)
 	}

-	s.stats.Containers = t.ContainersStats{
+	s.stats.Containers = ContainersStats{
 		All:     uint(len(allContainers)),
 		ToStop:  uint(len(containersToStop)),
 		Stopped: uint(len(stoppedContainers)),
@@ -460,7 +459,7 @@
 		return fmt.Errorf("copyBackup: unable to stat backup file: %w", err)
 	} else {
 		size := stat.Size()
-		s.stats.BackupFile = t.BackupFileStats{
+		s.stats.BackupFile = BackupFileStats{
 			Size:     uint64(size),
 			Name:     name,
 			FullPath: s.file,
@@ -487,10 +486,15 @@ func (s *script) pruneBackups() error {
 	deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway)

 	for _, backend := range s.storagePool {
-		if err :=
backend.Prune(deadline, s.c.BackupPruningPrefix); err != nil {
+		if stats, err := backend.Prune(deadline, s.c.BackupPruningPrefix); err == nil {
+			s.stats.Storages[backend.GetName()] = StorageStats{
+				Total:  stats.Total,
+				Pruned: stats.Pruned,
+			}
+
+		} else {
 			return err
 		}
-
 	}

 	return nil
diff --git a/internal/types/stats.go b/cmd/backup/stats.go
similarity index 77%
rename from internal/types/stats.go
rename to cmd/backup/stats.go
index a0ad7994..fbb5e11a 100644
--- a/internal/types/stats.go
+++ b/cmd/backup/stats.go
@@ -1,7 +1,7 @@
 // Copyright 2022 - Offen Authors
 // SPDX-License-Identifier: MPL-2.0

-package types
+package main

 import (
 	"bytes"
@@ -30,14 +30,6 @@ type StorageStats struct {
 	PruneErrors uint
 }

-// StoragesStats stats about each possible archival location (Local, WebDAV, SSH, S3)
-type StoragesStats struct {
-	Local  StorageStats
-	WebDAV StorageStats
-	SSH    StorageStats
-	S3     StorageStats
-}
-
 // Stats global stats regarding script execution
 type Stats struct {
 	StartTime time.Time
@@ -47,5 +39,5 @@ type Stats struct {
 	LogOutput  *bytes.Buffer
 	Containers ContainersStats
 	BackupFile BackupFileStats
-	Storages   StoragesStats
+	Storages   map[string]StorageStats
 }
diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
index a862fe0a..aa482c9d 100644
--- a/internal/storage/local/local.go
+++ b/internal/storage/local/local.go
@@ -8,7 +8,6 @@ import (
 	"time"

 	"github.com/offen/docker-volume-backup/internal/storage"
-	"github.com/offen/docker-volume-backup/internal/types"
 	utilites "github.com/offen/docker-volume-backup/internal/utilities"
 )
@@ -18,14 +17,13 @@ type localStorage struct {
 }

 // NewStorageBackend creates and initializes a new local storage backend.
-func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.LogFuncDef, stats *types.StorageStats) storage.Backend {
+func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.LogFuncDef) storage.Backend {

 	strgBackend := &storage.StorageBackend{
 		Backend:         &localStorage{},
 		Name:            "Local",
 		DestinationPath: archivePath,
 		Log:             logFunc,
-		Stats:           stats,
 	}
 	localBackend := &localStorage{
 		StorageBackend: strgBackend,
@@ -35,6 +33,10 @@
 	return strgBackend
 }

+func (stg *localStorage) GetName() string {
+	return stg.Name
+}
+
 // Copy copies the given file to the local storage backend.
 func (stg *localStorage) Copy(file string) error {
 	if _, err := os.Stat(stg.DestinationPath); os.IsNotExist(err) {
@@ -63,14 +65,14 @@
 }

 // Prune rotates away backups according to the configuration and provided deadline for the local storage backend.
-func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error {
+func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) {
 	globPattern := path.Join(
 		stg.DestinationPath,
 		fmt.Sprintf("%s*", pruningPrefix),
 	)
 	globMatches, err := filepath.Glob(globPattern)
 	if err != nil {
-		return stg.Log(storage.ERROR, stg.Name,
+		return nil, stg.Log(storage.ERROR, stg.Name,
 			"Prune: Error looking up matching files using pattern %s! %w",
 			globPattern,
 			err,
 		)
@@ -81,7 +83,7 @@
 	for _, candidate := range globMatches {
 		fi, err := os.Lstat(candidate)
 		if err != nil {
-			return stg.Log(storage.ERROR, stg.Name,
+			return nil, stg.Log(storage.ERROR, stg.Name,
 				"Prune: Error calling Lstat on file %s! 
%w", candidate, err, @@ -97,7 +99,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error { for _, candidate := range candidates { fi, err := os.Stat(candidate) if err != nil { - return stg.Log(storage.ERROR, stg.Name, + return nil, stg.Log(storage.ERROR, stg.Name, "Prune: Error calling stat on file %s! %w", candidate, err, @@ -108,7 +110,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error { } } - stg.Stats = &types.StorageStats{ + stats := &storage.PruneStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } @@ -130,5 +132,5 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) error { return nil }) - return nil + return stats, nil } diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index 771cf0a0..4896fcec 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -10,7 +10,6 @@ import ( "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/offen/docker-volume-backup/internal/storage" - "github.com/offen/docker-volume-backup/internal/types" utilites "github.com/offen/docker-volume-backup/internal/utilities" ) @@ -23,7 +22,7 @@ type s3Storage struct { // NewStorageBackend creates and initializes a new S3/Minio storage backend. func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool, - remotePath string, bucket string, storageClass string, logFunc storage.LogFuncDef, stats *types.StorageStats) (storage.Backend, error) { + remotePath string, bucket string, storageClass string, logFunc storage.LogFuncDef) (storage.Backend, error) { var creds *credentials.Credentials if accessKeyId != "" && secretAccessKey != "" { @@ -66,7 +65,6 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri Name: "S3", DestinationPath: remotePath, Log: logFunc, - Stats: stats, } sshBackend := &s3Storage{ StorageBackend: strgBackend, @@ -78,6 +76,10 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri return strgBackend, nil } +func (stg *s3Storage) GetName() string { + return stg.Name +} + // Copy copies the given file to the S3/Minio storage backend. func (stg *s3Storage) Copy(file string) error { _, name := path.Split(file) @@ -94,7 +96,7 @@ func (stg *s3Storage) Copy(file string) error { } // Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend. -func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error { +func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates := stg.client.ListObjects(context.Background(), stg.bucket, minio.ListObjectsOptions{ WithMetadata: true, Prefix: filepath.Join(stg.DestinationPath, pruningPrefix), @@ -106,7 +108,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error { for candidate := range candidates { lenCandidates++ if candidate.Err != nil { - return stg.Log(storage.ERROR, stg.Name, + return nil, stg.Log(storage.ERROR, stg.Name, "Prune: Error looking up candidates from remote storage! 
%w", candidate.Err, ) @@ -116,7 +118,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error { } } - stg.Stats = &types.StorageStats{ + stats := &storage.PruneStats{ Total: uint(lenCandidates), Pruned: uint(len(matches)), } @@ -142,5 +144,5 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) error { return nil }) - return nil + return stats, nil } diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index 0935b0be..8610e72a 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -12,7 +12,6 @@ import ( "time" "github.com/offen/docker-volume-backup/internal/storage" - "github.com/offen/docker-volume-backup/internal/types" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" ) @@ -26,7 +25,7 @@ type sshStorage struct { // NewStorageBackend creates and initializes a new SSH storage backend. func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string, - logFunc storage.LogFuncDef, stats *types.StorageStats) (storage.Backend, error) { + logFunc storage.LogFuncDef) (storage.Backend, error) { var authMethods []ssh.AuthMethod @@ -81,7 +80,6 @@ func NewStorageBackend(hostName string, port string, user string, password strin Name: "SSH", DestinationPath: remotePath, Log: logFunc, - Stats: stats, } sshBackend := &sshStorage{ StorageBackend: strgBackend, @@ -93,6 +91,10 @@ func NewStorageBackend(hostName string, port string, user string, password strin return strgBackend, nil } +func (stg *sshStorage) GetName() string { + return stg.Name +} + // Copy copies the given file to the SSH storage backend. func (stg *sshStorage) Copy(file string) error { source, err := os.Open(file) @@ -144,10 +146,10 @@ func (stg *sshStorage) Copy(file string) error { } // Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend. -func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error { +func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates, err := stg.sftpClient.ReadDir(stg.DestinationPath) if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Prune: Error reading directory from SSH storage! %w", err) + return nil, stg.Log(storage.ERROR, stg.Name, "Prune: Error reading directory from SSH storage! %w", err) } var matches []string @@ -160,7 +162,7 @@ func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error { } } - stg.Stats = &types.StorageStats{ + stats := &storage.PruneStats{ Total: uint(len(candidates)), Pruned: uint(len(matches)), } @@ -174,5 +176,5 @@ func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) error { return nil }) - return nil + return stats, nil } diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 79d2312a..d7bd94f9 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -2,14 +2,13 @@ package storage import ( "time" - - t "github.com/offen/docker-volume-backup/internal/types" ) // Interface for defining functions which all storage providers support. type Backend interface { Copy(file string) error - Prune(deadline time.Time, pruningPrefix string) error + Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error) + GetName() string } // Generic type of storage. Everything here are common properties of all storage types. 
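// The hunk above changes Prune to hand its statistics back to the caller
// instead of writing them into a shared Stats field. A minimal,
// self-contained sketch of a backend satisfying the new interface follows;
// memoryStorage and its map of fake files are invented for illustration
// and are not part of this patch:

package main

import (
	"fmt"
	"time"
)

type PruneStats struct {
	Total  uint
	Pruned uint
}

type Backend interface {
	Copy(file string) error
	Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error)
	GetName() string
}

// memoryStorage records fake backups as name -> creation time, just
// enough to exercise the interface.
type memoryStorage struct {
	files map[string]time.Time
}

func (m *memoryStorage) GetName() string { return "Memory" }

func (m *memoryStorage) Copy(file string) error {
	m.files[file] = time.Now()
	return nil
}

func (m *memoryStorage) Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error) {
	stats := &PruneStats{Total: uint(len(m.files))}
	for name, created := range m.files {
		if created.Before(deadline) {
			delete(m.files, name)
			stats.Pruned++
		}
	}
	return stats, nil
}

func main() {
	var b Backend = &memoryStorage{files: map[string]time.Time{}}
	b.Copy("backup-2022-08-15.tar.gz")
	stats, _ := b.Prune(time.Now().Add(time.Hour), "backup-")
	fmt.Printf("%s: pruned %d of %d\n", b.GetName(), stats.Pruned, stats.Total)
}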
@@ -19,7 +18,6 @@ type StorageBackend struct { DestinationPath string RetentionDays int Log LogFuncDef - Stats *t.StorageStats } type LogType string @@ -32,6 +30,11 @@ const ( type LogFuncDef func(logType LogType, context string, msg string, params ...interface{}) error +type PruneStats struct { + Total uint + Pruned uint +} + // DoPrune holds general control flow that applies to any kind of storage. // Callers can pass in a thunk that performs the actual deletion of files. func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index 0434cf7b..1555f0f3 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -11,7 +11,6 @@ import ( "time" "github.com/offen/docker-volume-backup/internal/storage" - "github.com/offen/docker-volume-backup/internal/types" "github.com/studio-b12/gowebdav" ) @@ -23,7 +22,7 @@ type webDavStorage struct { // NewStorageBackend creates and initializes a new WebDav storage backend. func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool, - logFunc storage.LogFuncDef, stats *types.StorageStats) (storage.Backend, error) { + logFunc storage.LogFuncDef) (storage.Backend, error) { if username == "" || password == "" { return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") @@ -42,10 +41,9 @@ func NewStorageBackend(url string, remotePath string, username string, password strgBackend := &storage.StorageBackend{ Backend: &webDavStorage{}, - Name: "WebDav", DestinationPath: remotePath, Log: logFunc, - Stats: stats, + Name: "WebDav", } webdavBackend := &webDavStorage{ StorageBackend: strgBackend, @@ -56,6 +54,10 @@ func NewStorageBackend(url string, remotePath string, username string, password } } +func (stg *webDavStorage) GetName() string { + return stg.Name +} + // Copy copies the given file to the WebDav storage backend. func (stg *webDavStorage) Copy(file string) error { bytes, err := os.ReadFile(file) @@ -75,10 +77,10 @@ func (stg *webDavStorage) Copy(file string) error { } // Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend. -func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) error { +func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates, err := stg.client.ReadDir(stg.DestinationPath) if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Prune: Error looking up candidates from remote storage! %w", err) + return nil, stg.Log(storage.ERROR, stg.Name, "Prune: Error looking up candidates from remote storage! 
%w", err) } var matches []fs.FileInfo var lenCandidates int @@ -92,7 +94,7 @@ func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) error } } - stg.Stats = &types.StorageStats{ + stats := &storage.PruneStats{ Total: uint(lenCandidates), Pruned: uint(len(matches)), } @@ -106,5 +108,5 @@ func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) error return nil }) - return nil + return stats, nil } From 3eac5e4c0adcb82e75ca36cd81e42c8de823eb7a Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Fri, 5 Aug 2022 14:29:48 +0200 Subject: [PATCH 22/32] Code doc stuff --- internal/storage/local/local.go | 1 + internal/storage/s3/s3.go | 1 + internal/storage/ssh/ssh.go | 1 + internal/storage/storage.go | 5 +++-- internal/storage/webdav/webdav.go | 1 + 5 files changed, 7 insertions(+), 2 deletions(-) diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index aa482c9d..b3dab50a 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -33,6 +33,7 @@ func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage return strgBackend } +// GetName return the name of the storage backend through the interface func (stg *localStorage) GetName() string { return stg.Name } diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index 4896fcec..f03662f3 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -76,6 +76,7 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri return strgBackend, nil } +// GetName return the name of the storage backend through the interface func (stg *s3Storage) GetName() string { return stg.Name } diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index 8610e72a..dc0fdeba 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -91,6 +91,7 @@ func NewStorageBackend(hostName string, port string, user string, password strin return strgBackend, nil } +// GetName return the name of the storage backend through the interface func (stg *sshStorage) GetName() string { return stg.Name } diff --git a/internal/storage/storage.go b/internal/storage/storage.go index d7bd94f9..834093b2 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -4,14 +4,14 @@ import ( "time" ) -// Interface for defining functions which all storage providers support. +// Backend is an interface for defining functions which all storage providers support. type Backend interface { Copy(file string) error Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error) GetName() string } -// Generic type of storage. Everything here are common properties of all storage types. +// StorageBackend is a generic type of storage. Everything here are common properties of all storage types. 
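// The doc comments added in this patch follow the Go convention that a
// comment on an exported identifier starts with the identifier's own
// name, which lets godoc and editor tooling surface it verbatim. A tiny
// stand-alone illustration (all names below are invented, not from this
// repository):

package main

import "fmt"

// RetentionDays is the number of days a backup is kept before pruning.
const RetentionDays = 7

// PruneReport summarizes a single pruning run.
type PruneReport struct {
	Total  uint
	Pruned uint
}

func main() {
	fmt.Println(RetentionDays, PruneReport{Total: 3, Pruned: 1})
}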
type StorageBackend struct { Backend Name string @@ -30,6 +30,7 @@ const ( type LogFuncDef func(logType LogType, context string, msg string, params ...interface{}) error +// PruneStats is a wrapper struct for returning stats after pruning type PruneStats struct { Total uint Pruned uint diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index 1555f0f3..95314a7f 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -54,6 +54,7 @@ func NewStorageBackend(url string, remotePath string, username string, password } } +// GetName return the name of the storage backend through the interface func (stg *webDavStorage) GetName() string { return stg.Name } From 3df59b56e8a22ea28d74485b240c8a2961b19d32 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 15 Aug 2022 13:54:51 +0200 Subject: [PATCH 23/32] Apply changes from #136 --- internal/storage/s3/s3.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index f03662f3..9aebca76 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -89,7 +89,8 @@ func (stg *s3Storage) Copy(file string) error { ContentType: "application/tar+gzip", StorageClass: stg.storageClass, }); err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading backup to remote storage! %w", err) + errResp := minio.ToErrorResponse(err) + return stg.Log(storage.ERROR, stg.Name, "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) } stg.Log(storage.INFO, stg.Name, "Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) From 344db2fc6f4546cbbfa761fe4932352d5e4f9d2c Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 15 Aug 2022 14:02:36 +0200 Subject: [PATCH 24/32] Replace name field with function. 
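Sketch of the idea behind this change (illustrative only; the toy types
below stand in for the real local/S3/SSH/WebDav backends): once every
concrete type answers Name() itself, the interface can expose the name
and the shared StorageBackend struct no longer needs a Name field that
each constructor has to remember to fill in.

    package main

    import "fmt"

    type Backend interface {
    	Name() string
    }

    type localStorage struct{}
    type s3Storage struct{}

    // Each implementation owns its name; there is no shared field to keep in sync.
    func (b *localStorage) Name() string { return "Local" }
    func (b *s3Storage) Name() string    { return "S3" }

    func main() {
    	for _, b := range []Backend{&localStorage{}, &s3Storage{}} {
    		fmt.Println("configured backend:", b.Name())
    	}
    }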
--- cmd/backup/script.go | 2 +- internal/storage/local/local.go | 23 +++++++++++------------ internal/storage/s3/s3.go | 13 ++++++------- internal/storage/ssh/ssh.go | 27 +++++++++++++-------------- internal/storage/storage.go | 11 +++++------ internal/storage/webdav/webdav.go | 19 +++++++++---------- 6 files changed, 45 insertions(+), 50 deletions(-) diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 7c156fab..9b1e2002 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -487,7 +487,7 @@ func (s *script) pruneBackups() error { for _, backend := range s.storagePool { if stats, err := backend.Prune(deadline, s.c.BackupPruningPrefix); err == nil { - s.stats.Storages[backend.GetName()] = StorageStats{ + s.stats.Storages[backend.Name()] = StorageStats{ Total: stats.Total, Pruned: stats.Pruned, } diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index b3dab50a..68261a42 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -21,7 +21,6 @@ func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage strgBackend := &storage.StorageBackend{ Backend: &localStorage{}, - Name: "Local", DestinationPath: archivePath, Log: logFunc, } @@ -33,9 +32,9 @@ func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage return strgBackend } -// GetName return the name of the storage backend through the interface -func (stg *localStorage) GetName() string { - return stg.Name +// Name return the name of the storage backend +func (stg *localStorage) Name() string { + return "Local" } // Copy copies the given file to the local storage backend. @@ -47,9 +46,9 @@ func (stg *localStorage) Copy(file string) error { _, name := path.Split(file) if err := utilites.CopyFile(file, path.Join(stg.DestinationPath, name)); err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error copying file to local archive! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error copying file to local archive! %w", err) } - stg.Log(storage.INFO, stg.Name, "Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath) + stg.Log(storage.INFO, stg.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath) if stg.latestSymlink != "" { symlink := path.Join(stg.DestinationPath, stg.latestSymlink) @@ -57,9 +56,9 @@ func (stg *localStorage) Copy(file string) error { os.Remove(symlink) } if err := os.Symlink(name, symlink); err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: error creating latest symlink! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: error creating latest symlink! %w", err) } - stg.Log(storage.INFO, stg.Name, "Created/Updated symlink `%s` for latest backup.", stg.latestSymlink) + stg.Log(storage.INFO, stg.Name(), "Created/Updated symlink `%s` for latest backup.", stg.latestSymlink) } return nil @@ -73,7 +72,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora ) globMatches, err := filepath.Glob(globPattern) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name, + return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error looking up matching files using pattern %s! 
%w", globPattern, err, @@ -84,7 +83,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora for _, candidate := range globMatches { fi, err := os.Lstat(candidate) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name, + return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error calling Lstat on file %s! %w", candidate, err, @@ -100,7 +99,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora for _, candidate := range candidates { fi, err := os.Stat(candidate) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name, + return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error calling stat on file %s! %w", candidate, err, @@ -124,7 +123,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora } } if len(removeErrors) != 0 { - return stg.Log(storage.ERROR, stg.Name, + return stg.Log(storage.ERROR, stg.Name(), "Prune: %d error(s) deleting local files, starting with: %w", len(removeErrors), utilites.Join(removeErrors...), diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index 9aebca76..5121f5b2 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -62,7 +62,6 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri strgBackend := &storage.StorageBackend{ Backend: &s3Storage{}, - Name: "S3", DestinationPath: remotePath, Log: logFunc, } @@ -76,9 +75,9 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri return strgBackend, nil } -// GetName return the name of the storage backend through the interface -func (stg *s3Storage) GetName() string { - return stg.Name +// Name returns the name of the storage backend +func (stg *s3Storage) Name() string { + return "S3" } // Copy copies the given file to the S3/Minio storage backend. @@ -90,9 +89,9 @@ func (stg *s3Storage) Copy(file string) error { StorageClass: stg.storageClass, }); err != nil { errResp := minio.ToErrorResponse(err) - return stg.Log(storage.ERROR, stg.Name, "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) + return stg.Log(storage.ERROR, stg.Name(), "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) } - stg.Log(storage.INFO, stg.Name, "Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) + stg.Log(storage.INFO, stg.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) return nil } @@ -110,7 +109,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage. for candidate := range candidates { lenCandidates++ if candidate.Err != nil { - return nil, stg.Log(storage.ERROR, stg.Name, + return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error looking up candidates from remote storage! 
%w", candidate.Err, ) diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index dc0fdeba..05425b6f 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -77,7 +77,6 @@ func NewStorageBackend(hostName string, port string, user string, password strin strgBackend := &storage.StorageBackend{ Backend: &sshStorage{}, - Name: "SSH", DestinationPath: remotePath, Log: logFunc, } @@ -91,9 +90,9 @@ func NewStorageBackend(hostName string, port string, user string, password strin return strgBackend, nil } -// GetName return the name of the storage backend through the interface -func (stg *sshStorage) GetName() string { - return stg.Name +// Name returns the name of the storage backend +func (stg *sshStorage) Name() string { + return "SSH" } // Copy copies the given file to the SSH storage backend. @@ -101,13 +100,13 @@ func (stg *sshStorage) Copy(file string) error { source, err := os.Open(file) _, name := path.Split(file) if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error reading the file to be uploaded! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error reading the file to be uploaded! %w", err) } defer source.Close() destination, err := stg.sftpClient.Create(filepath.Join(stg.DestinationPath, name)) if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error creating file on SSH storage! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error creating file on SSH storage! %w", err) } defer destination.Close() @@ -117,31 +116,31 @@ func (stg *sshStorage) Copy(file string) error { if err == io.EOF { tot, err := destination.Write(chunk[:num]) if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to SSH storage! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to SSH storage! %w", err) } if tot != len(chunk[:num]) { - return stg.Log(storage.ERROR, stg.Name, "sshClient: failed to write stream") + return stg.Log(storage.ERROR, stg.Name(), "sshClient: failed to write stream") } break } if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to SSH storage! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to SSH storage! %w", err) } tot, err := destination.Write(chunk[:num]) if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to SSH storage! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to SSH storage! %w", err) } if tot != len(chunk[:num]) { - return stg.Log(storage.ERROR, stg.Name, "sshClient: failed to write stream") + return stg.Log(storage.ERROR, stg.Name(), "sshClient: failed to write stream") } } - stg.Log(storage.INFO, stg.Name, "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath) + stg.Log(storage.INFO, stg.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath) return nil } @@ -150,7 +149,7 @@ func (stg *sshStorage) Copy(file string) error { func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates, err := stg.sftpClient.ReadDir(stg.DestinationPath) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name, "Prune: Error reading directory from SSH storage! %w", err) + return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error reading directory from SSH storage! 
%w", err) } var matches []string @@ -171,7 +170,7 @@ func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage stg.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { if err := stg.sftpClient.Remove(filepath.Join(stg.DestinationPath, match)); err != nil { - return stg.Log(storage.ERROR, stg.Name, "Prune: Error removing file from SSH storage! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Prune: Error removing file from SSH storage! %w", err) } } return nil diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 834093b2..d4b56965 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -8,13 +8,12 @@ import ( type Backend interface { Copy(file string) error Prune(deadline time.Time, pruningPrefix string) (*PruneStats, error) - GetName() string + Name() string } // StorageBackend is a generic type of storage. Everything here are common properties of all storage types. type StorageBackend struct { Backend - Name string DestinationPath string RetentionDays int Log LogFuncDef @@ -43,7 +42,7 @@ func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description st if err := doRemoveFiles(); err != nil { return err } - stg.Log(INFO, stg.Name, + stg.Log(INFO, stg.Name(), "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", lenMatches, lenCandidates, @@ -51,10 +50,10 @@ func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description st stg.RetentionDays, ) } else if lenMatches != 0 && lenMatches == lenCandidates { - stg.Log(WARNING, stg.Name, "The current configuration would delete all %d existing %s.", lenMatches, description) - stg.Log(WARNING, stg.Name, "Refusing to do so, please check your configuration.") + stg.Log(WARNING, stg.Name(), "The current configuration would delete all %d existing %s.", lenMatches, description) + stg.Log(WARNING, stg.Name(), "Refusing to do so, please check your configuration.") } else { - stg.Log(INFO, stg.Name, "None of %d existing %s were pruned.", lenCandidates, description) + stg.Log(INFO, stg.Name(), "None of %d existing %s were pruned.", lenCandidates, description) } return nil } diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index 95314a7f..aa81f3a0 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -43,7 +43,6 @@ func NewStorageBackend(url string, remotePath string, username string, password Backend: &webDavStorage{}, DestinationPath: remotePath, Log: logFunc, - Name: "WebDav", } webdavBackend := &webDavStorage{ StorageBackend: strgBackend, @@ -54,9 +53,9 @@ func NewStorageBackend(url string, remotePath string, username string, password } } -// GetName return the name of the storage backend through the interface -func (stg *webDavStorage) GetName() string { - return stg.Name +// Name returns the name of the storage backend +func (stg *webDavStorage) Name() string { + return "WebDav" } // Copy copies the given file to the WebDav storage backend. @@ -64,15 +63,15 @@ func (stg *webDavStorage) Copy(file string) error { bytes, err := os.ReadFile(file) _, name := path.Split(file) if err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error reading the file to be uploaded! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error reading the file to be uploaded! 
%w", err) } if err := stg.client.MkdirAll(stg.DestinationPath, 0644); err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error creating directory '%s' on WebDAV server! %w", stg.DestinationPath, err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error creating directory '%s' on WebDAV server! %w", stg.DestinationPath, err) } if err := stg.client.Write(filepath.Join(stg.DestinationPath, name), bytes, 0644); err != nil { - return stg.Log(storage.ERROR, stg.Name, "Copy: Error uploading the file to WebDAV server! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to WebDAV server! %w", err) } - stg.Log(storage.INFO, stg.Name, "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) + stg.Log(storage.INFO, stg.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) return nil } @@ -81,7 +80,7 @@ func (stg *webDavStorage) Copy(file string) error { func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { candidates, err := stg.client.ReadDir(stg.DestinationPath) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name, "Prune: Error looking up candidates from remote storage! %w", err) + return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error looking up candidates from remote storage! %w", err) } var matches []fs.FileInfo var lenCandidates int @@ -103,7 +102,7 @@ func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*stor stg.DoPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { for _, match := range matches { if err := stg.client.Remove(filepath.Join(stg.DestinationPath, match.Name())); err != nil { - return stg.Log(storage.ERROR, stg.Name, "Prune: Error removing file from WebDAV storage! %w", err) + return stg.Log(storage.ERROR, stg.Name(), "Prune: Error removing file from WebDAV storage! %w", err) } } return nil From 0f569f35ab7c2ea83a5b1ee4071402ecd7f2d6c9 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 15 Aug 2022 14:04:24 +0200 Subject: [PATCH 25/32] Changed receiver names from stg to b. --- internal/storage/local/local.go | 34 +++++++++++++++---------------- internal/storage/s3/s3.go | 24 +++++++++++----------- internal/storage/ssh/ssh.go | 34 +++++++++++++++---------------- internal/storage/storage.go | 12 +++++------ internal/storage/webdav/webdav.go | 28 ++++++++++++------------- 5 files changed, 66 insertions(+), 66 deletions(-) diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index 68261a42..335cd039 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -33,46 +33,46 @@ func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage } // Name return the name of the storage backend -func (stg *localStorage) Name() string { +func (b *localStorage) Name() string { return "Local" } // Copy copies the given file to the local storage backend. -func (stg *localStorage) Copy(file string) error { - if _, err := os.Stat(stg.DestinationPath); os.IsNotExist(err) { +func (b *localStorage) Copy(file string) error { + if _, err := os.Stat(b.DestinationPath); os.IsNotExist(err) { return nil } _, name := path.Split(file) - if err := utilites.CopyFile(file, path.Join(stg.DestinationPath, name)); err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error copying file to local archive! 
%w", err) + if err := utilites.CopyFile(file, path.Join(b.DestinationPath, name)); err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error copying file to local archive! %w", err) } - stg.Log(storage.INFO, stg.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, stg.DestinationPath) + b.Log(storage.INFO, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath) - if stg.latestSymlink != "" { - symlink := path.Join(stg.DestinationPath, stg.latestSymlink) + if b.latestSymlink != "" { + symlink := path.Join(b.DestinationPath, b.latestSymlink) if _, err := os.Lstat(symlink); err == nil { os.Remove(symlink) } if err := os.Symlink(name, symlink); err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: error creating latest symlink! %w", err) + return b.Log(storage.ERROR, b.Name(), "Copy: error creating latest symlink! %w", err) } - stg.Log(storage.INFO, stg.Name(), "Created/Updated symlink `%s` for latest backup.", stg.latestSymlink) + b.Log(storage.INFO, b.Name(), "Created/Updated symlink `%s` for latest backup.", b.latestSymlink) } return nil } // Prune rotates away backups according to the configuration and provided deadline for the local storage backend. -func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { +func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { globPattern := path.Join( - stg.DestinationPath, + b.DestinationPath, fmt.Sprintf("%s*", pruningPrefix), ) globMatches, err := filepath.Glob(globPattern) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name(), + return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error looking up matching files using pattern %s! %w", globPattern, err, @@ -83,7 +83,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora for _, candidate := range globMatches { fi, err := os.Lstat(candidate) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name(), + return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error calling Lstat on file %s! %w", candidate, err, @@ -99,7 +99,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora for _, candidate := range candidates { fi, err := os.Stat(candidate) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name(), + return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error calling stat on file %s! 
%w", candidate, err, @@ -115,7 +115,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora Pruned: uint(len(matches)), } - stg.DoPrune(len(matches), len(candidates), "local backup(s)", func() error { + b.DoPrune(len(matches), len(candidates), "local backup(s)", func() error { var removeErrors []error for _, match := range matches { if err := os.Remove(match); err != nil { @@ -123,7 +123,7 @@ func (stg *localStorage) Prune(deadline time.Time, pruningPrefix string) (*stora } } if len(removeErrors) != 0 { - return stg.Log(storage.ERROR, stg.Name(), + return b.Log(storage.ERROR, b.Name(), "Prune: %d error(s) deleting local files, starting with: %w", len(removeErrors), utilites.Join(removeErrors...), diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index 5121f5b2..eb1584d4 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -76,31 +76,31 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri } // Name returns the name of the storage backend -func (stg *s3Storage) Name() string { +func (v *s3Storage) Name() string { return "S3" } // Copy copies the given file to the S3/Minio storage backend. -func (stg *s3Storage) Copy(file string) error { +func (b *s3Storage) Copy(file string) error { _, name := path.Split(file) - if _, err := stg.client.FPutObject(context.Background(), stg.bucket, filepath.Join(stg.DestinationPath, name), file, minio.PutObjectOptions{ + if _, err := b.client.FPutObject(context.Background(), b.bucket, filepath.Join(b.DestinationPath, name), file, minio.PutObjectOptions{ ContentType: "application/tar+gzip", - StorageClass: stg.storageClass, + StorageClass: b.storageClass, }); err != nil { errResp := minio.ToErrorResponse(err) - return stg.Log(storage.ERROR, stg.Name(), "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) + return b.Log(storage.ERROR, b.Name(), "Copy: error uploading backup to remote storage: [Message]: '%s', [Code]: %s, [StatusCode]: %d", errResp.Message, errResp.Code, errResp.StatusCode) } - stg.Log(storage.INFO, stg.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, stg.bucket) + b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to bucket `%s`.", file, b.bucket) return nil } // Prune rotates away backups according to the configuration and provided deadline for the S3/Minio storage backend. -func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { - candidates := stg.client.ListObjects(context.Background(), stg.bucket, minio.ListObjectsOptions{ +func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { + candidates := b.client.ListObjects(context.Background(), b.bucket, minio.ListObjectsOptions{ WithMetadata: true, - Prefix: filepath.Join(stg.DestinationPath, pruningPrefix), + Prefix: filepath.Join(b.DestinationPath, pruningPrefix), Recursive: true, }) @@ -109,7 +109,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage. for candidate := range candidates { lenCandidates++ if candidate.Err != nil { - return nil, stg.Log(storage.ERROR, stg.Name(), + return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error looking up candidates from remote storage! %w", candidate.Err, ) @@ -124,7 +124,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage. 
Pruned: uint(len(matches)), } - stg.DoPrune(len(matches), lenCandidates, "remote backup(s)", func() error { + b.DoPrune(len(matches), lenCandidates, "remote backup(s)", func() error { objectsCh := make(chan minio.ObjectInfo) go func() { for _, match := range matches { @@ -132,7 +132,7 @@ func (stg *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage. } close(objectsCh) }() - errChan := stg.client.RemoveObjects(context.Background(), stg.bucket, objectsCh, minio.RemoveObjectsOptions{}) + errChan := b.client.RemoveObjects(context.Background(), b.bucket, objectsCh, minio.RemoveObjectsOptions{}) var removeErrors []error for result := range errChan { if result.Err != nil { diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index 05425b6f..bd8297e4 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -91,22 +91,22 @@ func NewStorageBackend(hostName string, port string, user string, password strin } // Name returns the name of the storage backend -func (stg *sshStorage) Name() string { +func (b *sshStorage) Name() string { return "SSH" } // Copy copies the given file to the SSH storage backend. -func (stg *sshStorage) Copy(file string) error { +func (b *sshStorage) Copy(file string) error { source, err := os.Open(file) _, name := path.Split(file) if err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error reading the file to be uploaded! %w", err) + return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err) } defer source.Close() - destination, err := stg.sftpClient.Create(filepath.Join(stg.DestinationPath, name)) + destination, err := b.sftpClient.Create(filepath.Join(b.DestinationPath, name)) if err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error creating file on SSH storage! %w", err) + return b.Log(storage.ERROR, b.Name(), "Copy: Error creating file on SSH storage! %w", err) } defer destination.Close() @@ -116,40 +116,40 @@ func (stg *sshStorage) Copy(file string) error { if err == io.EOF { tot, err := destination.Write(chunk[:num]) if err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) } if tot != len(chunk[:num]) { - return stg.Log(storage.ERROR, stg.Name(), "sshClient: failed to write stream") + return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream") } break } if err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! %w", err) } tot, err := destination.Write(chunk[:num]) if err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to SSH storage! %w", err) + return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to SSH storage! 
%w", err) } if tot != len(chunk[:num]) { - return stg.Log(storage.ERROR, stg.Name(), "sshClient: failed to write stream") + return b.Log(storage.ERROR, b.Name(), "sshClient: failed to write stream") } } - stg.Log(storage.INFO, stg.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, stg.hostName, stg.DestinationPath) + b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to SSH storage '%s' at path '%s'.", file, b.hostName, b.DestinationPath) return nil } // Prune rotates away backups according to the configuration and provided deadline for the SSH storage backend. -func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { - candidates, err := stg.sftpClient.ReadDir(stg.DestinationPath) +func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { + candidates, err := b.sftpClient.ReadDir(b.DestinationPath) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error reading directory from SSH storage! %w", err) + return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error reading directory from SSH storage! %w", err) } var matches []string @@ -167,10 +167,10 @@ func (stg *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage Pruned: uint(len(matches)), } - stg.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error { + b.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { - if err := stg.sftpClient.Remove(filepath.Join(stg.DestinationPath, match)); err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Prune: Error removing file from SSH storage! %w", err) + if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil { + return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from SSH storage! %w", err) } } return nil diff --git a/internal/storage/storage.go b/internal/storage/storage.go index d4b56965..09344b7f 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -37,23 +37,23 @@ type PruneStats struct { // DoPrune holds general control flow that applies to any kind of storage. // Callers can pass in a thunk that performs the actual deletion of files. 
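// The DoPrune hunk below only renames the receiver; the control flow is
// unchanged. A simplified, self-contained sketch of the thunk pattern the
// comment above describes (logging reduced to fmt.Printf; the real method
// reports through b.Log and b.RetentionDays):

package main

import "fmt"

// doPrune runs the shared safety checks and only invokes removeFiles when
// pruning would leave at least one backup behind.
func doPrune(matches, candidates int, removeFiles func() error) error {
	switch {
	case matches != 0 && matches != candidates:
		if err := removeFiles(); err != nil {
			return err
		}
		fmt.Printf("Pruned %d out of %d backups.\n", matches, candidates)
	case matches != 0 && matches == candidates:
		fmt.Printf("Refusing to delete all %d existing backups.\n", matches)
	default:
		fmt.Printf("None of %d existing backups were pruned.\n", candidates)
	}
	return nil
}

func main() {
	backups := []string{"a.tar.gz", "b.tar.gz", "c.tar.gz"}
	expired := backups[:2]
	doPrune(len(expired), len(backups), func() error {
		for _, name := range expired {
			fmt.Println("removing", name) // storage-specific deletion goes here
		}
		return nil
	})
}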
-func (stg *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { +func (b *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { if lenMatches != 0 && lenMatches != lenCandidates { if err := doRemoveFiles(); err != nil { return err } - stg.Log(INFO, stg.Name(), + b.Log(INFO, b.Name(), "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", lenMatches, lenCandidates, description, - stg.RetentionDays, + b.RetentionDays, ) } else if lenMatches != 0 && lenMatches == lenCandidates { - stg.Log(WARNING, stg.Name(), "The current configuration would delete all %d existing %s.", lenMatches, description) - stg.Log(WARNING, stg.Name(), "Refusing to do so, please check your configuration.") + b.Log(WARNING, b.Name(), "The current configuration would delete all %d existing %s.", lenMatches, description) + b.Log(WARNING, b.Name(), "Refusing to do so, please check your configuration.") } else { - stg.Log(INFO, stg.Name(), "None of %d existing %s were pruned.", lenCandidates, description) + b.Log(INFO, b.Name(), "None of %d existing %s were pruned.", lenCandidates, description) } return nil } diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index aa81f3a0..c147625a 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -54,33 +54,33 @@ func NewStorageBackend(url string, remotePath string, username string, password } // Name returns the name of the storage backend -func (stg *webDavStorage) Name() string { +func (b *webDavStorage) Name() string { return "WebDav" } // Copy copies the given file to the WebDav storage backend. -func (stg *webDavStorage) Copy(file string) error { +func (b *webDavStorage) Copy(file string) error { bytes, err := os.ReadFile(file) _, name := path.Split(file) if err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error reading the file to be uploaded! %w", err) + return b.Log(storage.ERROR, b.Name(), "Copy: Error reading the file to be uploaded! %w", err) } - if err := stg.client.MkdirAll(stg.DestinationPath, 0644); err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error creating directory '%s' on WebDAV server! %w", stg.DestinationPath, err) + if err := b.client.MkdirAll(b.DestinationPath, 0644); err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error creating directory '%s' on WebDAV server! %w", b.DestinationPath, err) } - if err := stg.client.Write(filepath.Join(stg.DestinationPath, name), bytes, 0644); err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Copy: Error uploading the file to WebDAV server! %w", err) + if err := b.client.Write(filepath.Join(b.DestinationPath, name), bytes, 0644); err != nil { + return b.Log(storage.ERROR, b.Name(), "Copy: Error uploading the file to WebDAV server! %w", err) } - stg.Log(storage.INFO, stg.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, stg.url, stg.DestinationPath) + b.Log(storage.INFO, b.Name(), "Uploaded a copy of backup `%s` to WebDAV-URL '%s' at path '%s'.", file, b.url, b.DestinationPath) return nil } // Prune rotates away backups according to the configuration and provided deadline for the WebDav storage backend. 
-func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { - candidates, err := stg.client.ReadDir(stg.DestinationPath) +func (b *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.PruneStats, error) { + candidates, err := b.client.ReadDir(b.DestinationPath) if err != nil { - return nil, stg.Log(storage.ERROR, stg.Name(), "Prune: Error looking up candidates from remote storage! %w", err) + return nil, b.Log(storage.ERROR, b.Name(), "Prune: Error looking up candidates from remote storage! %w", err) } var matches []fs.FileInfo var lenCandidates int @@ -99,10 +99,10 @@ func (stg *webDavStorage) Prune(deadline time.Time, pruningPrefix string) (*stor Pruned: uint(len(matches)), } - stg.DoPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { + b.DoPrune(len(matches), lenCandidates, "WebDAV backup(s)", func() error { for _, match := range matches { - if err := stg.client.Remove(filepath.Join(stg.DestinationPath, match.Name())); err != nil { - return stg.Log(storage.ERROR, stg.Name(), "Prune: Error removing file from WebDAV storage! %w", err) + if err := b.client.Remove(filepath.Join(b.DestinationPath, match.Name())); err != nil { + return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from WebDAV storage! %w", err) } } return nil From 70f18aa3cc061fc827811832700662f6926cf33e Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 15 Aug 2022 14:04:48 +0200 Subject: [PATCH 26/32] Renamed LogFuncDef to Log --- internal/storage/local/local.go | 2 +- internal/storage/s3/s3.go | 2 +- internal/storage/ssh/ssh.go | 2 +- internal/storage/storage.go | 4 ++-- internal/storage/webdav/webdav.go | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index 335cd039..5b33ec31 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -17,7 +17,7 @@ type localStorage struct { } // NewStorageBackend creates and initializes a new local storage backend. -func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.LogFuncDef) storage.Backend { +func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.Log) storage.Backend { strgBackend := &storage.StorageBackend{ Backend: &localStorage{}, diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index eb1584d4..df4f5cba 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -22,7 +22,7 @@ type s3Storage struct { // NewStorageBackend creates and initializes a new S3/Minio storage backend. func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey string, iamRoleEndpoint string, endpointProto string, endpointInsecure bool, - remotePath string, bucket string, storageClass string, logFunc storage.LogFuncDef) (storage.Backend, error) { + remotePath string, bucket string, storageClass string, logFunc storage.Log) (storage.Backend, error) { var creds *credentials.Credentials if accessKeyId != "" && secretAccessKey != "" { diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index bd8297e4..89465098 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -25,7 +25,7 @@ type sshStorage struct { // NewStorageBackend creates and initializes a new SSH storage backend. 
func NewStorageBackend(hostName string, port string, user string, password string, identityFile string, identityPassphrase string, remotePath string, - logFunc storage.LogFuncDef) (storage.Backend, error) { + logFunc storage.Log) (storage.Backend, error) { var authMethods []ssh.AuthMethod diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 09344b7f..3a3ca5b0 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -16,7 +16,7 @@ type StorageBackend struct { Backend DestinationPath string RetentionDays int - Log LogFuncDef + Log Log } type LogType string @@ -27,7 +27,7 @@ const ( ERROR LogType = "ERROR" ) -type LogFuncDef func(logType LogType, context string, msg string, params ...interface{}) error +type Log func(logType LogType, context string, msg string, params ...interface{}) error // PruneStats is a wrapper struct for returning stats after pruning type PruneStats struct { diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index c147625a..e66d6bdb 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -22,7 +22,7 @@ type webDavStorage struct { // NewStorageBackend creates and initializes a new WebDav storage backend. func NewStorageBackend(url string, remotePath string, username string, password string, urlInsecure bool, - logFunc storage.LogFuncDef) (storage.Backend, error) { + logFunc storage.Log) (storage.Backend, error) { if username == "" || password == "" { return nil, errors.New("newScript: WEBDAV_URL is defined, but no credentials were provided") From 17f19b97c5fc82f879d7a056a993ecfb8ccc41ef Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 15 Aug 2022 14:09:05 +0200 Subject: [PATCH 27/32] Removed redundant package name. 
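The dropped aliases (for example utilites
"github.com/offen/docker-volume-backup/internal/utilities") restated the
package's own name, with a typo on top, so the default package name is
used instead; the leftover utilites call sites are corrected in PATCH 32
below. An alias only earns its keep when it disambiguates, as in this
stand-alone sketch (stdlib packages only, unrelated to this repository):

    package main

    import (
    	crand "crypto/rand" // alias required: this package is also named rand
    	"fmt"
    	"math/rand"
    )

    func main() {
    	buf := make([]byte, 4)
    	crand.Read(buf)                 // crypto-quality random bytes
    	fmt.Println(buf, rand.Intn(10)) // math/rand needs no alias
    }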
--- cmd/backup/hooks.go | 2 +- cmd/backup/lock.go | 2 +- cmd/backup/notifications.go | 2 +- cmd/backup/script.go | 2 +- internal/storage/local/local.go | 2 +- internal/storage/s3/s3.go | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/backup/hooks.go b/cmd/backup/hooks.go index 92ade4b1..3d73f112 100644 --- a/cmd/backup/hooks.go +++ b/cmd/backup/hooks.go @@ -7,7 +7,7 @@ import ( "fmt" "sort" - utilites "github.com/offen/docker-volume-backup/internal/utilities" + "github.com/offen/docker-volume-backup/internal/utilities" ) // hook contains a queued action that can be trigger them when the script diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go index 5c684885..ec17ef6d 100644 --- a/cmd/backup/lock.go +++ b/cmd/backup/lock.go @@ -9,7 +9,7 @@ import ( "time" "github.com/gofrs/flock" - utilites "github.com/offen/docker-volume-backup/internal/utilities" + "github.com/offen/docker-volume-backup/internal/utilities" ) // lock opens a lockfile at the given location, keeping it locked until the diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go index dd0e8932..d50d068e 100644 --- a/cmd/backup/notifications.go +++ b/cmd/backup/notifications.go @@ -12,7 +12,7 @@ import ( "time" sTypes "github.com/containrrr/shoutrrr/pkg/types" - utilites "github.com/offen/docker-volume-backup/internal/utilities" + "github.com/offen/docker-volume-backup/internal/utilities" ) //go:embed notifications.tmpl diff --git a/cmd/backup/script.go b/cmd/backup/script.go index 9b1e2002..cb3ffc78 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -19,7 +19,7 @@ import ( "github.com/offen/docker-volume-backup/internal/storage/s3" "github.com/offen/docker-volume-backup/internal/storage/ssh" "github.com/offen/docker-volume-backup/internal/storage/webdav" - utilites "github.com/offen/docker-volume-backup/internal/utilities" + "github.com/offen/docker-volume-backup/internal/utilities" "github.com/containrrr/shoutrrr" "github.com/containrrr/shoutrrr/pkg/router" diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index 5b33ec31..9b6b6494 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -8,7 +8,7 @@ import ( "time" "github.com/offen/docker-volume-backup/internal/storage" - utilites "github.com/offen/docker-volume-backup/internal/utilities" + "github.com/offen/docker-volume-backup/internal/utilities" ) type localStorage struct { diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index df4f5cba..f84cdd74 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -10,7 +10,7 @@ import ( "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/offen/docker-volume-backup/internal/storage" - utilites "github.com/offen/docker-volume-backup/internal/utilities" + "github.com/offen/docker-volume-backup/internal/utilities" ) type s3Storage struct { From e69e32761f141495a0d2dd7ee709ab497433a295 Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 15 Aug 2022 14:10:27 +0200 Subject: [PATCH 28/32] Renamed storagePool to storages. --- cmd/backup/script.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/cmd/backup/script.go b/cmd/backup/script.go index cb3ffc78..ba7baa7f 100644 --- a/cmd/backup/script.go +++ b/cmd/backup/script.go @@ -37,13 +37,13 @@ import ( // script holds all the stateful information required to orchestrate a // single backup run. 
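// The struct below drives the fan-out this patch renames: one slice of
// backends serves both copying and pruning, with per-backend stats keyed
// by Name(). A reduced, self-contained sketch (noopStorage is invented
// and Prune is stripped of its arguments for brevity):

package main

import "fmt"

type PruneStats struct{ Total, Pruned uint }

type Backend interface {
	Name() string
	Prune() (*PruneStats, error)
}

type noopStorage struct{ name string }

func (b *noopStorage) Name() string { return b.name }

func (b *noopStorage) Prune() (*PruneStats, error) {
	return &PruneStats{Total: 3, Pruned: 1}, nil
}

func main() {
	storages := []Backend{&noopStorage{name: "Local"}, &noopStorage{name: "S3"}}
	stats := map[string]PruneStats{}
	for _, backend := range storages {
		s, err := backend.Prune()
		if err != nil {
			return
		}
		stats[backend.Name()] = *s
	}
	fmt.Println(stats) // map[Local:{3 1} S3:{3 1}]
}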
type script struct { - cli *client.Client - storagePool []storage.Backend - logger *logrus.Logger - sender *router.ServiceRouter - template *template.Template - hooks []hook - hookLevel hookLevel + cli *client.Client + storages []storage.Backend + logger *logrus.Logger + sender *router.ServiceRouter + template *template.Template + hooks []hook + hookLevel hookLevel file string stats *Stats @@ -127,7 +127,7 @@ func newScript() (*script, error) { s.c.AwsEndpointProto, s.c.AwsEndpointInsecure, s.c.AwsS3Path, s.c.AwsS3BucketName, s.c.AwsStorageClass, logFunc); err != nil { return nil, err } else { - s.storagePool = append(s.storagePool, s3Backend) + s.storages = append(s.storages, s3Backend) } } @@ -136,7 +136,7 @@ func newScript() (*script, error) { s.c.WebdavUrlInsecure, logFunc); err != nil { return nil, err } else { - s.storagePool = append(s.storagePool, webdavBackend) + s.storages = append(s.storages, webdavBackend) } } @@ -145,12 +145,12 @@ func newScript() (*script, error) { s.c.SSHIdentityPassphrase, s.c.SSHRemotePath, logFunc); err != nil { return nil, err } else { - s.storagePool = append(s.storagePool, sshBackend) + s.storages = append(s.storages, sshBackend) } } localBackend := local.NewStorageBackend(s.c.BackupArchive, s.c.BackupLatestSymlink, logFunc) - s.storagePool = append(s.storagePool, localBackend) + s.storages = append(s.storages, localBackend) if s.c.EmailNotificationRecipient != "" { emailURL := fmt.Sprintf( @@ -466,7 +466,7 @@ func (s *script) copyArchive() error { } } - for _, backend := range s.storagePool { + for _, backend := range s.storages { if err := backend.Copy(s.file); err != nil { return err } @@ -485,7 +485,7 @@ func (s *script) pruneBackups() error { deadline := time.Now().AddDate(0, 0, -int(s.c.BackupRetentionDays)).Add(s.c.BackupPruningLeeway) - for _, backend := range s.storagePool { + for _, backend := range s.storages { if stats, err := backend.Prune(deadline, s.c.BackupPruningPrefix); err == nil { s.stats.Storages[backend.Name()] = StorageStats{ Total: stats.Total, From 0efa7fdfa8008e8913308e81328b0c087a257c6d Mon Sep 17 00:00:00 2001 From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com> Date: Mon, 15 Aug 2022 14:17:43 +0200 Subject: [PATCH 29/32] Simplified creation of new storage backend. --- internal/storage/local/local.go | 19 +++++++------------ internal/storage/s3/s3.go | 24 ++++++++++-------------- internal/storage/ssh/ssh.go | 24 ++++++++++-------------- internal/storage/storage.go | 11 +++++------ internal/storage/webdav/webdav.go | 20 ++++++++------------ 5 files changed, 40 insertions(+), 58 deletions(-) diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go index 9b6b6494..6467c08d 100644 --- a/internal/storage/local/local.go +++ b/internal/storage/local/local.go @@ -18,18 +18,13 @@ type localStorage struct { // NewStorageBackend creates and initializes a new local storage backend. 
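// This patch removes the circular wiring (strgBackend.Backend = localBackend)
// by returning the concrete type directly, which simply embeds the shared
// struct. A minimal stand-alone sketch of that embedding (paths and field
// values are placeholders):

package main

import "fmt"

type StorageBackend struct {
	DestinationPath string
}

type localStorage struct {
	*StorageBackend
	latestSymlink string
}

func newLocalStorage(archivePath, latestSymlink string) *localStorage {
	return &localStorage{
		StorageBackend: &StorageBackend{DestinationPath: archivePath},
		latestSymlink:  latestSymlink,
	}
}

func main() {
	b := newLocalStorage("/archive", "latest.tar.gz")
	// DestinationPath is promoted from the embedded struct.
	fmt.Println(b.DestinationPath, b.latestSymlink)
}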
func NewStorageBackend(archivePath string, latestSymlink string, logFunc storage.Log) storage.Backend { - - strgBackend := &storage.StorageBackend{ - Backend: &localStorage{}, - DestinationPath: archivePath, - Log: logFunc, - } - localBackend := &localStorage{ - StorageBackend: strgBackend, - latestSymlink: latestSymlink, + return &localStorage{ + StorageBackend: &storage.StorageBackend{ + DestinationPath: archivePath, + Log: logFunc, + }, + latestSymlink: latestSymlink, } - strgBackend.Backend = localBackend - return strgBackend } // Name return the name of the storage backend @@ -115,7 +110,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage Pruned: uint(len(matches)), } - b.DoPrune(len(matches), len(candidates), "local backup(s)", func() error { + b.DoPrune(b.Name(), len(matches), len(candidates), "local backup(s)", func() error { var removeErrors []error for _, match := range matches { if err := os.Remove(match); err != nil { diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go index f84cdd74..4c84a05a 100644 --- a/internal/storage/s3/s3.go +++ b/internal/storage/s3/s3.go @@ -60,19 +60,15 @@ func NewStorageBackend(endpoint string, accessKeyId string, secretAccessKey stri return nil, logFunc(storage.ERROR, "S3", "NewScript: error setting up minio client: %w", err) } - strgBackend := &storage.StorageBackend{ - Backend: &s3Storage{}, - DestinationPath: remotePath, - Log: logFunc, - } - sshBackend := &s3Storage{ - StorageBackend: strgBackend, - client: mc, - bucket: bucket, - storageClass: storageClass, - } - strgBackend.Backend = sshBackend - return strgBackend, nil + return &s3Storage{ + StorageBackend: &storage.StorageBackend{ + DestinationPath: remotePath, + Log: logFunc, + }, + client: mc, + bucket: bucket, + storageClass: storageClass, + }, nil } // Name returns the name of the storage backend @@ -124,7 +120,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr Pruned: uint(len(matches)), } - b.DoPrune(len(matches), lenCandidates, "remote backup(s)", func() error { + b.DoPrune(b.Name(), len(matches), lenCandidates, "remote backup(s)", func() error { objectsCh := make(chan minio.ObjectInfo) go func() { for _, match := range matches { diff --git a/internal/storage/ssh/ssh.go b/internal/storage/ssh/ssh.go index 89465098..c7dad707 100644 --- a/internal/storage/ssh/ssh.go +++ b/internal/storage/ssh/ssh.go @@ -75,19 +75,15 @@ func NewStorageBackend(hostName string, port string, user string, password strin return nil, logFunc(storage.ERROR, "SSH", "NewScript: error creating sftp client! 
%w", err) } - strgBackend := &storage.StorageBackend{ - Backend: &sshStorage{}, - DestinationPath: remotePath, - Log: logFunc, - } - sshBackend := &sshStorage{ - StorageBackend: strgBackend, - client: sshClient, - sftpClient: sftpClient, - hostName: hostName, - } - strgBackend.Backend = sshBackend - return strgBackend, nil + return &sshStorage{ + StorageBackend: &storage.StorageBackend{ + DestinationPath: remotePath, + Log: logFunc, + }, + client: sshClient, + sftpClient: sftpClient, + hostName: hostName, + }, nil } // Name returns the name of the storage backend @@ -167,7 +163,7 @@ func (b *sshStorage) Prune(deadline time.Time, pruningPrefix string) (*storage.P Pruned: uint(len(matches)), } - b.DoPrune(len(matches), len(candidates), "SSH backup(s)", func() error { + b.DoPrune(b.Name(), len(matches), len(candidates), "SSH backup(s)", func() error { for _, match := range matches { if err := b.sftpClient.Remove(filepath.Join(b.DestinationPath, match)); err != nil { return b.Log(storage.ERROR, b.Name(), "Prune: Error removing file from SSH storage! %w", err) diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 3a3ca5b0..d33d2f4f 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -13,7 +13,6 @@ type Backend interface { // StorageBackend is a generic type of storage. Everything here are common properties of all storage types. type StorageBackend struct { - Backend DestinationPath string RetentionDays int Log Log @@ -37,12 +36,12 @@ type PruneStats struct { // DoPrune holds general control flow that applies to any kind of storage. // Callers can pass in a thunk that performs the actual deletion of files. -func (b *StorageBackend) DoPrune(lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { +func (b *StorageBackend) DoPrune(context string, lenMatches, lenCandidates int, description string, doRemoveFiles func() error) error { if lenMatches != 0 && lenMatches != lenCandidates { if err := doRemoveFiles(); err != nil { return err } - b.Log(INFO, b.Name(), + b.Log(INFO, context, "Pruned %d out of %d %s as their age exceeded the configured retention period of %d days.", lenMatches, lenCandidates, @@ -50,10 +49,10 @@ func (b *StorageBackend) DoPrune(lenMatches, lenCandidates int, description stri b.RetentionDays, ) } else if lenMatches != 0 && lenMatches == lenCandidates { - b.Log(WARNING, b.Name(), "The current configuration would delete all %d existing %s.", lenMatches, description) - b.Log(WARNING, b.Name(), "Refusing to do so, please check your configuration.") + b.Log(WARNING, context, "The current configuration would delete all %d existing %s.", lenMatches, description) + b.Log(WARNING, context, "Refusing to do so, please check your configuration.") } else { - b.Log(INFO, b.Name(), "None of %d existing %s were pruned.", lenCandidates, description) + b.Log(INFO, context, "None of %d existing %s were pruned.", lenCandidates, description) } return nil } diff --git a/internal/storage/webdav/webdav.go b/internal/storage/webdav/webdav.go index e66d6bdb..e2d7fd78 100644 --- a/internal/storage/webdav/webdav.go +++ b/internal/storage/webdav/webdav.go @@ -39,17 +39,13 @@ func NewStorageBackend(url string, remotePath string, username string, password webdavClient.SetTransport(webdavTransport) } - strgBackend := &storage.StorageBackend{ - Backend: &webDavStorage{}, - DestinationPath: remotePath, - Log: logFunc, - } - webdavBackend := &webDavStorage{ - StorageBackend: strgBackend, - client: webdavClient, - } - 
From 847a72e22e61194008369cd7651f450ac8429499 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Mon, 15 Aug 2022 14:21:00 +0200
Subject: [PATCH 30/32] Added initialization for storage stats map.

---
 cmd/backup/script.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index ba7baa7f..1f076b10 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -70,7 +70,12 @@ func newScript() (*script, error) {
 		stats: &Stats{
 			StartTime: time.Now(),
 			LogOutput: logBuffer,
-			Storages:  map[string]StorageStats{},
+			Storages: map[string]StorageStats{
+				"S3":     {},
+				"WebDav": {},
+				"SSH":    {},
+				"Local":  {},
+			},
 		},
 	}
 
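Editor's note: pre-seeding the map gives every storage backend an explicit zero-valued entry, so code that ranges over s.stats.Storages (for example a notification template reporting per-backend totals) sees all four keys even when a backend never ran. A tiny self-contained illustration of that behavior follows; StorageStats is simplified here and merely stands in for the real struct in cmd/backup.

package main

import "fmt"

// StorageStats is simplified for this example.
type StorageStats struct {
	Total  uint
	Pruned uint
}

func main() {
	// Mirrors the patch: every backend gets an explicit zero entry.
	seeded := map[string]StorageStats{
		"S3":     {},
		"WebDav": {},
		"SSH":    {},
		"Local":  {},
	}

	// Point lookups behave identically either way, since a missing key
	// yields the zero value. Ranging, however, only visits keys that were
	// explicitly added, so seeding keeps unused backends visible in reports.
	for name, stats := range seeded {
		fmt.Printf("%s: %d total, %d pruned\n", name, stats.Total, stats.Pruned)
	}
}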
From 03c98d9539d8b1ca7ae99400bae658585e91a3e6 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Mon, 15 Aug 2022 14:40:25 +0200
Subject: [PATCH 31/32] Invert .dockerignore patterns.

---
 .dockerignore | 27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/.dockerignore b/.dockerignore
index d9026017..246a56f6 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,8 +1,19 @@
-test
-.cirecleci
-.github
-docs
-.editorconfig
-Dockerfile
-LICENSE
-README.md
\ No newline at end of file
+# Ignore everything
+*
+
+# Exceptions:
+# Note: Wildcards for directories like * or ** don't work (yet) with exclamation marks!
+
+!cmd/backup/*.go
+!cmd/backup/*.tmpl
+
+!internal/storage/*.go
+!internal/storage/local/*.go
+!internal/storage/s3/*.go
+!internal/storage/ssh/*.go
+!internal/storage/webdav/*.go
+!internal/utilities/*.go
+
+!Dockerfile
+!entrypoint.sh
+!go.*
\ No newline at end of file
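Editor's note: this patch turns the .dockerignore from a denylist into an allowlist. Everything is excluded from the Docker build context by default via *, and only the build inputs are re-included: the Go sources and templates, the Dockerfile, entrypoint.sh, and the go.* module files. As the in-file comment warns, negated directory wildcards such as !internal/** are not reliably honored by the build-context matcher, which is why each source directory gets its own explicit ! rule. The practical effect is that new files added to the repository stay out of the build context unless deliberately allowlisted.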
From b7f644b4065ca99cd15d8b5ec5d3ba6a42976887 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Mon, 15 Aug 2022 15:07:25 +0200
Subject: [PATCH 32/32] Fix package typo

---
 cmd/backup/hooks.go             |  2 +-
 cmd/backup/lock.go              |  4 ++--
 cmd/backup/notifications.go     |  2 +-
 cmd/backup/script.go            | 20 ++++++++++----------
 internal/storage/local/local.go |  4 ++--
 internal/storage/s3/s3.go       |  2 +-
 internal/utilities/util.go      |  2 +-
 7 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/cmd/backup/hooks.go b/cmd/backup/hooks.go
index 3d73f112..777f396a 100644
--- a/cmd/backup/hooks.go
+++ b/cmd/backup/hooks.go
@@ -52,7 +52,7 @@ func (s *script) runHooks(err error) error {
 		}
 	}
 	if len(actionErrors) != 0 {
-		return utilites.Join(actionErrors...)
+		return utilities.Join(actionErrors...)
 	}
 	return nil
 }
diff --git a/cmd/backup/lock.go b/cmd/backup/lock.go
index ec17ef6d..e3339648 100644
--- a/cmd/backup/lock.go
+++ b/cmd/backup/lock.go
@@ -32,7 +32,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 	for {
 		acquired, err := fileLock.TryLock()
 		if err != nil {
-			return utilites.Noop, fmt.Errorf("lock: error trying lock: %w", err)
+			return utilities.Noop, fmt.Errorf("lock: error trying lock: %w", err)
 		}
 		if acquired {
 			if s.encounteredLock {
@@ -53,7 +53,7 @@ func (s *script) lock(lockfile string) (func() error, error) {
 		case <-retry.C:
 			continue
 		case <-deadline.C:
-			return utilites.Noop, errors.New("lock: timed out waiting for lockfile to become available")
+			return utilities.Noop, errors.New("lock: timed out waiting for lockfile to become available")
 		}
 	}
 }
diff --git a/cmd/backup/notifications.go b/cmd/backup/notifications.go
index d50d068e..67b560c6 100644
--- a/cmd/backup/notifications.go
+++ b/cmd/backup/notifications.go
@@ -69,7 +69,7 @@ func (s *script) sendNotification(title, body string) error {
 		}
 	}
 	if len(errs) != 0 {
-		return fmt.Errorf("sendNotification: error sending message: %w", utilites.Join(errs...))
+		return fmt.Errorf("sendNotification: error sending message: %w", utilities.Join(errs...))
 	}
 	return nil
 }
diff --git a/cmd/backup/script.go b/cmd/backup/script.go
index 1f076b10..ba17ffeb 100644
--- a/cmd/backup/script.go
+++ b/cmd/backup/script.go
@@ -58,7 +58,7 @@ type script struct {
 // reading from env vars or other configuration sources is expected to happen
 // in this method.
 func newScript() (*script, error) {
-	stdOut, logBuffer := utilites.Buffer(os.Stdout)
+	stdOut, logBuffer := utilities.Buffer(os.Stdout)
 	s := &script{
 		c: &Config{},
 		logger: &logrus.Logger{
@@ -228,14 +228,14 @@ func newScript() (*script, error) {
 // restart everything that has been stopped.
 func (s *script) stopContainers() (func() error, error) {
 	if s.cli == nil {
-		return utilites.Noop, nil
+		return utilities.Noop, nil
 	}
 
 	allContainers, err := s.cli.ContainerList(context.Background(), types.ContainerListOptions{
 		Quiet: true,
 	})
 	if err != nil {
-		return utilites.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
+		return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers: %w", err)
 	}
 
 	containerLabel := fmt.Sprintf(
@@ -251,11 +251,11 @@ func (s *script) stopContainers() (func() error, error) {
 	})
 
 	if err != nil {
-		return utilites.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
+		return utilities.Noop, fmt.Errorf("stopContainersAndRun: error querying for containers to stop: %w", err)
 	}
 
 	if len(containersToStop) == 0 {
-		return utilites.Noop, nil
+		return utilities.Noop, nil
 	}
 
 	s.logger.Infof(
@@ -280,7 +280,7 @@ func (s *script) stopContainers() (func() error, error) {
 		stopError = fmt.Errorf(
 			"stopContainersAndRun: %d error(s) stopping containers: %w",
 			len(stopErrors),
-			utilites.Join(stopErrors...),
+			utilities.Join(stopErrors...),
 		)
 	}
 
@@ -331,7 +331,7 @@ func (s *script) stopContainers() (func() error, error) {
 			return fmt.Errorf(
 				"stopContainersAndRun: %d error(s) restarting containers and services: %w",
 				len(restartErrors),
-				utilites.Join(restartErrors...),
+				utilities.Join(restartErrors...),
 			)
 		}
 		s.logger.Infof(
@@ -357,7 +357,7 @@ func (s *script) createArchive() error {
 		backupSources = filepath.Join("/tmp", s.c.BackupSources)
 		// copy before compressing guard against a situation where backup folder's content are still growing.
 		s.registerHook(hookLevelPlumbing, func(error) error {
-			if err := utilites.Remove(backupSources); err != nil {
+			if err := utilities.Remove(backupSources); err != nil {
 				return fmt.Errorf("takeBackup: error removing snapshot: %w", err)
 			}
 			s.logger.Infof("Removed snapshot `%s`.", backupSources)
@@ -374,7 +374,7 @@ func (s *script) createArchive() error {
 	tarFile := s.file
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := utilites.Remove(tarFile); err != nil {
+		if err := utilities.Remove(tarFile); err != nil {
 			return fmt.Errorf("takeBackup: error removing tar file: %w", err)
 		}
 		s.logger.Infof("Removed tar file `%s`.", tarFile)
@@ -419,7 +419,7 @@ func (s *script) encryptArchive() error {
 	gpgFile := fmt.Sprintf("%s.gpg", s.file)
 	s.registerHook(hookLevelPlumbing, func(error) error {
-		if err := utilites.Remove(gpgFile); err != nil {
+		if err := utilities.Remove(gpgFile); err != nil {
 			return fmt.Errorf("encryptBackup: error removing gpg file: %w", err)
 		}
 		s.logger.Infof("Removed GPG file `%s`.", gpgFile)
diff --git a/internal/storage/local/local.go b/internal/storage/local/local.go
index 6467c08d..5d9158b7 100644
--- a/internal/storage/local/local.go
+++ b/internal/storage/local/local.go
@@ -40,7 +40,7 @@ func (b *localStorage) Copy(file string) error {
 	_, name := path.Split(file)
 
-	if err := utilites.CopyFile(file, path.Join(b.DestinationPath, name)); err != nil {
+	if err := utilities.CopyFile(file, path.Join(b.DestinationPath, name)); err != nil {
 		return b.Log(storage.ERROR, b.Name(), "Copy: Error copying file to local archive! %w", err)
 	}
 	b.Log(storage.INFO, b.Name(), "Stored copy of backup `%s` in local archive `%s`.", file, b.DestinationPath)
@@ -121,7 +121,7 @@ func (b *localStorage) Prune(deadline time.Time, pruningPrefix string) (*storage
 			return b.Log(storage.ERROR, b.Name(), "Prune: %d error(s) deleting local files, starting with: %w",
 				len(removeErrors),
-				utilites.Join(removeErrors...),
+				utilities.Join(removeErrors...),
 			)
 		}
 		return nil
diff --git a/internal/storage/s3/s3.go b/internal/storage/s3/s3.go
index 4c84a05a..cb162ead 100644
--- a/internal/storage/s3/s3.go
+++ b/internal/storage/s3/s3.go
@@ -136,7 +136,7 @@ func (b *s3Storage) Prune(deadline time.Time, pruningPrefix string) (*storage.Pr
 			}
 		}
 		if len(removeErrors) != 0 {
-			return utilites.Join(removeErrors...)
+			return utilities.Join(removeErrors...)
 		}
 		return nil
 	})
diff --git a/internal/utilities/util.go b/internal/utilities/util.go
index 45bbf01e..5d55a2d6 100644
--- a/internal/utilities/util.go
+++ b/internal/utilities/util.go
@@ -1,7 +1,7 @@
 // Copyright 2022 - Offen Authors
 // SPDX-License-Identifier: MPL-2.0
 
-package utilites
+package utilities
 
 import (
 	"bytes"
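Editor's note: the series ends here, truncated inside internal/utilities/util.go just after the package rename. The Join helper this last patch touches at every call site is not shown in any hunk; based purely on how it is used (a variadic list of errors collapsed into a single value that callers wrap with %w), a plausible sketch might look like the following. Treat it as an assumption about the helper's shape, not the actual implementation from the repository.

package utilities

import (
	"errors"
	"strings"
)

// Join flattens multiple errors into one. Sketch only: inferred from call
// sites such as utilities.Join(removeErrors...); the real implementation in
// the patch series may differ.
func Join(errs ...error) error {
	if len(errs) == 0 {
		return nil
	}
	if len(errs) == 1 {
		return errs[0]
	}
	var msgs []string
	for _, err := range errs {
		if err != nil {
			msgs = append(msgs, err.Error())
		}
	}
	return errors.New(strings.Join(msgs, ", "))
}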