From 66b6f40b613fe92c8609b7f67468fb11e4bf76e9 Mon Sep 17 00:00:00 2001 From: Dane Everitt Date: Sun, 17 Jan 2021 21:05:51 -0800 Subject: [PATCH] Fix import cycle issue --- router/router.go | 1 + router/router_server_backup.go | 44 +++++++-- server/{filesystem => backup}/archive.go | 16 +-- server/backup/backup.go | 35 +------ server/backup/backup_local.go | 29 +++--- server/backup/backup_s3.go | 24 +++-- server/filesystem/compress.go | 104 ++++++++++++++++++-- server/filesystem/decompress.go | 118 ----------------------- system/utils.go | 21 ++++ 9 files changed, 193 insertions(+), 199 deletions(-) rename server/{filesystem => backup}/archive.go (98%) delete mode 100644 server/filesystem/decompress.go diff --git a/router/router.go b/router/router.go index 00eea02..727ceae 100644 --- a/router/router.go +++ b/router/router.go @@ -95,6 +95,7 @@ func Configure() *gin.Engine { backup := server.Group("/backup") { backup.POST("", postServerBackup) + backup.POST("/:backup/restore", postServerRestoreBackup) backup.DELETE("/:backup", deleteServerBackup) } } diff --git a/router/router_server_backup.go b/router/router_server_backup.go index 15be67b..dc7946e 100644 --- a/router/router_server_backup.go +++ b/router/router_server_backup.go @@ -7,9 +7,11 @@ import ( "emperror.dev/errors" "github.com/apex/log" "github.com/gin-gonic/gin" + "github.com/mholt/archiver/v3" "github.com/pterodactyl/wings/router/middleware" "github.com/pterodactyl/wings/server" "github.com/pterodactyl/wings/server/backup" + "github.com/pterodactyl/wings/system" ) // postServerBackup performs a backup against a given server instance using the @@ -17,16 +19,26 @@ import ( func postServerBackup(c *gin.Context) { s := middleware.ExtractServer(c) logger := middleware.ExtractLogger(c) - var data backup.Request + var data struct { + Adapter backup.AdapterType `json:"adapter"` + Uuid string `json:"uuid"` + Ignore string `json:"ignore"` + } if err := c.BindJSON(&data); err != nil { return } - adapter, err := data.AsBackup() - if err != nil { - middleware.CaptureAndAbort(c, err) + var adapter backup.BackupInterface + switch data.Adapter { + case backup.LocalBackupAdapter: + adapter = backup.NewLocal(data.Uuid, data.Ignore) + case backup.S3BackupAdapter: + adapter = backup.NewS3(data.Uuid, data.Ignore) + default: + middleware.CaptureAndAbort(c, errors.New("router/backups: provided adapter is not valid: "+string(data.Adapter))) return } + // Attach the server ID and the request ID to the adapter log context for easier // parsing in the logs. adapter.WithLogContext(map[string]interface{}{ @@ -55,7 +67,6 @@ func postServerRestoreBackup(c *gin.Context) { logger := middleware.ExtractLogger(c) var data struct { - UUID string `binding:"required,uuid" json:"uuid"` Adapter backup.AdapterType `binding:"required,oneof=wings s3" json:"adapter"` TruncateDirectory bool `json:"truncate_directory"` // A UUID is always required for this endpoint, however the download URL @@ -82,16 +93,33 @@ func postServerRestoreBackup(c *gin.Context) { // Now that we've cleaned up the data directory if necessary, grab the backup file // and attempt to restore it into the server directory. if data.Adapter == backup.LocalBackupAdapter { - b, _, err := backup.LocateLocal(data.UUID) + b, _, err := backup.LocateLocal(c.Param("backup")) if err != nil { middleware.CaptureAndAbort(c, err) return } - if err := b.Restore(s); err != nil { + // Restore restores a backup to the provided server's root data directory. 
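+		// That logic previously lived on LocalBackup.Restore, and is inlined here to
+		// break the import cycle between the server and backup packages: walk every
+		// entry in the archive, skip directories, and write each regular file back
+		// into the server's data directory.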
+ err = archiver.Walk(b.Path(), func(f archiver.File) error { + if f.IsDir() { + return nil + } + name, err := system.ExtractArchiveSourceName(f, "/") + if err != nil { + return err + } + return s.Filesystem().Writefile(name, f) + }) + if err != nil { middleware.CaptureAndAbort(c, err) return } + c.Status(http.StatusNoContent) + return } + + // Since this is not a local backup we need to stream the archive and then + // parse over the contents as we go in order to restore it to the server. + c.Status(http.StatusNoContent) } @@ -120,4 +148,4 @@ func deleteServerBackup(c *gin.Context) { return } c.Status(http.StatusNoContent) -} \ No newline at end of file +} diff --git a/server/filesystem/archive.go b/server/backup/archive.go similarity index 98% rename from server/filesystem/archive.go rename to server/backup/archive.go index f4227de..ded9fab 100644 --- a/server/filesystem/archive.go +++ b/server/backup/archive.go @@ -1,7 +1,13 @@ -package filesystem +package backup import ( "archive/tar" + "io" + "os" + "path/filepath" + "strings" + "sync" + "emperror.dev/errors" "github.com/apex/log" "github.com/juju/ratelimit" @@ -9,11 +15,6 @@ import ( "github.com/klauspost/pgzip" "github.com/pterodactyl/wings/config" "github.com/sabhiram/go-gitignore" - "io" - "os" - "path/filepath" - "strings" - "sync" ) const memory = 4 * 1024 @@ -39,7 +40,8 @@ type Archive struct { Files []string } -// Creates an archive at dst with all of the files defined in the included files struct. +// Create creates an archive at dst with all of the files defined in the +// included files struct. func (a *Archive) Create(dst string) error { f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { diff --git a/server/backup/backup.go b/server/backup/backup.go index 9c50e84..260bd88 100644 --- a/server/backup/backup.go +++ b/server/backup/backup.go @@ -8,7 +8,6 @@ import ( "path" "sync" - "emperror.dev/errors" "github.com/apex/log" "github.com/pterodactyl/wings/api" "github.com/pterodactyl/wings/config" @@ -21,45 +20,13 @@ const ( S3BackupAdapter AdapterType = "s3" ) -type Request struct { - Adapter AdapterType `json:"adapter"` - Uuid string `json:"uuid"` - Ignore string `json:"ignore"` -} - -// AsBackup returns a new backup adapter based on the request value. -func (r *Request) AsBackup() (BackupInterface, error) { - var adapter BackupInterface - switch r.Adapter { - case LocalBackupAdapter: - adapter = &LocalBackup{ - Backup{ - Uuid: r.Uuid, - Ignore: r.Ignore, - adapter: LocalBackupAdapter, - }, - } - case S3BackupAdapter: - adapter = &S3Backup{ - Backup: Backup{ - Uuid: r.Uuid, - Ignore: r.Ignore, - adapter: S3BackupAdapter, - }, - } - default: - return nil, errors.New("server/backup: unsupported adapter type: " + string(r.Adapter)) - } - return adapter, nil -} - type ArchiveDetails struct { Checksum string `json:"checksum"` ChecksumType string `json:"checksum_type"` Size int64 `json:"size"` } -// Returns a request object. +// ToRequest returns a request object. 
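+// The archive checksum and size are copied over as-is, together with the
+// success state passed in by the caller.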
func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest { return api.BackupRequest{ Checksum: ad.Checksum, diff --git a/server/backup/backup_local.go b/server/backup/backup_local.go index 75305f9..1da94da 100644 --- a/server/backup/backup_local.go +++ b/server/backup/backup_local.go @@ -3,10 +3,6 @@ package backup import ( "errors" "os" - - "github.com/mholt/archiver/v3" - "github.com/pterodactyl/wings/server" - "github.com/pterodactyl/wings/server/filesystem" ) type LocalBackup struct { @@ -15,6 +11,16 @@ type LocalBackup struct { var _ BackupInterface = (*LocalBackup)(nil) +func NewLocal(uuid string, ignore string) *LocalBackup { + return &LocalBackup{ + Backup{ + Uuid: uuid, + Ignore: ignore, + adapter: LocalBackupAdapter, + }, + } +} + // LocateLocal finds the backup for a server and returns the local path. This // will obviously only work if the backup was created as a local backup. func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) { @@ -50,7 +56,7 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) { // Generate generates a backup of the selected files and pushes it to the // defined location for this instance. func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) { - a := &filesystem.Archive{ + a := &Archive{ BasePath: basePath, Ignore: ignore, } @@ -64,16 +70,3 @@ func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) return b.Details(), nil } -// Restore restores a backup to the provided server's root data directory. -func (b *LocalBackup) Restore(s *server.Server) error { - return archiver.Walk(b.Path(), func(f archiver.File) error { - if f.IsDir() { - return nil - } - name, err := filesystem.ExtractArchiveSourceName(f, "/") - if err != nil { - return err - } - return s.Filesystem().Writefile(name, f) - }) -} diff --git a/server/backup/backup_s3.go b/server/backup/backup_s3.go index e5514b0..9bb3b7c 100644 --- a/server/backup/backup_s3.go +++ b/server/backup/backup_s3.go @@ -2,12 +2,12 @@ package backup import ( "fmt" - "github.com/pterodactyl/wings/api" - "github.com/pterodactyl/wings/server/filesystem" "io" "net/http" "os" "strconv" + + "github.com/pterodactyl/wings/api" ) type S3Backup struct { @@ -16,22 +16,32 @@ type S3Backup struct { var _ BackupInterface = (*S3Backup)(nil) -// Removes a backup from the system. +func NewS3(uuid string, ignore string) *S3Backup { + return &S3Backup{ + Backup{ + Uuid: uuid, + Ignore: ignore, + adapter: S3BackupAdapter, + }, + } +} + +// Remove removes a backup from the system. func (s *S3Backup) Remove() error { return os.Remove(s.Path()) } -// Attaches additional context to the log output for this backup. +// WithLogContext attaches additional context to the log output for this backup. func (s *S3Backup) WithLogContext(c map[string]interface{}) { s.logContext = c } -// Generates a new backup on the disk, moves it into the S3 bucket via the provided -// presigned URL, and then deletes the backup from the disk. +// Generate creates a new backup on the disk, moves it into the S3 bucket via +// the provided presigned URL, and then deletes the backup from the disk. 
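+// Note the deferred Remove call: the local archive is always cleaned up once
+// the upload attempt finishes, whether or not it succeeded.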
func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) { defer s.Remove() - a := &filesystem.Archive{ + a := &Archive{ BasePath: basePath, Ignore: ignore, } diff --git a/server/filesystem/compress.go b/server/filesystem/compress.go index 595c393..7f26200 100644 --- a/server/filesystem/compress.go +++ b/server/filesystem/compress.go @@ -6,16 +6,23 @@ import ( "path" "path/filepath" "strings" + "sync/atomic" "time" + + "github.com/mholt/archiver/v3" + "github.com/pterodactyl/wings/server/backup" + "github.com/pterodactyl/wings/system" ) -// Compresses all of the files matching the given paths in the specified directory. This function -// also supports passing nested paths to only compress certain files and folders when working in -// a larger directory. This effectively creates a local backup, but rather than ignoring specific -// files and folders, it takes an allow-list of files and folders. +// CompressFiles compresses all of the files matching the given paths in the +// specified directory. This function also supports passing nested paths to only +// compress certain files and folders when working in a larger directory. This +// effectively creates a local backup, but rather than ignoring specific files +// and folders, it takes an allow-list of files and folders. // -// All paths are relative to the dir that is passed in as the first argument, and the compressed -// file will be placed at that location named `archive-{date}.tar.gz`. +// All paths are relative to the dir that is passed in as the first argument, +// and the compressed file will be placed at that location named +// `archive-{date}.tar.gz`. func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) { cleanedRootDir, err := fs.SafePath(dir) if err != nil { @@ -32,7 +39,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er return nil, err } - a := &Archive{BasePath: cleanedRootDir, Files: cleaned} + a := &backup.Archive{BasePath: cleanedRootDir, Files: cleaned} d := path.Join( cleanedRootDir, fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")), @@ -57,3 +64,86 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er return f, nil } + +// SpaceAvailableForDecompression looks through a given archive and determines +// if decompressing it would put the server over its allocated disk space limit. +func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error { + // Don't waste time trying to determine this if we know the server will have the space for + // it since there is no limit. + if fs.MaxDisk() <= 0 { + return nil + } + + source, err := fs.SafePath(filepath.Join(dir, file)) + if err != nil { + return err + } + + // Get the cached size in a parallel process so that if it is not cached we are not + // waiting an unnecessary amount of time on this call. + dirSize, err := fs.DiskUsage(false) + + var size int64 + // Walk over the archive and figure out just how large the final output would be from unarchiving it. 
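+	// NOTE(added safeguard): surface any error from the DiskUsage call above
+	// before err is reused by the walk below, instead of silently discarding it.
+	if err != nil {
+		return err
+	}
+	// The callback bails out with a disk space error as soon as the running
+	// total exceeds the server's limit; the atomic add keeps the counter safe
+	// however the walker invokes it.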
+ err = archiver.Walk(source, func(f archiver.File) error { + if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() { + return &Error{code: ErrCodeDiskSpace} + } + return nil + }) + if err != nil { + if strings.HasPrefix(err.Error(), "format ") { + return &Error{code: ErrCodeUnknownArchive} + } + return err + } + return err +} + +// DecompressFile will decompress a file in a given directory by using the +// archiver tool to infer the file type and go from there. This will walk over +// all of the files within the given archive and ensure that there is not a +// zip-slip attack being attempted by validating that the final path is within +// the server data directory. +func (fs *Filesystem) DecompressFile(dir string, file string) error { + source, err := fs.SafePath(filepath.Join(dir, file)) + if err != nil { + return err + } + // Ensure that the source archive actually exists on the system. + if _, err := os.Stat(source); err != nil { + return err + } + + // Walk over all of the files spinning up an additional go-routine for each file we've encountered + // and then extract that file from the archive and write it to the disk. If any part of this process + // encounters an error the entire process will be stopped. + err = archiver.Walk(source, func(f archiver.File) error { + // Don't waste time with directories, we don't need to create them if they have no contents, and + // we will ensure the directory exists when opening the file for writing anyways. + if f.IsDir() { + return nil + } + name, err := system.ExtractArchiveSourceName(f, dir) + if err != nil { + return WrapError(err, filepath.Join(dir, f.Name())) + } + p := filepath.Join(dir, name) + // If it is ignored, just don't do anything with the file and skip over it. + if err := fs.IsIgnored(p); err != nil { + return nil + } + if err := fs.Writefile(p, f); err != nil { + return &Error{code: ErrCodeUnknownError, err: err, resolved: source} + } + return nil + }) + if err != nil { + if strings.HasPrefix(err.Error(), "format ") { + return &Error{code: ErrCodeUnknownArchive} + } + return err + } + return nil +} + diff --git a/server/filesystem/decompress.go b/server/filesystem/decompress.go deleted file mode 100644 index b7f18f2..0000000 --- a/server/filesystem/decompress.go +++ /dev/null @@ -1,118 +0,0 @@ -package filesystem - -import ( - "archive/tar" - "archive/zip" - "compress/gzip" - "fmt" - "os" - "path/filepath" - "reflect" - "strings" - "sync/atomic" - - "emperror.dev/errors" - "github.com/mholt/archiver/v3" -) - -// SpaceAvailableForDecompression looks through a given archive and determines -// if decompressing it would put the server over its allocated disk space limit. -func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error { - // Don't waste time trying to determine this if we know the server will have the space for - // it since there is no limit. - if fs.MaxDisk() <= 0 { - return nil - } - - source, err := fs.SafePath(filepath.Join(dir, file)) - if err != nil { - return err - } - - // Get the cached size in a parallel process so that if it is not cached we are not - // waiting an unnecessary amount of time on this call. - dirSize, err := fs.DiskUsage(false) - - var size int64 - // Walk over the archive and figure out just how large the final output would be from unarchiving it. 
- err = archiver.Walk(source, func(f archiver.File) error { - if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() { - return &Error{code: ErrCodeDiskSpace} - } - return nil - }) - if err != nil { - if strings.HasPrefix(err.Error(), "format ") { - return &Error{code: ErrCodeUnknownArchive} - } - return err - } - return err -} - -// DecompressFile will decompress a file in a given directory by using the -// archiver tool to infer the file type and go from there. This will walk over -// all of the files within the given archive and ensure that there is not a -// zip-slip attack being attempted by validating that the final path is within -// the server data directory. -func (fs *Filesystem) DecompressFile(dir string, file string) error { - source, err := fs.SafePath(filepath.Join(dir, file)) - if err != nil { - return err - } - // Ensure that the source archive actually exists on the system. - if _, err := os.Stat(source); err != nil { - return err - } - - // Walk over all of the files spinning up an additional go-routine for each file we've encountered - // and then extract that file from the archive and write it to the disk. If any part of this process - // encounters an error the entire process will be stopped. - err = archiver.Walk(source, func(f archiver.File) error { - // Don't waste time with directories, we don't need to create them if they have no contents, and - // we will ensure the directory exists when opening the file for writing anyways. - if f.IsDir() { - return nil - } - name, err := ExtractArchiveSourceName(f, dir) - if err != nil { - return err - } - p := filepath.Join(dir, name) - // If it is ignored, just don't do anything with the file and skip over it. - if err := fs.IsIgnored(p); err != nil { - return nil - } - if err := fs.Writefile(p, f); err != nil { - return &Error{code: ErrCodeUnknownError, err: err, resolved: source} - } - return nil - }) - if err != nil { - if strings.HasPrefix(err.Error(), "format ") { - return &Error{code: ErrCodeUnknownArchive} - } - return err - } - return nil -} - -// ExtractArchiveSourceName looks for the provided archiver.File's name if it is -// a type that is supported, otherwise it returns an error to the caller. -func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) { - switch s := f.Sys().(type) { - case *tar.Header: - name = s.Name - case *gzip.Header: - name = s.Name - case *zip.FileHeader: - name = s.Name - default: - err = &Error{ - code: ErrCodeUnknownError, - resolved: filepath.Join(dir, f.Name()), - err: errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())), - } - } - return name, err -} diff --git a/system/utils.go b/system/utils.go index 681ac2a..b32dec1 100644 --- a/system/utils.go +++ b/system/utils.go @@ -1,18 +1,23 @@ package system import ( + "archive/tar" + "archive/zip" "bufio" "bytes" + "compress/gzip" "context" "encoding/json" "fmt" "io" + "reflect" "strconv" "strings" "sync" "time" "emperror.dev/errors" + "github.com/mholt/archiver/v3" ) var cr = []byte(" \r") @@ -36,6 +41,22 @@ func MustInt(v string) int { return i } +// ExtractArchiveSourceName looks for the provided archiver.File's name if it is +// a type that is supported, otherwise it returns an error to the caller. 
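+// Tar, gzip, and zip headers are recognized; any other concrete type results
+// in an error naming the unexpected source type.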
+func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) {
+	switch s := f.Sys().(type) {
+	case *tar.Header:
+		name = s.Name
+	case *gzip.Header:
+		name = s.Name
+	case *zip.FileHeader:
+		name = s.Name
+	default:
+		err = errors.Errorf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())
+	}
+	return name, err
+}
+
 func ScanReader(r io.Reader, callback func(line string)) error {
 	br := bufio.NewReader(r)
 	// Avoid constantly re-allocating memory when we're flooding lines through this