Fix import cycle issue

Dane Everitt 2021-01-17 21:05:51 -08:00
parent 7dd0acebc0
commit 66b6f40b61
No known key found for this signature in database
GPG Key ID: EEA66103B3D71F53
9 changed files with 193 additions and 199 deletions

View File

@@ -95,6 +95,7 @@ func Configure() *gin.Engine {
 			backup := server.Group("/backup")
 			{
 				backup.POST("", postServerBackup)
+				backup.POST("/:backup/restore", postServerRestoreBackup)
 				backup.DELETE("/:backup", deleteServerBackup)
 			}
 		}
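Note: the new route hangs the restore action off the existing per-server backup group. As a rough illustration only — the /api/servers/:server prefix, the port, and the Bearer-token auth scheme are assumptions based on how the surrounding wings routes are normally mounted, and both UUIDs are placeholders — a client call might look like:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	serverID := "00000000-0000-0000-0000-000000000000" // placeholder server UUID
	backupID := "11111111-1111-1111-1111-111111111111" // placeholder backup UUID

	// The body fields match the struct bound in postServerRestoreBackup below:
	// adapter must be one of "wings" or "s3".
	body := bytes.NewBufferString(`{"adapter":"wings","truncate_directory":true}`)

	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8080/api/servers/"+serverID+"/backup/"+backupID+"/restore", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <node-token>") // assumed auth scheme
	req.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status) // expect 204 No Content on success
}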

View File

@@ -7,9 +7,11 @@ import (
 	"emperror.dev/errors"
 	"github.com/apex/log"
 	"github.com/gin-gonic/gin"
+	"github.com/mholt/archiver/v3"
 	"github.com/pterodactyl/wings/router/middleware"
 	"github.com/pterodactyl/wings/server"
 	"github.com/pterodactyl/wings/server/backup"
+	"github.com/pterodactyl/wings/system"
 )
 
 // postServerBackup performs a backup against a given server instance using the
@@ -17,16 +19,26 @@ import (
 func postServerBackup(c *gin.Context) {
 	s := middleware.ExtractServer(c)
 	logger := middleware.ExtractLogger(c)
-	var data backup.Request
+	var data struct {
+		Adapter backup.AdapterType `json:"adapter"`
+		Uuid    string             `json:"uuid"`
+		Ignore  string             `json:"ignore"`
+	}
 	if err := c.BindJSON(&data); err != nil {
 		return
 	}
-	adapter, err := data.AsBackup()
-	if err != nil {
-		middleware.CaptureAndAbort(c, err)
+	var adapter backup.BackupInterface
+	switch data.Adapter {
+	case backup.LocalBackupAdapter:
+		adapter = backup.NewLocal(data.Uuid, data.Ignore)
+	case backup.S3BackupAdapter:
+		adapter = backup.NewS3(data.Uuid, data.Ignore)
+	default:
+		middleware.CaptureAndAbort(c, errors.New("router/backups: provided adapter is not valid: "+string(data.Adapter)))
 		return
 	}
 	// Attach the server ID and the request ID to the adapter log context for easier
 	// parsing in the logs.
 	adapter.WithLogContext(map[string]interface{}{
@@ -55,7 +67,6 @@ func postServerRestoreBackup(c *gin.Context) {
 	logger := middleware.ExtractLogger(c)
 	var data struct {
-		UUID              string             `binding:"required,uuid" json:"uuid"`
 		Adapter           backup.AdapterType `binding:"required,oneof=wings s3" json:"adapter"`
 		TruncateDirectory bool               `json:"truncate_directory"`
 		// A UUID is always required for this endpoint, however the download URL
@@ -82,16 +93,33 @@ func postServerRestoreBackup(c *gin.Context) {
 	// Now that we've cleaned up the data directory if necessary, grab the backup file
 	// and attempt to restore it into the server directory.
 	if data.Adapter == backup.LocalBackupAdapter {
-		b, _, err := backup.LocateLocal(data.UUID)
+		b, _, err := backup.LocateLocal(c.Param("backup"))
 		if err != nil {
 			middleware.CaptureAndAbort(c, err)
 			return
 		}
-		if err := b.Restore(s); err != nil {
+		// Restore restores a backup to the provided server's root data directory.
+		err = archiver.Walk(b.Path(), func(f archiver.File) error {
+			if f.IsDir() {
+				return nil
+			}
+			name, err := system.ExtractArchiveSourceName(f, "/")
+			if err != nil {
+				return err
+			}
+			return s.Filesystem().Writefile(name, f)
+		})
+		if err != nil {
 			middleware.CaptureAndAbort(c, err)
 			return
 		}
+		c.Status(http.StatusNoContent)
+		return
 	}
+	// Since this is not a local backup we need to stream the archive and then
+	// parse over the contents as we go in order to restore it to the server.
 	c.Status(http.StatusNoContent)
 }
@@ -120,4 +148,4 @@ func deleteServerBackup(c *gin.Context) {
 		return
 	}
 	c.Status(http.StatusNoContent)
 }
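Note: inlining the archive walk in the router (instead of calling a Restore method on LocalBackup) is the heart of the import-cycle fix — server/backup no longer needs to import server, while the router may freely import both. A self-contained sketch of the same technique follows; restoreArchive is a hypothetical name, the archive and output paths are placeholders, it writes straight to disk instead of going through the wings Filesystem, and the zip-slip guard stands in for the checks Writefile normally performs.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/mholt/archiver/v3"
)

// restoreArchive walks every entry in the archive at src and writes the
// regular files into dst, mirroring the walk inlined in postServerRestoreBackup.
func restoreArchive(src, dst string) error {
	return archiver.Walk(src, func(f archiver.File) error {
		if f.IsDir() {
			return nil
		}
		// f.Name() is only the base name; the full in-archive path lives on
		// the format-specific header (tar shown here).
		name := f.Name()
		if h, ok := f.Header.(*tar.Header); ok {
			name = h.Name
		}
		p := filepath.Join(dst, name)
		// Guard against zip-slip: the resolved path must stay inside dst.
		if !strings.HasPrefix(p, filepath.Clean(dst)+string(os.PathSeparator)) {
			return fmt.Errorf("illegal path in archive: %s", name)
		}
		if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil {
			return err
		}
		out, err := os.Create(p)
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, f)
		return err
	})
}

func main() {
	if err := restoreArchive("backup.tar.gz", "restored"); err != nil {
		panic(err)
	}
}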

View File

@@ -1,7 +1,13 @@
-package filesystem
+package backup
 
 import (
 	"archive/tar"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
 
 	"emperror.dev/errors"
 	"github.com/apex/log"
 	"github.com/juju/ratelimit"
@@ -9,11 +15,6 @@ import (
 	"github.com/klauspost/pgzip"
 	"github.com/pterodactyl/wings/config"
 	"github.com/sabhiram/go-gitignore"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
 )
 
 const memory = 4 * 1024
@@ -39,7 +40,8 @@ type Archive struct {
 	Files []string
 }
 
-// Creates an archive at dst with all of the files defined in the included files struct.
+// Create creates an archive at dst with all of the files defined in the
+// included files struct.
 func (a *Archive) Create(dst string) error {
 	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 	if err != nil {
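Note: with the move into package backup, external callers now reference backup.Archive (as compress.go below does). A minimal, hedged usage sketch — the data-root path and ignore rules are placeholders:

package main

import (
	"log"

	"github.com/pterodactyl/wings/server/backup"
)

func main() {
	// Archive everything under BasePath into a tar.gz at the destination,
	// skipping any entries matched by the gitignore-style rules in Ignore.
	a := &backup.Archive{
		BasePath: "/srv/wings/data/00000000-0000-0000-0000-000000000000", // placeholder
		Ignore:   "*.log\ncache/",
	}
	if err := a.Create("/tmp/backup.tar.gz"); err != nil {
		log.Fatal(err)
	}
}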

View File

@@ -8,7 +8,6 @@ import (
 	"path"
 	"sync"
 
-	"emperror.dev/errors"
 	"github.com/apex/log"
 	"github.com/pterodactyl/wings/api"
 	"github.com/pterodactyl/wings/config"
@@ -21,45 +20,13 @@ const (
 	S3BackupAdapter AdapterType = "s3"
 )
 
-type Request struct {
-	Adapter AdapterType `json:"adapter"`
-	Uuid    string      `json:"uuid"`
-	Ignore  string      `json:"ignore"`
-}
-
-// AsBackup returns a new backup adapter based on the request value.
-func (r *Request) AsBackup() (BackupInterface, error) {
-	var adapter BackupInterface
-	switch r.Adapter {
-	case LocalBackupAdapter:
-		adapter = &LocalBackup{
-			Backup{
-				Uuid:    r.Uuid,
-				Ignore:  r.Ignore,
-				adapter: LocalBackupAdapter,
-			},
-		}
-	case S3BackupAdapter:
-		adapter = &S3Backup{
-			Backup: Backup{
-				Uuid:    r.Uuid,
-				Ignore:  r.Ignore,
-				adapter: S3BackupAdapter,
-			},
-		}
-	default:
-		return nil, errors.New("server/backup: unsupported adapter type: " + string(r.Adapter))
-	}
-	return adapter, nil
-}
-
 type ArchiveDetails struct {
 	Checksum     string `json:"checksum"`
 	ChecksumType string `json:"checksum_type"`
 	Size         int64  `json:"size"`
 }
 
-// Returns a request object.
+// ToRequest returns a request object.
 func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
 	return api.BackupRequest{
 		Checksum:     ad.Checksum,

View File

@@ -3,10 +3,6 @@ package backup
 
 import (
 	"errors"
 	"os"
-
-	"github.com/mholt/archiver/v3"
-	"github.com/pterodactyl/wings/server"
-	"github.com/pterodactyl/wings/server/filesystem"
 )
 
 type LocalBackup struct {
@@ -15,6 +11,16 @@ type LocalBackup struct {
 
 var _ BackupInterface = (*LocalBackup)(nil)
 
+func NewLocal(uuid string, ignore string) *LocalBackup {
+	return &LocalBackup{
+		Backup{
+			Uuid:    uuid,
+			Ignore:  ignore,
+			adapter: LocalBackupAdapter,
+		},
+	}
+}
+
 // LocateLocal finds the backup for a server and returns the local path. This
 // will obviously only work if the backup was created as a local backup.
 func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
@@ -50,7 +56,7 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
 // Generate generates a backup of the selected files and pushes it to the
 // defined location for this instance.
 func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
-	a := &filesystem.Archive{
+	a := &Archive{
 		BasePath: basePath,
 		Ignore:   ignore,
 	}
@@ -64,16 +70,3 @@ func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error)
 
 	return b.Details(), nil
 }
-
-// Restore restores a backup to the provided server's root data directory.
-func (b *LocalBackup) Restore(s *server.Server) error {
-	return archiver.Walk(b.Path(), func(f archiver.File) error {
-		if f.IsDir() {
-			return nil
-		}
-		name, err := filesystem.ExtractArchiveSourceName(f, "/")
-		if err != nil {
-			return err
-		}
-		return s.Filesystem().Writefile(name, f)
-	})
-}
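Note: the removed Restore method (taking a *server.Server) was the edge that closed the server ⇄ server/backup cycle; what remains in this file is purely local-disk bookkeeping. A hedged sketch of using the LocateLocal helper — purgeLocal is a hypothetical name, and Remove is assumed to come from the embedded Backup type, which this diff does not show but which BackupInterface requires:

package example

import (
	"github.com/pterodactyl/wings/server/backup"
)

// purgeLocal looks up a local backup by UUID and deletes its archive from
// disk; LocateLocal errors out if no matching file exists.
func purgeLocal(uuid string) error {
	b, _, err := backup.LocateLocal(uuid)
	if err != nil {
		return err
	}
	return b.Remove()
}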

View File

@@ -2,12 +2,12 @@ package backup
 
 import (
 	"fmt"
-	"github.com/pterodactyl/wings/api"
-	"github.com/pterodactyl/wings/server/filesystem"
 	"io"
 	"net/http"
 	"os"
 	"strconv"
+
+	"github.com/pterodactyl/wings/api"
 )
 
 type S3Backup struct {
@@ -16,22 +16,32 @@ type S3Backup struct {
 
 var _ BackupInterface = (*S3Backup)(nil)
 
-// Removes a backup from the system.
+func NewS3(uuid string, ignore string) *S3Backup {
+	return &S3Backup{
+		Backup{
+			Uuid:    uuid,
+			Ignore:  ignore,
+			adapter: S3BackupAdapter,
+		},
+	}
+}
+
+// Remove removes a backup from the system.
 func (s *S3Backup) Remove() error {
 	return os.Remove(s.Path())
 }
 
-// Attaches additional context to the log output for this backup.
+// WithLogContext attaches additional context to the log output for this backup.
 func (s *S3Backup) WithLogContext(c map[string]interface{}) {
 	s.logContext = c
 }
 
-// Generates a new backup on the disk, moves it into the S3 bucket via the provided
-// presigned URL, and then deletes the backup from the disk.
+// Generate creates a new backup on the disk, moves it into the S3 bucket via
+// the provided presigned URL, and then deletes the backup from the disk.
func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
 	defer s.Remove()
 
-	a := &filesystem.Archive{
+	a := &Archive{
 		BasePath: basePath,
 		Ignore:   ignore,
 	}

View File

@@ -6,16 +6,23 @@ import (
 	"path"
 	"path/filepath"
 	"strings"
+	"sync/atomic"
 	"time"
 
+	"github.com/mholt/archiver/v3"
+	"github.com/pterodactyl/wings/server/backup"
+	"github.com/pterodactyl/wings/system"
 )
 
-// Compresses all of the files matching the given paths in the specified directory. This function
-// also supports passing nested paths to only compress certain files and folders when working in
-// a larger directory. This effectively creates a local backup, but rather than ignoring specific
-// files and folders, it takes an allow-list of files and folders.
+// CompressFiles compresses all of the files matching the given paths in the
+// specified directory. This function also supports passing nested paths to only
+// compress certain files and folders when working in a larger directory. This
+// effectively creates a local backup, but rather than ignoring specific files
+// and folders, it takes an allow-list of files and folders.
 //
-// All paths are relative to the dir that is passed in as the first argument, and the compressed
-// file will be placed at that location named `archive-{date}.tar.gz`.
+// All paths are relative to the dir that is passed in as the first argument,
+// and the compressed file will be placed at that location named
+// `archive-{date}.tar.gz`.
 func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
 	cleanedRootDir, err := fs.SafePath(dir)
 	if err != nil {
@@ -32,7 +39,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
 		return nil, err
 	}
 
-	a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
+	a := &backup.Archive{BasePath: cleanedRootDir, Files: cleaned}
 	d := path.Join(
 		cleanedRootDir,
 		fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
@@ -57,3 +64,86 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
 
 	return f, nil
 }
+
+// SpaceAvailableForDecompression looks through a given archive and determines
+// if decompressing it would put the server over its allocated disk space limit.
+func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
+	// Don't waste time trying to determine this if we know the server will have the space for
+	// it since there is no limit.
+	if fs.MaxDisk() <= 0 {
+		return nil
+	}
+
+	source, err := fs.SafePath(filepath.Join(dir, file))
+	if err != nil {
+		return err
+	}
+
+	// Get the cached size in a parallel process so that if it is not cached we are not
+	// waiting an unnecessary amount of time on this call.
+	dirSize, err := fs.DiskUsage(false)
+
+	var size int64
+	// Walk over the archive and figure out just how large the final output would be from unarchiving it.
+	err = archiver.Walk(source, func(f archiver.File) error {
+		if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
+			return &Error{code: ErrCodeDiskSpace}
+		}
+		return nil
+	})
+	if err != nil {
+		if strings.HasPrefix(err.Error(), "format ") {
+			return &Error{code: ErrCodeUnknownArchive}
+		}
+		return err
+	}
+	return err
+}
+
+// DecompressFile will decompress a file in a given directory by using the
+// archiver tool to infer the file type and go from there. This will walk over
+// all of the files within the given archive and ensure that there is not a
+// zip-slip attack being attempted by validating that the final path is within
+// the server data directory.
+func (fs *Filesystem) DecompressFile(dir string, file string) error {
+	source, err := fs.SafePath(filepath.Join(dir, file))
+	if err != nil {
+		return err
+	}
+
+	// Ensure that the source archive actually exists on the system.
+	if _, err := os.Stat(source); err != nil {
+		return err
+	}
+
+	// Walk over all of the files spinning up an additional go-routine for each file we've encountered
+	// and then extract that file from the archive and write it to the disk. If any part of this process
+	// encounters an error the entire process will be stopped.
+	err = archiver.Walk(source, func(f archiver.File) error {
+		// Don't waste time with directories, we don't need to create them if they have no contents, and
+		// we will ensure the directory exists when opening the file for writing anyways.
+		if f.IsDir() {
+			return nil
+		}
+
+		name, err := system.ExtractArchiveSourceName(f, dir)
+		if err != nil {
+			return WrapError(err, filepath.Join(dir, f.Name()))
+		}
+
+		p := filepath.Join(dir, name)
+		// If it is ignored, just don't do anything with the file and skip over it.
+		if err := fs.IsIgnored(p); err != nil {
+			return nil
+		}
+		if err := fs.Writefile(p, f); err != nil {
+			return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
+		}
+		return nil
+	})
+	if err != nil {
+		if strings.HasPrefix(err.Error(), "format ") {
+			return &Error{code: ErrCodeUnknownArchive}
+		}
+		return err
+	}
+	return nil
+}
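Note: these two methods moved here from the now-deleted decompress file in this same package, keeping the package split acyclic. A hedged sketch of how a caller might chain them — safeDecompress is a hypothetical name, and since constructing the *filesystem.Filesystem is outside this diff, it is taken as a parameter:

package example

import (
	"github.com/pterodactyl/wings/server/filesystem"
)

// safeDecompress gates an extraction the way wings does: first project the
// unpacked size against the server's disk limit, then actually decompress.
func safeDecompress(fs *filesystem.Filesystem, dir, file string) error {
	if err := fs.SpaceAvailableForDecompression(dir, file); err != nil {
		return err
	}
	return fs.DecompressFile(dir, file)
}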

View File

@@ -1,118 +0,0 @@
-package filesystem
-
-import (
-	"archive/tar"
-	"archive/zip"
-	"compress/gzip"
-	"fmt"
-	"os"
-	"path/filepath"
-	"reflect"
-	"strings"
-	"sync/atomic"
-
-	"emperror.dev/errors"
-	"github.com/mholt/archiver/v3"
-)
-
-// SpaceAvailableForDecompression looks through a given archive and determines
-// if decompressing it would put the server over its allocated disk space limit.
-func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
-	// Don't waste time trying to determine this if we know the server will have the space for
-	// it since there is no limit.
-	if fs.MaxDisk() <= 0 {
-		return nil
-	}
-
-	source, err := fs.SafePath(filepath.Join(dir, file))
-	if err != nil {
-		return err
-	}
-
-	// Get the cached size in a parallel process so that if it is not cached we are not
-	// waiting an unnecessary amount of time on this call.
-	dirSize, err := fs.DiskUsage(false)
-
-	var size int64
-	// Walk over the archive and figure out just how large the final output would be from unarchiving it.
-	err = archiver.Walk(source, func(f archiver.File) error {
-		if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
-			return &Error{code: ErrCodeDiskSpace}
-		}
-		return nil
-	})
-	if err != nil {
-		if strings.HasPrefix(err.Error(), "format ") {
-			return &Error{code: ErrCodeUnknownArchive}
-		}
-		return err
-	}
-	return err
-}
-
-// DecompressFile will decompress a file in a given directory by using the
-// archiver tool to infer the file type and go from there. This will walk over
-// all of the files within the given archive and ensure that there is not a
-// zip-slip attack being attempted by validating that the final path is within
-// the server data directory.
-func (fs *Filesystem) DecompressFile(dir string, file string) error {
-	source, err := fs.SafePath(filepath.Join(dir, file))
-	if err != nil {
-		return err
-	}
-
-	// Ensure that the source archive actually exists on the system.
-	if _, err := os.Stat(source); err != nil {
-		return err
-	}
-
-	// Walk over all of the files spinning up an additional go-routine for each file we've encountered
-	// and then extract that file from the archive and write it to the disk. If any part of this process
-	// encounters an error the entire process will be stopped.
-	err = archiver.Walk(source, func(f archiver.File) error {
-		// Don't waste time with directories, we don't need to create them if they have no contents, and
-		// we will ensure the directory exists when opening the file for writing anyways.
-		if f.IsDir() {
-			return nil
-		}
-
-		name, err := ExtractArchiveSourceName(f, dir)
-		if err != nil {
-			return err
-		}
-
-		p := filepath.Join(dir, name)
-		// If it is ignored, just don't do anything with the file and skip over it.
-		if err := fs.IsIgnored(p); err != nil {
-			return nil
-		}
-		if err := fs.Writefile(p, f); err != nil {
-			return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
-		}
-		return nil
-	})
-	if err != nil {
-		if strings.HasPrefix(err.Error(), "format ") {
-			return &Error{code: ErrCodeUnknownArchive}
-		}
-		return err
-	}
-	return nil
-}
-
-// ExtractArchiveSourceName looks for the provided archiver.File's name if it is
-// a type that is supported, otherwise it returns an error to the caller.
-func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) {
-	switch s := f.Sys().(type) {
-	case *tar.Header:
-		name = s.Name
-	case *gzip.Header:
-		name = s.Name
-	case *zip.FileHeader:
-		name = s.Name
-	default:
-		err = &Error{
-			code:     ErrCodeUnknownError,
-			resolved: filepath.Join(dir, f.Name()),
-			err:      errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())),
-		}
-	}
-	return name, err
-}

View File

@@ -1,18 +1,23 @@
 package system
 
 import (
+	"archive/tar"
+	"archive/zip"
 	"bufio"
 	"bytes"
+	"compress/gzip"
 	"context"
 	"encoding/json"
 	"fmt"
 	"io"
+	"reflect"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
 	"emperror.dev/errors"
+	"github.com/mholt/archiver/v3"
 )
 
 var cr = []byte(" \r")
@@ -36,6 +41,22 @@ func MustInt(v string) int {
 	return i
 }
 
+// ExtractArchiveSourceName looks for the provided archiver.File's name if it is
+// a type that is supported, otherwise it returns an error to the caller.
+func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) {
+	switch s := f.Sys().(type) {
+	case *tar.Header:
+		name = s.Name
+	case *gzip.Header:
+		name = s.Name
+	case *zip.FileHeader:
+		name = s.Name
+	default:
+		err = errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String()))
+	}
+	return name, err
+}
+
 func ScanReader(r io.Reader, callback func(line string)) error {
 	br := bufio.NewReader(r)
 	// Avoid constantly re-allocating memory when we're flooding lines through this
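Note: relocating ExtractArchiveSourceName into the leaf system package is what lets both the router and the filesystem package call it without importing each other. A hedged usage sketch — the archive filename is a placeholder. The helper matters because archiver.File embeds an os.FileInfo whose Name() is only the base name; the full in-archive path has to be read off the underlying tar/gzip/zip header, which is exactly what the type switch above does:

package main

import (
	"fmt"

	"github.com/mholt/archiver/v3"
	"github.com/pterodactyl/wings/system"
)

func main() {
	// Print the full in-archive path of every entry in the archive.
	err := archiver.Walk("backup.tar.gz", func(f archiver.File) error {
		name, err := system.ExtractArchiveSourceName(f, ".")
		if err != nil {
			return err
		}
		fmt.Println(name)
		return nil
	})
	if err != nil {
		panic(err)
	}
}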