Initial untested pass at restoring from local backups

Dane Everitt
2021-01-16 18:06:22 -08:00
parent 6a286fb444
commit 7dd0acebc0
6 changed files with 183 additions and 116 deletions

View File

@@ -3,13 +3,15 @@ package backup
 import (
 	"crypto/sha1"
 	"encoding/hex"
-	"github.com/apex/log"
-	"github.com/pterodactyl/wings/api"
-	"github.com/pterodactyl/wings/config"
 	"io"
 	"os"
 	"path"
 	"sync"
+
+	"emperror.dev/errors"
+	"github.com/apex/log"
+	"github.com/pterodactyl/wings/api"
+	"github.com/pterodactyl/wings/config"
 )
 
 type AdapterType string
@@ -19,6 +21,38 @@ const (
S3BackupAdapter AdapterType = "s3"
)
type Request struct {
Adapter AdapterType `json:"adapter"`
Uuid string `json:"uuid"`
Ignore string `json:"ignore"`
}
// AsBackup returns a new backup adapter based on the request value.
func (r *Request) AsBackup() (BackupInterface, error) {
var adapter BackupInterface
switch r.Adapter {
case LocalBackupAdapter:
adapter = &LocalBackup{
Backup{
Uuid: r.Uuid,
Ignore: r.Ignore,
adapter: LocalBackupAdapter,
},
}
case S3BackupAdapter:
adapter = &S3Backup{
Backup: Backup{
Uuid: r.Uuid,
Ignore: r.Ignore,
adapter: S3BackupAdapter,
},
}
default:
return nil, errors.New("server/backup: unsupported adapter type: " + string(r.Adapter))
}
return adapter, nil
}
type ArchiveDetails struct {
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
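
As a rough usage sketch (not part of this commit): the panel's JSON payload can be decoded straight into a Request thanks to the struct tags above, and AsBackup then picks the adapter. The payload and the surrounding main function below are invented; only Request, AsBackup, and BackupInterface come from the code in this file.

package main

import (
	"encoding/json"
	"log"

	"github.com/pterodactyl/wings/server/backup"
)

func main() {
	// Invented payload shaped by the json tags on backup.Request; the UUID is made up.
	payload := []byte(`{"adapter":"s3","uuid":"3f0c4c0e-6b5a-4a55-9d43-56a00aa45a6b","ignore":""}`)

	var r backup.Request
	if err := json.Unmarshal(payload, &r); err != nil {
		log.Fatal(err)
	}

	// AsBackup returns the matching BackupInterface implementation, or an
	// error for an adapter string it does not recognize.
	adapter, err := r.AsBackup()
	if err != nil {
		log.Fatal(err)
	}
	_ = adapter
}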

View File

@@ -2,8 +2,11 @@ package backup
 
 import (
 	"errors"
-	"github.com/pterodactyl/wings/server/filesystem"
 	"os"
+
+	"github.com/mholt/archiver/v3"
+	"github.com/pterodactyl/wings/server"
+	"github.com/pterodactyl/wings/server/filesystem"
 )
 
 type LocalBackup struct {
@@ -12,8 +15,8 @@ type LocalBackup struct {
 
 var _ BackupInterface = (*LocalBackup)(nil)
 
-// Locates the backup for a server and returns the local path. This will obviously only
-// work if the backup was created as a local backup.
+// LocateLocal finds the backup for a server and returns the local path. This
+// will obviously only work if the backup was created as a local backup.
 func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
 	b := &LocalBackup{
 		Backup{
@@ -34,18 +37,18 @@ func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
 	return b, st, nil
 }
 
-// Removes a backup from the system.
+// Remove removes a backup from the system.
 func (b *LocalBackup) Remove() error {
 	return os.Remove(b.Path())
 }
 
-// Attaches additional context to the log output for this backup.
+// WithLogContext attaches additional context to the log output for this backup.
 func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
 	b.logContext = c
 }
 
-// Generates a backup of the selected files and pushes it to the defined location
-// for this instance.
+// Generate generates a backup of the selected files and pushes it to the
+// defined location for this instance.
 func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
 	a := &filesystem.Archive{
 		BasePath: basePath,
@@ -60,3 +63,17 @@ func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error)
 
 	return b.Details(), nil
 }
+
+// Restore restores a backup to the provided server's root data directory.
+func (b *LocalBackup) Restore(s *server.Server) error {
+	return archiver.Walk(b.Path(), func(f archiver.File) error {
+		if f.IsDir() {
+			return nil
+		}
+		name, err := filesystem.ExtractArchiveSourceName(f, "/")
+		if err != nil {
+			return err
+		}
+		return s.Filesystem().Writefile(name, f)
+	})
+}
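
One detail worth calling out in the Restore body: the archiver.File handed to the walk callback embeds io.ReadCloser, which is why it can be passed directly to Writefile as the file contents. A compile-time illustration (not from the codebase):

package example

import (
	"io"

	"github.com/mholt/archiver/v3"
)

// archiver.File promotes the Read method of its embedded io.ReadCloser, so a
// walked entry satisfies io.Reader and can be streamed into Writefile as-is.
var _ io.Reader = archiver.File{}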

View File

@@ -1,42 +0,0 @@
-package backup
-
-import (
-	"errors"
-	"fmt"
-)
-
-type Request struct {
-	Adapter AdapterType `json:"adapter"`
-	Uuid string `json:"uuid"`
-	Ignore string `json:"ignore"`
-}
-
-// Generates a new local backup struct.
-func (r *Request) NewLocalBackup() (*LocalBackup, error) {
-	if r.Adapter != LocalBackupAdapter {
-		return nil, errors.New(fmt.Sprintf("cannot create local backup using [%s] adapter", r.Adapter))
-	}
-
-	return &LocalBackup{
-		Backup{
-			Uuid: r.Uuid,
-			Ignore: r.Ignore,
-			adapter: LocalBackupAdapter,
-		},
-	}, nil
-}
-
-// Generates a new S3 backup struct.
-func (r *Request) NewS3Backup() (*S3Backup, error) {
-	if r.Adapter != S3BackupAdapter {
-		return nil, errors.New(fmt.Sprintf("cannot create s3 backup using [%s] adapter", r.Adapter))
-	}
-
-	return &S3Backup{
-		Backup: Backup{
-			Uuid: r.Uuid,
-			Ignore: r.Ignore,
-			adapter: S3BackupAdapter,
-		},
-	}, nil
-}

View File

@@ -74,23 +74,10 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
 		if f.IsDir() {
 			return nil
 		}
-
-		var name string
-		switch s := f.Sys().(type) {
-		case *tar.Header:
-			name = s.Name
-		case *gzip.Header:
-			name = s.Name
-		case *zip.FileHeader:
-			name = s.Name
-		default:
-			return &Error{
-				code: ErrCodeUnknownError,
-				resolved: filepath.Join(dir, f.Name()),
-				err: errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())),
-			}
+		name, err := ExtractArchiveSourceName(f, dir)
+		if err != nil {
+			return err
 		}
-
 		p := filepath.Join(dir, name)
 		// If it is ignored, just don't do anything with the file and skip over it.
 		if err := fs.IsIgnored(p); err != nil {
@@ -109,3 +96,23 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
 	}
 	return nil
 }
+
+// ExtractArchiveSourceName looks for the provided archiver.File's name if it is
+// a type that is supported, otherwise it returns an error to the caller.
+func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) {
+	switch s := f.Sys().(type) {
+	case *tar.Header:
+		name = s.Name
+	case *gzip.Header:
+		name = s.Name
+	case *zip.FileHeader:
+		name = s.Name
+	default:
+		err = &Error{
+			code: ErrCodeUnknownError,
+			resolved: filepath.Join(dir, f.Name()),
+			err: errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())),
+		}
+	}
+	return name, err
+}
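
A toy illustration of the extracted helper, assuming a tar-backed archive (the entry path and directory below are made up): the entry's FileInfo exposes its *tar.Header via Sys(), so the original archive path can be recovered. The dir argument is only used to build the error for unsupported header types.

package main

import (
	"archive/tar"
	"fmt"

	"github.com/mholt/archiver/v3"
	"github.com/pterodactyl/wings/server/filesystem"
)

func main() {
	// Build an archiver.File backed by a tar header, the same shape that
	// archiver.Walk passes to its callback for tar-based archives.
	hdr := &tar.Header{Name: "configs/server.properties", Mode: 0644}
	f := archiver.File{FileInfo: hdr.FileInfo()}

	name, err := filesystem.ExtractArchiveSourceName(f, "/made/up/dir")
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // configs/server.properties
}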

View File

@@ -11,6 +11,7 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"emperror.dev/errors"
@@ -124,7 +125,8 @@ func (fs *Filesystem) Readfile(p string, w io.Writer) error {
 }
 
 // Writefile writes a file to the system. If the file does not already exist one
-// will be created.
+// will be created. This will also properly recalculate the disk space used by
+// the server when writing new files or modifying existing ones.
 func (fs *Filesystem) Writefile(p string, r io.Reader) error {
 	cleaned, err := fs.SafePath(p)
 	if err != nil {
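
Only the doc comment changes in this hunk, but Restore leans on this method for every file it writes back. A hypothetical caller (name, path, and contents invented) looks like:

package example

import (
	"strings"

	"github.com/pterodactyl/wings/server"
)

// writeWelcomeFile is a hypothetical caller. Writefile accepts any io.Reader
// and, per the updated comment, recalculates the server's tracked disk usage
// as it writes.
func writeWelcomeFile(s *server.Server) error {
	return s.Filesystem().Writefile("motd.txt", strings.NewReader("restored by wings\n"))
}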
@@ -365,8 +367,21 @@ func (fs *Filesystem) Copy(p string) error {
 	return fs.Writefile(path.Join(relative, n), source)
 }
 
-// Deletes a file or folder from the system. Prevents the user from accidentally
-// (or maliciously) removing their root server data directory.
+// TruncateRootDirectory removes _all_ files and directories from a server's
+// data directory and resets the used disk space to zero.
+func (fs *Filesystem) TruncateRootDirectory() error {
+	if err := os.RemoveAll(fs.Path()); err != nil {
+		return err
+	}
+	if err := os.Mkdir(fs.Path(), 0755); err != nil {
+		return err
+	}
+	atomic.StoreInt64(&fs.diskUsed, 0)
+	return nil
+}
+
+// Delete removes a file or folder from the system. Prevents the user from
+// accidentally (or maliciously) removing their root server data directory.
 func (fs *Filesystem) Delete(p string) error {
 	wg := sync.WaitGroup{}
 	// This is one of the few (only?) places in the codebase where we're explicitly not using
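
The handler that wires these pieces together is not part of this excerpt, so the following is only a guess at the intended flow, assuming the caller already holds the *server.Server: locate the archive, wipe the data directory, then replay the backup into it.

package example

import (
	"github.com/pterodactyl/wings/server"
	"github.com/pterodactyl/wings/server/backup"
)

// restoreFromLocal is a hypothetical helper, not the handler shipped in this
// commit; it strings together the pieces added above.
func restoreFromLocal(s *server.Server, uuid string) error {
	// Find the backup archive on disk for this UUID.
	b, _, err := backup.LocateLocal(uuid)
	if err != nil {
		return err
	}
	// Empty the root data directory and zero the cached disk usage so files
	// that are not part of the backup do not survive the restore.
	if err := s.Filesystem().TruncateRootDirectory(); err != nil {
		return err
	}
	// Walk the archive and write each regular file back via Writefile.
	return b.Restore(s)
}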