Fix import cycle in the server/backup package
server/backup/archive.go (new file, 226 lines)
@@ -0,0 +1,226 @@
package backup

import (
    "archive/tar"
    "io"
    "os"
    "path/filepath"
    "strings"
    "sync"

    "emperror.dev/errors"
    "github.com/apex/log"
    "github.com/juju/ratelimit"
    "github.com/karrick/godirwalk"
    "github.com/klauspost/pgzip"
    "github.com/pterodactyl/wings/config"
    "github.com/sabhiram/go-gitignore"
)

// memory is the size, in bytes, of the buffers kept in the pool below.
const memory = 4 * 1024

var pool = sync.Pool{
    New: func() interface{} {
        b := make([]byte, memory)
        return b
    },
}
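As context for the pool above, a minimal standalone sketch of the Get/Put round-trip it is designed for (the buffer contents here are purely illustrative):

package main

import (
    "fmt"
    "sync"
)

const memory = 4 * 1024

var pool = sync.Pool{
    New: func() interface{} {
        return make([]byte, memory)
    },
}

func main() {
    buf := pool.Get().([]byte) // may be a fresh buffer or a reused one
    copy(buf, "example")
    fmt.Println(len(buf)) // 4096
    pool.Put(buf)         // hand the same buffer back for reuse
}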
type Archive struct {
    // BasePath is the absolute path to create the archive from. Files and
    // Ignore are resolved relative to it.
    BasePath string

    // Ignore is a gitignore string (most likely read from a file) of files to
    // exclude from the archive.
    Ignore string

    // Files specifies the files to archive and takes priority over the Ignore
    // option. If unspecified, all files in the BasePath will be archived
    // unless Ignore is set.
    Files []string
}

// Create creates an archive at dst containing all of the files this Archive
// is configured with.
func (a *Archive) Create(dst string) error {
    f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
    if err != nil {
        return err
    }
    defer f.Close()

    // Select a writer based on the WriteLimit configuration option. If there
    // is no write limit, use the file as the writer.
    var writer io.Writer
    if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
        // Wrap the file writer in a token bucket limiter with a capacity of
        // writeLimit bytes, refilled at writeLimit bytes per second (that is,
        // WriteLimit MiB/s).
        writer = ratelimit.Writer(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
    } else {
        writer = f
    }

    // Create a new gzip writer around the (possibly rate-limited) writer. The
    // error is only non-nil for an invalid compression level, so it is safe
    // to ignore here.
    gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
    _ = gw.SetConcurrency(1<<20, 1)
    defer gw.Close()

    // Create a new tar writer around the gzip writer.
    tw := tar.NewWriter(gw)
    defer tw.Close()

    // Configure godirwalk.
    options := &godirwalk.Options{
        FollowSymbolicLinks: false,
        Unsorted:            true,
        Callback:            a.callback(tw),
    }

    // If we're specifically looking for only certain files, or have requested
    // that certain files be ignored, update the callback function to reflect
    // that request.
    if len(a.Files) == 0 && len(a.Ignore) > 0 {
        i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)

        options.Callback = a.callback(tw, func(_ string, rp string) error {
            if i.MatchesPath(rp) {
                return godirwalk.SkipThis
            }

            return nil
        })
    } else if len(a.Files) > 0 {
        options.Callback = a.withFilesCallback(tw)
    }

    // Recursively walk the path we are archiving.
    return godirwalk.Walk(a.BasePath, options)
}
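A minimal sketch of how a caller might drive Create; the server root, ignore rules, and destination path are illustrative, and the wings config package must be initialized for the WriteLimit lookup to work:

package main

import (
    "log"

    "github.com/pterodactyl/wings/server/backup"
)

func main() {
    a := &backup.Archive{
        BasePath: "/srv/daemon-data/some-server", // hypothetical server root
        Ignore:   "*.log\ncache/\n",              // gitignore-style rules
    }
    if err := a.Create("/tmp/some-server.tar.gz"); err != nil {
        log.Fatal(err)
    }
}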
// callback returns a function used by godirwalk to determine if a given file
// should be included in the archive being generated.
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
    return func(path string, de *godirwalk.Dirent) error {
        // Skip directories because we are walking them recursively.
        if de.IsDir() {
            return nil
        }

        relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))

        // Call the additional options passed to this callback function. If any
        // of them return a non-nil error we will exit immediately.
        for _, opt := range opts {
            if err := opt(path, relative); err != nil {
                return err
            }
        }

        // Add the file to the archive. If it is nested in a directory, the
        // directory will be automatically "created" in the archive.
        return a.addToArchive(path, relative, tw)
    }
}
// withFilesCallback returns a callback that pushes only the files defined in
// the Files option to the final archive.
func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirwalk.Dirent) error {
    return a.callback(tw, func(p string, rp string) error {
        for _, f := range a.Files {
            // If the given path doesn't match this file exactly and doesn't
            // share its prefix, continue to the next item in the loop.
            if p != f && !strings.HasPrefix(p, f) {
                continue
            }

            // Once we have a match, return nil so that the loop stops and the
            // call to this function will correctly include the file in the
            // archive. If there are no matches we'll never make it to this
            // line, and the final error returned will be the
            // godirwalk.SkipThis error.
            return nil
        }

        return godirwalk.SkipThis
    })
}
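For intuition about the Ignore branch in Create, a standalone sketch of the go-gitignore matching it relies on, assuming the pinned version whose CompileIgnoreLines returns only the matcher (as used above):

package main

import (
    "fmt"

    ignore "github.com/sabhiram/go-gitignore"
)

func main() {
    i := ignore.CompileIgnoreLines("*.log", "cache/")
    fmt.Println(i.MatchesPath("server.log"))      // true -> file is skipped
    fmt.Println(i.MatchesPath("cache/region.db")) // true -> file is skipped
    fmt.Println(i.MatchesPath("world/level.dat")) // false -> file is archived
}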
// addToArchive adds a given file path to the final archive being created.
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
    // Lstat the file; this gives us the same information as Stat except that
    // it will not follow a symlink to its target automatically. This is
    // important to avoid unintentionally including files that exist outside
    // the server root in the backup.
    s, err := os.Lstat(p)
    if err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return errors.WrapIff(err, "failed executing os.Lstat on '%s'", rp)
    }

    // Resolve the symlink target if the file is a symlink.
    var target string
    if s.Mode()&os.ModeSymlink != 0 {
        // Read the target of the symlink. Note that this must use the full
        // path "p", not s.Name(), which is only the base name. If there are
        // any errors we will dump them out to the logs, but we're not going to
        // stop the backup. There are far too many cases of symlinks causing
        // all sorts of unnecessary pain in this process. Sucks to suck if it
        // doesn't work.
        target, err = os.Readlink(p)
        if err != nil {
            // Ignore the not exist errors specifically, since there's nothing
            // important about that.
            if !os.IsNotExist(err) {
                log.WithField("path", rp).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
            }
            return nil
        }
    }

    // Get the tar FileInfoHeader in order to add the file to the archive.
    header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
    if err != nil {
        return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", rp)
    }

    // Fix the header name if the file is not a symlink.
    if s.Mode()&os.ModeSymlink == 0 {
        header.Name = rp
    }

    // Write the tar FileInfoHeader to the archive.
    if err := w.WriteHeader(header); err != nil {
        return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", rp)
    }

    // If the size of the file is less than 1 (most likely for symlinks), skip
    // writing the file.
    if header.Size < 1 {
        return nil
    }

    // If the buffer size is larger than the file size, create a smaller
    // buffer to hold the file.
    var buf []byte
    if header.Size < memory {
        buf = make([]byte, header.Size)
    } else {
        // Get a fixed-size buffer from the pool to save on allocations, and
        // return that same buffer to the pool once we are done with it.
        buf = pool.Get().([]byte)
        defer pool.Put(buf)
    }

    // Open the file.
    f, err := os.Open(p)
    if err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return errors.WrapIff(err, "failed to open '%s' for copying", header.Name)
    }
    defer f.Close()

    // Copy the file's contents to the archive using our buffer.
    if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
        return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
    }

    return nil
}
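As a sanity check on the pgzip/tar layering above, a sketch that reads a produced archive back out and lists its entries (the path matches the earlier example):

package main

import (
    "archive/tar"
    "fmt"
    "io"
    "log"
    "os"

    "github.com/klauspost/pgzip"
)

func main() {
    f, err := os.Open("/tmp/some-server.tar.gz")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    gr, err := pgzip.NewReader(f)
    if err != nil {
        log.Fatal(err)
    }
    defer gr.Close()

    tr := tar.NewReader(gr)
    for {
        h, err := tr.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(h.Name, h.Size)
    }
}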
@@ -8,7 +8,6 @@ import (
    "path"
    "sync"

    "emperror.dev/errors"
    "github.com/apex/log"
    "github.com/pterodactyl/wings/api"
    "github.com/pterodactyl/wings/config"
@@ -21,45 +20,13 @@ const (
    S3BackupAdapter AdapterType = "s3"
)

type Request struct {
    Adapter AdapterType `json:"adapter"`
    Uuid    string      `json:"uuid"`
    Ignore  string      `json:"ignore"`
}

// AsBackup returns a new backup adapter based on the request value.
func (r *Request) AsBackup() (BackupInterface, error) {
    var adapter BackupInterface
    switch r.Adapter {
    case LocalBackupAdapter:
        adapter = &LocalBackup{
            Backup{
                Uuid:    r.Uuid,
                Ignore:  r.Ignore,
                adapter: LocalBackupAdapter,
            },
        }
    case S3BackupAdapter:
        adapter = &S3Backup{
            Backup: Backup{
                Uuid:    r.Uuid,
                Ignore:  r.Ignore,
                adapter: S3BackupAdapter,
            },
        }
    default:
        return nil, errors.New("server/backup: unsupported adapter type: " + string(r.Adapter))
    }
    return adapter, nil
}
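A sketch of resolving a panel request into an adapter via AsBackup; the UUID is a made-up placeholder:

package main

import (
    "log"

    "github.com/pterodactyl/wings/server/backup"
)

func main() {
    req := backup.Request{
        Adapter: backup.S3BackupAdapter,
        Uuid:    "e10e962a-7b8c-4f25-9a0e-5f1d5d7778f2", // hypothetical
        Ignore:  "*.log\n",
    }
    b, err := req.AsBackup()
    if err != nil {
        log.Fatal(err)
    }
    _ = b // a BackupInterface backed by *S3Backup in this case
}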
type ArchiveDetails struct {
    Checksum     string `json:"checksum"`
    ChecksumType string `json:"checksum_type"`
    Size         int64  `json:"size"`
}

-// Returns a request object.
+// ToRequest returns a request object.
func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
    return api.BackupRequest{
        Checksum: ad.Checksum,
@@ -3,10 +3,6 @@ package backup
import (
    "errors"
    "os"
-
-    "github.com/mholt/archiver/v3"
-    "github.com/pterodactyl/wings/server"
-    "github.com/pterodactyl/wings/server/filesystem"
)

type LocalBackup struct {
@@ -15,6 +11,16 @@ type LocalBackup struct {

var _ BackupInterface = (*LocalBackup)(nil)

+func NewLocal(uuid string, ignore string) *LocalBackup {
+    return &LocalBackup{
+        Backup{
+            Uuid:    uuid,
+            Ignore:  ignore,
+            adapter: LocalBackupAdapter,
+        },
+    }
+}
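NewLocal mirrors the LocalBackupAdapter arm of AsBackup; a brief sketch of direct construction and generation (paths and UUID illustrative, and the wings config must be initialized):

package main

import (
    "log"

    "github.com/pterodactyl/wings/server/backup"
)

func main() {
    b := backup.NewLocal("e10e962a-7b8c-4f25-9a0e-5f1d5d7778f2", "*.log\n")
    details, err := b.Generate("/srv/daemon-data/some-server", b.Ignore)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("wrote %s (%d bytes, checksum %s)", b.Path(), details.Size, details.Checksum)
}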
// LocateLocal finds the backup for a server and returns the local path. This
// will obviously only work if the backup was created as a local backup.
func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
@@ -50,7 +56,7 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
// Generate generates a backup of the selected files and pushes it to the
// defined location for this instance.
func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
-    a := &filesystem.Archive{
+    a := &Archive{
        BasePath: basePath,
        Ignore:   ignore,
    }
@@ -64,16 +70,3 @@ func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error)
    return b.Details(), nil
}
-// Restore restores a backup to the provided server's root data directory.
-func (b *LocalBackup) Restore(s *server.Server) error {
-    return archiver.Walk(b.Path(), func(f archiver.File) error {
-        if f.IsDir() {
-            return nil
-        }
-        name, err := filesystem.ExtractArchiveSourceName(f, "/")
-        if err != nil {
-            return err
-        }
-        return s.Filesystem().Writefile(name, f)
-    })
-}
@@ -2,12 +2,12 @@ package backup

import (
    "fmt"
-    "github.com/pterodactyl/wings/api"
-    "github.com/pterodactyl/wings/server/filesystem"
    "io"
    "net/http"
    "os"
    "strconv"
+
+    "github.com/pterodactyl/wings/api"
)

type S3Backup struct {
@@ -16,22 +16,32 @@ type S3Backup struct {

var _ BackupInterface = (*S3Backup)(nil)

-// Removes a backup from the system.
+func NewS3(uuid string, ignore string) *S3Backup {
+    return &S3Backup{
+        Backup{
+            Uuid:    uuid,
+            Ignore:  ignore,
+            adapter: S3BackupAdapter,
+        },
+    }
+}
+
+// Remove removes a backup from the system.
func (s *S3Backup) Remove() error {
    return os.Remove(s.Path())
}

-// Attaches additional context to the log output for this backup.
+// WithLogContext attaches additional context to the log output for this backup.
func (s *S3Backup) WithLogContext(c map[string]interface{}) {
    s.logContext = c
}

-// Generates a new backup on the disk, moves it into the S3 bucket via the provided
-// presigned URL, and then deletes the backup from the disk.
+// Generate creates a new backup on the disk, moves it into the S3 bucket via
+// the provided presigned URL, and then deletes the backup from the disk.
func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
    defer s.Remove()

-    a := &filesystem.Archive{
+    a := &Archive{
        BasePath: basePath,
        Ignore:   ignore,
    }