Rework archiving logic to be more consistent and less impactful on disk IO (#79)

Co-authored-by: Dane Everitt <dane@daneeveritt.com>

commit 901ab1157d (parent c0523df696)
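
In short: the old two-pass flow (walk the disk to build a backup.IncludedFiles list, then walk that list again while writing the archive) is replaced by a single-pass filesystem.Archive that streams files into a rate-limited gzip/tar writer as it walks the directory tree, applying .gitignore-style rules along the way. A minimal sketch of the new call flow is below; the paths are illustrative, and it assumes the wings config (which supplies the backup write limit) has already been loaded.

package main

import (
	"log"

	"github.com/pterodactyl/wings/server/filesystem"
)

func main() {
	a := &filesystem.Archive{
		// Archive everything under this directory (illustrative path)...
		BasePath: "/srv/server-data",
		// ...except entries matching these gitignore-style rules.
		Ignore: "*.log\ncache/",
	}
	// Single pass over the tree: matching files are streamed straight into
	// a gzip-compressed tarball, rate limited if a write limit is configured.
	if err := a.Create("/tmp/backup.tar.gz"); err != nil {
		log.Fatal(err)
	}
}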

go.mod (2 changes)

@@ -62,7 +62,7 @@ require (
 	github.com/pkg/sftp v1.11.0
 	github.com/prometheus/common v0.11.1 // indirect
 	github.com/remeh/sizedwaitgroup v1.0.0
-	github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94
+	github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f
 	github.com/spf13/cobra v1.0.0
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/ulikunitz/xz v0.5.7 // indirect

go.sum (2 changes)

@@ -482,6 +482,8 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 h1:G04eS0JkAIVZfaJLjla9dNxkJCPiKIGZlw9AfOhzOD0=
 github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ=
+github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f h1:8P2MkG70G76gnZBOPGwmMIgwBb/rESQuwsJ7K8ds4NE=
+github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
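
The sabhiram/go-gitignore bump is not incidental: between the 2018 and 2020 versions, CompileIgnoreLines stopped returning an error, which is why the removed GetIncludedFiles further down still checks one while the new archive.go does not. A small sketch of the new API, used the same way archive.go uses it (patterns illustrative):

package main

import (
	"fmt"
	"strings"

	ignore "github.com/sabhiram/go-gitignore"
)

func main() {
	// Raw .pteroignore-style contents, one pattern per line.
	raw := "*.log\ncache/"

	// The 2020 version returns *GitIgnore directly; no error to check.
	i := ignore.CompileIgnoreLines(strings.Split(raw, "\n")...)

	fmt.Println(i.MatchesPath("server.log"))      // true
	fmt.Println(i.MatchesPath("world/level.dat")) // false
}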

@@ -1,11 +1,11 @@
 package server
 
 import (
-	"bufio"
 	"emperror.dev/errors"
 	"github.com/apex/log"
 	"github.com/pterodactyl/wings/api"
 	"github.com/pterodactyl/wings/server/backup"
+	"io/ioutil"
 	"os"
 	"path"
 )
@@ -30,58 +30,38 @@ func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, suc
 }
 
 // Get all of the ignored files for a server based on its .pteroignore file in the root.
-func (s *Server) getServerwideIgnoredFiles() ([]string, error) {
-	var ignored []string
-
+func (s *Server) getServerwideIgnoredFiles() (string, error) {
 	f, err := os.Open(path.Join(s.Filesystem().Path(), ".pteroignore"))
 	if err != nil {
 		if !os.IsNotExist(err) {
-			return nil, err
+			return "", err
 		}
-	} else {
-		scanner := bufio.NewScanner(f)
-		for scanner.Scan() {
-			// Only include non-empty lines, for the sake of clarity...
-			if t := scanner.Text(); t != "" {
-				ignored = append(ignored, t)
-			}
-		}
-
-		if err := scanner.Err(); err != nil {
-			return nil, err
-		}
+		return "", nil
 	}
 
-	return ignored, nil
-}
-
-// Get the backup files to include when generating it.
-func (s *Server) GetIncludedBackupFiles(ignored []string) (*backup.IncludedFiles, error) {
-	// If no ignored files are present in the request, check for a .pteroignore file in the root
-	// of the server files directory, and use that to generate the backup.
-	if len(ignored) == 0 {
-		if i, err := s.getServerwideIgnoredFiles(); err != nil {
-			s.Log().WithField("error", err).Warn("failed to retrieve ignored files listing for server")
-		} else {
-			ignored = i
-		}
+	b, err := ioutil.ReadAll(f)
+	if err != nil {
+		return "", err
 	}
 
-	// Get the included files based on the root path and the ignored files provided.
-	return s.Filesystem().GetIncludedFiles(s.Filesystem().Path(), ignored)
+	return string(b), nil
 }
 
 // Performs a server backup and then emits the event over the server websocket. We
 // let the actual backup system handle notifying the panel of the status, but that
 // won't emit a websocket event.
 func (s *Server) Backup(b backup.BackupInterface) error {
-	// Get the included files based on the root path and the ignored files provided.
-	inc, err := s.GetIncludedBackupFiles(b.Ignored())
-	if err != nil {
-		return err
+	ignored := b.Ignored()
+	if b.Ignored() == "" {
+		if i, err := s.getServerwideIgnoredFiles(); err != nil {
+			log.WithField("server", s.Id()).WithField("error", err).Warn("failed to get server-wide ignored files")
+		} else {
+			ignored = i
+		}
 	}
 
-	ad, err := b.Generate(inc, s.Filesystem().Path())
+	ad, err := b.Generate(s.Filesystem().Path(), ignored)
 	if err != nil {
 		if notifyError := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); notifyError != nil {
 			s.Log().WithFields(log.Fields{
@@ -109,7 +89,8 @@ func (s *Server) Backup(b backup.BackupInterface) error {
 	// Try to notify the panel about the status of this backup. If for some reason this request
 	// fails, delete the archive from the daemon and return that error up the chain to the caller.
 	if notifyError := s.notifyPanelOfBackup(b.Identifier(), ad, true); notifyError != nil {
-		b.Remove()
+		_ = b.Remove()
+
 		s.Log().WithField("error", notifyError).Info("failed to notify panel of successful backup state")
 		return err
 	} else {
@@ -118,7 +99,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {
 
 	// Emit an event over the socket so we can update the backup in realtime on
 	// the frontend for the server.
-	s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
+	_ = s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
 		"uuid":          b.Identifier(),
 		"is_successful": true,
 		"checksum":      ad.Checksum,
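
The shape change above is worth calling out: the server no longer parses .pteroignore line by line into a []string; it reads the raw contents into a single string and leaves all pattern handling to the archiver. A standalone sketch of the new read path follows; the explicit deferred Close is my addition, and as in the diff a missing file is treated as an empty rule set rather than an error.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func readIgnoreFile(p string) (string, error) {
	f, err := os.Open(p)
	if err != nil {
		if !os.IsNotExist(err) {
			return "", err
		}
		return "", nil
	}
	defer f.Close()

	b, err := ioutil.ReadAll(f)
	if err != nil {
		return "", err
	}
	return string(b), nil
}

func main() {
	s, _ := readIgnoreFile(".pteroignore")
	fmt.Printf("%q\n", s) // e.g. "*.log\ncache/\n"
}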

@@ -1,150 +0,0 @@
-package backup
-
-import (
-	"archive/tar"
-	"context"
-	"emperror.dev/errors"
-	"github.com/apex/log"
-	"github.com/juju/ratelimit"
-	gzip "github.com/klauspost/pgzip"
-	"github.com/pterodactyl/wings/config"
-	"github.com/remeh/sizedwaitgroup"
-	"golang.org/x/sync/errgroup"
-	"io"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-)
-
-type Archive struct {
-	sync.Mutex
-
-	TrimPrefix string
-	Files      *IncludedFiles
-}
-
-// Creates an archive at dst with all of the files defined in the included files struct.
-func (a *Archive) Create(dst string, ctx context.Context) error {
-	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	// Select a writer based off of the WriteLimit configuration option.
-	var writer io.Writer
-	if writeLimit := config.Get().System.Backups.WriteLimit; writeLimit < 1 {
-		// If there is no write limit, use the file as the writer.
-		writer = f
-	} else {
-		// Token bucket with a capacity of "writeLimit" MiB, adding "writeLimit" MiB/s
-		bucket := ratelimit.NewBucketWithRate(float64(writeLimit)*1024*1024, int64(writeLimit)*1024*1024)
-
-		// Wrap the file writer with the token bucket limiter.
-		writer = ratelimit.Writer(f, bucket)
-	}
-
-	maxCpu := runtime.NumCPU() / 2
-	if maxCpu > 4 {
-		maxCpu = 4
-	}
-
-	gzw, err := gzip.NewWriterLevel(writer, gzip.BestSpeed)
-	if err != nil {
-		return errors.WithMessage(err, "failed to create gzip writer")
-	}
-	if err := gzw.SetConcurrency(1<<20, maxCpu); err != nil {
-		return errors.WithMessage(err, "failed to set gzip concurrency")
-	}
-
-	defer gzw.Flush()
-	defer gzw.Close()
-
-	tw := tar.NewWriter(gzw)
-	defer tw.Flush()
-	defer tw.Close()
-
-	wg := sizedwaitgroup.New(10)
-	g, ctx := errgroup.WithContext(ctx)
-	// Iterate over all of the files to be included and put them into the archive. This is
-	// done as a concurrent goroutine to speed things along. If an error is encountered at
-	// any step, the entire process is aborted.
-	for _, p := range a.Files.All() {
-		p := p
-		g.Go(func() error {
-			wg.Add()
-			defer wg.Done()
-
-			select {
-			case <-ctx.Done():
-				return ctx.Err()
-			default:
-				return a.addToArchive(p, tw)
-			}
-		})
-	}
-
-	// Block until the entire routine is completed.
-	if err := g.Wait(); err != nil {
-		f.Close()
-
-		// Attempt to remove the archive if there is an error, report that error to
-		// the logger if it fails.
-		if rerr := os.Remove(dst); rerr != nil && !os.IsNotExist(rerr) {
-			log.WithField("location", dst).Warn("failed to delete corrupted backup archive")
-		}
-
-		return err
-	}
-
-	return nil
-}
-
-// Adds a single file to the existing tar archive writer.
-func (a *Archive) addToArchive(p string, w *tar.Writer) error {
-	f, err := os.Open(p)
-	if err != nil {
-		// If you try to backup something that no longer exists (got deleted somewhere during the process
-		// but not by this process), just skip over it and don't kill the entire backup.
-		if os.IsNotExist(err) {
-			return nil
-		}
-
-		return err
-	}
-	defer f.Close()
-
-	s, err := f.Stat()
-	if err != nil {
-		// Same as above, don't kill the process just because the file no longer exists.
-		if os.IsNotExist(err) {
-			return nil
-		}
-
-		return err
-	}
-
-	name := strings.TrimPrefix(p, a.TrimPrefix)
-	header, err := tar.FileInfoHeader(s, name)
-	if err != nil {
-		return errors.WithMessage(err, "failed to get tar#FileInfoHeader for "+name)
-	}
-	header.Name = name
-
-	// These actions must occur sequentially, even if this function is called multiple
-	// times in parallel. You'll get some nasty panics otherwise.
-	a.Lock()
-	defer a.Unlock()
-
-	if err := w.WriteHeader(header); err != nil {
-		return err
-	}
-
-	buf := make([]byte, 4*1024)
-	if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
-		return errors.WithMessage(err, "failed to copy "+header.Name+" to archive")
-	}
-
-	return nil
-}

@@ -40,7 +40,7 @@ type Backup struct {
 
 	// An array of files to ignore when generating this backup. This should be
 	// compatible with a standard .gitignore structure.
-	IgnoredFiles []string `json:"ignored_files"`
+	Ignore string `json:"ignore"`
 }
 
 // noinspection GoNameStartsWithPackageName
@@ -50,12 +50,12 @@ type BackupInterface interface {
 
 	// Generates a backup in whatever the configured source for the specific
 	// implementation is.
-	Generate(*IncludedFiles, string) (*ArchiveDetails, error)
+	Generate(string, string) (*ArchiveDetails, error)
 
 	// Returns the ignored files for this backup instance.
-	Ignored() []string
+	Ignored() string
 
-	// Returns a SHA256 checksum for the generated backup.
+	// Returns a SHA1 checksum for the generated backup.
 	Checksum() ([]byte, error)
 
 	// Returns the size of the generated backup.
@@ -153,6 +153,6 @@ func (b *Backup) Details() *ArchiveDetails {
 	}
 }
 
-func (b *Backup) Ignored() []string {
-	return b.IgnoredFiles
+func (b *Backup) Ignored() string {
+	return b.Ignore
 }
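
With Generate(*IncludedFiles, string) replaced by Generate(string, string), an implementation now receives the base path plus the raw ignore string and drives the walk itself. Below is a hypothetical adapter, written as a fragment of package backup and assuming the filesystem import added in this commit; the real implementations are LocalBackup and S3Backup, shown next.

// NoopBackup is illustrative only; it embeds Backup just like the real adapters.
type NoopBackup struct {
	Backup
}

// Generate satisfies the reworked BackupInterface: the caller hands over the
// absolute base path and a gitignore-style string instead of a pre-built list.
func (n *NoopBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
	a := &filesystem.Archive{BasePath: basePath, Ignore: ignore}
	if err := a.Create(n.Path()); err != nil {
		return nil, err
	}
	return n.Details(), nil
}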

@@ -1,8 +1,8 @@
 package backup
 
 import (
-	"context"
 	"errors"
+	"github.com/pterodactyl/wings/server/filesystem"
 	"os"
 )
@@ -17,8 +17,8 @@ var _ BackupInterface = (*LocalBackup)(nil)
 func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
 	b := &LocalBackup{
 		Backup{
 			Uuid: uuid,
-			IgnoredFiles: nil,
+			Ignore: "",
 		},
 	}
@@ -41,13 +41,13 @@ func (b *LocalBackup) Remove() error {
 
 // Generates a backup of the selected files and pushes it to the defined location
 // for this instance.
-func (b *LocalBackup) Generate(included *IncludedFiles, prefix string) (*ArchiveDetails, error) {
-	a := &Archive{
-		TrimPrefix: prefix,
-		Files:      included,
+func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
+	a := &filesystem.Archive{
+		BasePath: basePath,
+		Ignore:   ignore,
 	}
 
-	if err := a.Create(b.Path(), context.Background()); err != nil {
+	if err := a.Create(b.Path()); err != nil {
 		return nil, err
 	}
 

@@ -6,9 +6,9 @@ import (
 )
 
 type Request struct {
 	Adapter string `json:"adapter"`
 	Uuid string `json:"uuid"`
-	IgnoredFiles []string `json:"ignored_files"`
+	Ignore string `json:"ignore"`
 }
 
 // Generates a new local backup struct.
@@ -19,8 +19,8 @@ func (r *Request) NewLocalBackup() (*LocalBackup, error) {
 
 	return &LocalBackup{
 		Backup{
 			Uuid: r.Uuid,
-			IgnoredFiles: r.IgnoredFiles,
+			Ignore: r.Ignore,
 		},
 	}, nil
 }
@@ -33,8 +33,8 @@ func (r *Request) NewS3Backup() (*S3Backup, error) {
 
 	return &S3Backup{
 		Backup: Backup{
 			Uuid: r.Uuid,
-			IgnoredFiles: r.IgnoredFiles,
+			Ignore: r.Ignore,
 		},
 	}, nil
 }
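
For API consumers the request body changes shape to match: the ignored_files array becomes a single ignore string. A decoding sketch follows; the JSON payload is an invented example, not captured panel traffic.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the reworked Request struct above.
type Request struct {
	Adapter string `json:"adapter"`
	Uuid    string `json:"uuid"`
	Ignore  string `json:"ignore"`
}

func main() {
	// Previously: {"adapter":"wings","uuid":"...","ignored_files":["*.log"]}
	// Now the patterns arrive as one newline-delimited string.
	body := []byte(`{"adapter":"wings","uuid":"abc123","ignore":"*.log\ncache/"}`)

	var r Request
	if err := json.Unmarshal(body, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r)
}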

@@ -1,10 +1,10 @@
 package backup
 
 import (
-	"context"
 	"fmt"
 	"github.com/apex/log"
 	"github.com/pterodactyl/wings/api"
+	"github.com/pterodactyl/wings/server/filesystem"
 	"io"
 	"net/http"
 	"os"
@@ -19,15 +19,15 @@ var _ BackupInterface = (*S3Backup)(nil)
 
 // Generates a new backup on the disk, moves it into the S3 bucket via the provided
 // presigned URL, and then deletes the backup from the disk.
-func (s *S3Backup) Generate(included *IncludedFiles, prefix string) (*ArchiveDetails, error) {
+func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
 	defer s.Remove()
 
-	a := &Archive{
-		TrimPrefix: prefix,
-		Files:      included,
+	a := &filesystem.Archive{
+		BasePath: basePath,
+		Ignore:   ignore,
 	}
 
-	if err := a.Create(s.Path(), context.Background()); err != nil {
+	if err := a.Create(s.Path()); err != nil {
 		return nil, err
 	}
 

@@ -1,25 +0,0 @@
-package backup
-
-import (
-	"sync"
-)
-
-type IncludedFiles struct {
-	sync.RWMutex
-	files []string
-}
-
-// Pushes an additional file or folder onto the struct.
-func (i *IncludedFiles) Push(p string) {
-	i.Lock()
-	i.files = append(i.files, p)
-	i.Unlock()
-}
-
-// Returns all of the files that were marked as being included.
-func (i *IncludedFiles) All() []string {
-	i.RLock()
-	defer i.RUnlock()
-
-	return i.files
-}

server/filesystem/archive.go (new file, 207 additions)

@@ -0,0 +1,207 @@
+package filesystem
+
+import (
+	"archive/tar"
+	"emperror.dev/errors"
+	"github.com/juju/ratelimit"
+	"github.com/karrick/godirwalk"
+	"github.com/klauspost/pgzip"
+	"github.com/pterodactyl/wings/config"
+	"github.com/sabhiram/go-gitignore"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+)
+
+const memory = 4 * 1024
+
+var pool = sync.Pool{
+	New: func() interface{} {
+		b := make([]byte, memory)
+		return b
+	},
+}
+
+type Archive struct {
+	// BasePath is the absolute path to create the archive from; Files and Ignore
+	// are relative to it.
+	BasePath string
+
+	// Ignore is a gitignore string (most likely read from a file) of files to ignore
+	// from the archive.
+	Ignore string
+
+	// Files specifies the files to archive; this takes priority over the Ignore option.
+	// If unspecified, all files in the BasePath will be archived unless Ignore is set.
+	Files []string
+}
+
+// Creates an archive at dst with all of the files defined in the included files struct.
+func (a *Archive) Create(dst string) error {
+	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	// Select a writer based off of the WriteLimit configuration option. If there is no
+	// write limit, use the file as the writer.
+	var writer io.Writer = f
+	writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024)
+	if writeLimit > 0 {
+		// Token bucket with a capacity of "writeLimit" MiB, adding "writeLimit" MiB/s,
+		// and then wrap the file writer with the token bucket limiter.
+		writer = ratelimit.Writer(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
+	}
+
+	// Create a new gzip writer around the file.
+	gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
+	_ = gw.SetConcurrency(1<<20, 1)
+	defer gw.Close()
+
+	// Create a new tar writer around the gzip writer.
+	tw := tar.NewWriter(gw)
+	defer tw.Close()
+
+	// Configure godirwalk.
+	options := &godirwalk.Options{
+		FollowSymbolicLinks: false,
+		Unsorted:            true,
+		Callback:            a.callback(tw),
+	}
+
+	// If we're specifically looking for only certain files, or have requested
+	// that certain files be ignored we'll update the callback function to reflect
+	// that request.
+	if len(a.Files) == 0 && len(a.Ignore) > 0 {
+		i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
+		options.Callback = a.callback(tw, func(_ string, rp string) error {
+			if i.MatchesPath(rp) {
+				return godirwalk.SkipThis
+			}
+			return nil
+		})
+	} else if len(a.Files) > 0 {
+		options.Callback = a.withFilesCallback(tw)
+	}
+
+	// Recursively walk the path we are archiving.
+	return godirwalk.Walk(a.BasePath, options)
+}
+
+// Callback function used to determine if a given file should be included in the archive
+// being generated.
+func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
+	return func(path string, de *godirwalk.Dirent) error {
+		// Skip directories because we are walking them recursively.
+		if de.IsDir() {
+			return nil
+		}
+
+		relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))
+		// Call the additional options passed to this callback function. If any of them return
+		// a non-nil error we will exit immediately.
+		for _, opt := range opts {
+			if err := opt(path, relative); err != nil {
+				return err
+			}
+		}
+
+		// Add the file to the archive; if it is nested in a directory,
+		// the directory will be automatically "created" in the archive.
+		return a.addToArchive(path, relative, tw)
+	}
+}
+
+// Pushes only files defined in the Files key to the final archive.
+func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirwalk.Dirent) error {
+	return a.callback(tw, func(p string, rp string) error {
+		for _, f := range a.Files {
+			// If the given path doesn't match, or doesn't have the same prefix, continue
+			// to the next item in the loop.
+			if p != f && !strings.HasPrefix(p, f) {
+				continue
+			}
+			// Once we have a match return a nil value here so that the loop stops and the
+			// call to this function will correctly include the file in the archive. If there
+			// are no matches we'll never make it to this line, and the final error returned
+			// will be the godirwalk.SkipThis error.
+			return nil
+		}
+		return godirwalk.SkipThis
+	})
+}
+
+// Adds a given file path to the final archive being created.
+func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
+	// Lstat the file; this will give us the same information as Stat except
+	// that it will not follow a symlink to its target automatically.
+	s, err := os.Lstat(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return errors.WithMessage(err, "failed to Lstat '"+rp+"'")
+	}
+
+	// Resolve the symlink target if the file is a symlink.
+	var target string
+	if s.Mode()&os.ModeSymlink != 0 {
+		// Read the target of the symlink.
+		target, err = os.Readlink(s.Name())
+		if err != nil {
+			return errors.WithMessage(err, "failed to read symlink target for '"+rp+"'")
+		}
+	}
+
+	// Get the tar FileInfoHeader in order to add the file to the archive.
+	header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
+	if err != nil {
+		return errors.WithMessagef(err, "failed to get tar#FileInfoHeader for '%s'", rp)
+	}
+
+	// Fix the header name if the file is not a symlink.
+	if s.Mode()&os.ModeSymlink == 0 {
+		header.Name = rp
+	}
+
+	// Write the tar FileInfoHeader to the archive.
+	if err := w.WriteHeader(header); err != nil {
+		return errors.WithMessagef(err, "failed to write tar#FileInfoHeader for '%s'", rp)
+	}
+
+	// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
+	if header.Size < 1 {
+		return nil
+	}
+
+	// If the buffer size is larger than the file size, create a smaller buffer to hold the file.
+	var buf []byte
+	if header.Size < memory {
+		buf = make([]byte, header.Size)
+	} else {
+		// Get a fixed-size buffer from the pool to save on allocations.
+		buf = pool.Get().([]byte)
+		defer func() {
+			buf = make([]byte, memory)
+			pool.Put(buf)
+		}()
+	}
+
+	// Open the file.
+	f, err := os.Open(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return errors.WithMessagef(err, "failed to open '%s' for copying", header.Name)
+	}
+	defer f.Close()
+
+	// Copy the file's contents to the archive using our buffer.
+	if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
+		return errors.WithMessagef(err, "failed to copy '%s' to archive", header.Name)
+	}
+	return nil
+}
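
Two behaviors of the new Archive are easy to miss in the file above: Files takes priority over Ignore, and symlinks are recorded as links (Lstat plus Readlink, with FollowSymbolicLinks disabled) rather than having their targets traversed. A sketch exercising the Files selection path, mirroring what CompressFiles does below (paths illustrative):

package main

import (
	"log"

	"github.com/pterodactyl/wings/server/filesystem"
)

func main() {
	a := &filesystem.Archive{
		BasePath: "/srv/server-data",
		// When Files is set, only these paths (and anything nested under
		// them) are archived; any Ignore value is not consulted.
		Files: []string{
			"/srv/server-data/world",
			"/srv/server-data/server.properties",
		},
	}
	if err := a.Create("/tmp/world-snapshot.tar.gz"); err != nil {
		log.Fatal(err)
	}
}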

@@ -1,11 +1,7 @@
 package filesystem
 
 import (
-	"context"
 	"fmt"
-	"github.com/karrick/godirwalk"
-	"github.com/pterodactyl/wings/server/backup"
-	ignore "github.com/sabhiram/go-gitignore"
 	"os"
 	"path"
 	"path/filepath"
@@ -13,60 +9,6 @@ import (
 	"time"
 )
 
-// Given a directory, iterate through all of the files and folders within it and determine
-// if they should be included in the output based on an array of ignored matches. This uses
-// standard .gitignore formatting to make that determination.
-//
-// If no ignored files are passed through you'll get the entire directory listing.
-func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.IncludedFiles, error) {
-	cleaned, err := fs.SafePath(dir)
-	if err != nil {
-		return nil, err
-	}
-
-	i, err := ignore.CompileIgnoreLines(ignored...)
-	if err != nil {
-		return nil, err
-	}
-
-	// Walk through all of the files and directories on a server. This callback only returns
-	// files found, and will keep walking deeper and deeper into directories.
-	inc := new(backup.IncludedFiles)
-
-	err = godirwalk.Walk(cleaned, &godirwalk.Options{
-		Unsorted: true,
-		Callback: func(p string, e *godirwalk.Dirent) error {
-			sp := p
-			if e.IsSymlink() {
-				sp, err = fs.SafePath(p)
-				if err != nil {
-					if IsErrorCode(err, ErrCodePathResolution) {
-						return godirwalk.SkipThis
-					}
-
-					return err
-				}
-			}
-
-			// Only push files into the result array since archives can't create an empty directory within them.
-			if !e.IsDir() {
-				// Avoid unnecessary parsing if there are no ignored files, nothing will match anyways
-				// so no reason to call the function.
-				if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(sp, fs.Path()+"/")) {
-					inc.Push(sp)
-				}
-			}
-
-			// We can't just abort if the path is technically ignored. It is possible there is a nested
-			// file or folder that should not be excluded, so in this case we need to just keep going
-			// until we get to a final state.
-			return nil
-		},
-	})
-
-	return inc, err
-}
-
 // Compresses all of the files matching the given paths in the specified directory. This function
 // also supports passing nested paths to only compress certain files and folders when working in
 // a larger directory. This effectively creates a local backup, but rather than ignoring specific
@@ -90,69 +32,24 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
 		return nil, err
 	}
 
-	inc := new(backup.IncludedFiles)
-	// Iterate over all of the cleaned paths and merge them into a large object of final file
-	// paths to pass into the archiver. As directories are encountered this will drop into them
-	// and look for all of the files.
-	for _, p := range cleaned {
-		f, err := os.Stat(p)
-		if err != nil {
-			fs.error(err).WithField("path", p).Debug("failed to stat file or directory for compression")
-			continue
-		}
-
-		if !f.IsDir() {
-			inc.Push(p)
-		} else {
-			err := godirwalk.Walk(p, &godirwalk.Options{
-				Unsorted: true,
-				Callback: func(p string, e *godirwalk.Dirent) error {
-					sp := p
-					if e.IsSymlink() {
-						// Ensure that any symlinks are properly resolved to their final destination. If
-						// that destination is outside the server directory skip over this entire item, otherwise
-						// use the resolved location for the rest of this function.
-						sp, err = fs.SafePath(p)
-						if err != nil {
-							if IsErrorCode(err, ErrCodePathResolution) {
-								return godirwalk.SkipThis
-							}
-
-							return err
-						}
-					}
-
-					if !e.IsDir() {
-						inc.Push(sp)
-					}
-
-					return nil
-				},
-			})
-
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	a := &backup.Archive{TrimPrefix: fs.Path(), Files: inc}
-	d := path.Join(cleanedRootDir, fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")))
-
-	if err := a.Create(d, context.Background()); err != nil {
+	a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
+	d := path.Join(
+		cleanedRootDir,
+		fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
+	)
+
+	if err := a.Create(d); err != nil {
 		return nil, err
 	}
 
 	f, err := os.Stat(d)
 	if err != nil {
 		_ = os.Remove(d)
 
 		return nil, err
 	}
 
 	if err := fs.HasSpaceFor(f.Size()); err != nil {
 		_ = os.Remove(d)
 
 		return nil, err
 	}
 
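
After this change CompressFiles is a thin wrapper: it safe-resolves the requested paths, hands them to an Archive via Files, and stats the result to enforce the disk space limit. A hypothetical call-site fragment, where s is a *server.Server as in the server package diff above:

// Compress two entries relative to the server root; the returned os.FileInfo
// describes the archive-<timestamp>.tar.gz written into that directory.
info, err := s.Filesystem().CompressFiles("/", []string{"world", "logs/latest.log"})
if err != nil {
	return err
}
s.Log().WithField("archive", info.Name()).Info("created compressed archive")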