Add support for ignoring directories/files; fix compression of archives
@@ -35,7 +35,12 @@ func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, suc
 // let the actual backup system handle notifying the panel of the status, but that
 // won't emit a websocket event.
 func (s *Server) BackupLocal(b *backup.LocalBackup) error {
-	if err := b.Backup(s.Filesystem.Path()); err != nil {
+	inc, err := s.Filesystem.GetIncludedFiles(s.Filesystem.Path(), b.IgnoredFiles)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	if err := b.Backup(inc, s.Filesystem.Path()); err != nil {
 		if notifyError := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); notifyError != nil {
 			zap.S().Warnw("failed to notify panel of failed backup state", zap.String("backup", b.Uuid), zap.Error(err))
 		}
server/backup/archiver.go (new file, 111 lines)
@@ -0,0 +1,111 @@
+package backup
+
+import (
+	"archive/tar"
+	"context"
+	gzip "github.com/klauspost/pgzip"
+	"github.com/remeh/sizedwaitgroup"
+	"go.uber.org/zap"
+	"golang.org/x/sync/errgroup"
+	"io"
+	"os"
+	"strings"
+	"sync"
+)
+
+type Archive struct {
+	sync.Mutex
+
+	TrimPrefix string
+	Files      *IncludedFiles
+}
+
+// Creates an archive at dest with all of the files defined in the included files struct.
+func (a *Archive) Create(dest string, ctx context.Context) error {
+	f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	gzw := gzip.NewWriter(f)
+	defer gzw.Close()
+
+	tw := tar.NewWriter(gzw)
+	defer tw.Close()
+
+	wg := sizedwaitgroup.New(10)
+	g, ctx := errgroup.WithContext(ctx)
+	// Iterate over all of the files to be included and put them into the archive. This is
+	// done as a concurrent goroutine to speed things along. If an error is encountered at
+	// any step, the entire process is aborted.
+	for p, s := range a.Files.All() {
+		if (*s).IsDir() {
+			continue
+		}
+
+		pa := p
+		st := s
+
+		g.Go(func() error {
+			wg.Add()
+			defer wg.Done()
+
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+				return a.addToArchive(pa, st, tw)
+			}
+		})
+	}
+
+	// Block until the entire routine is completed.
+	if err := g.Wait(); err != nil {
+		f.Close()
+
+		// Attempt to remove the archive if there is an error, report that error to
+		// the logger if it fails.
+		if rerr := os.Remove(dest); rerr != nil && !os.IsNotExist(rerr) {
+			zap.S().Warnw("failed to delete corrupted backup archive", zap.String("location", dest))
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+// Adds a single file to the existing tar archive writer.
+func (a *Archive) addToArchive(p string, s *os.FileInfo, w *tar.Writer) error {
+	f, err := os.Open(p)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	st := *s
+	header := &tar.Header{
+		// Trim the long server path from the name of the file so that the resulting
+		// archive is exactly how the user would see it in the panel file manager.
+		Name:    strings.TrimPrefix(p, a.TrimPrefix),
+		Size:    st.Size(),
+		Mode:    int64(st.Mode()),
+		ModTime: st.ModTime(),
+	}
+
+	// These actions must occur sequentially, even if this function is called multiple
+	// times in parallel. You'll get some nasty panics otherwise.
+	a.Lock()
+	defer a.Unlock()
+
+	if err = w.WriteHeader(header); err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(w, f); err != nil {
+		return err
+	}
+
+	return nil
+}
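Note: the comments in Create describe a bounded, cancellable fan-out: errgroup aborts all workers on the first error, and the sizedwaitgroup caps concurrent file reads at ten. A minimal sketch of driving it from outside the package; the paths and the timeout here are hypothetical, not part of this commit:

package main

import (
	"context"
	"log"
	"os"
	"time"

	"github.com/pterodactyl/wings/server/backup"
)

func main() {
	// Normally Filesystem.GetIncludedFiles builds this set; stat one file by hand here.
	inc := new(backup.IncludedFiles)
	st, err := os.Stat("/srv/data/config.yml") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	inc.Push(&st, "/srv/data/config.yml")

	a := &backup.Archive{
		TrimPrefix: "/srv/data", // strip so entry names match what the panel shows
		Files:      inc,
	}

	// Cancelling the context (here via a timeout) aborts the errgroup workers,
	// and Create deletes the partially written archive on any error.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	if err := a.Create("/tmp/backup.tar.gz", ctx); err != nil {
		log.Fatal(err)
	}
}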
@@ -10,7 +10,7 @@ type Backup interface {

 	// Generates a backup in whatever the configured source for the specific
 	// implementation is.
-	Backup(dir string) error
+	Backup(*IncludedFiles, string) error

 	// Returns a SHA256 checksum for the generated backup.
 	Checksum() ([]byte, error)
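Note: every implementation has to track this signature change. A compile-time assertion is a cheap guard; these two lines are not in the commit, just a common Go idiom that turns a missed implementation into a build error:

// Hypothetical guards for the backup package; each fails to compile if the
// named type stops satisfying the updated Backup interface.
var _ Backup = (*LocalBackup)(nil)
var _ Backup = (*S3Backup)(nil)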
@@ -1,16 +1,15 @@
 package backup

 import (
+	"context"
 	"crypto/sha256"
 	"encoding/hex"
-	"github.com/mholt/archiver/v3"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/config"
 	"go.uber.org/zap"
 	"io"
 	"os"
 	"path"
-	"strings"
 	"sync"
 )

@@ -79,25 +78,15 @@ func (b *LocalBackup) Remove() error {

 // Generates a backup of the selected files and pushes it to the defined location
 // for this instance.
-func (b *LocalBackup) Backup(dir string) error {
-	if err := archiver.Archive([]string{dir}, b.Path()); err != nil {
-		if strings.HasPrefix(err.Error(), "file already exists") {
-			if rerr := os.Remove(b.Path()); rerr != nil {
-				return errors.WithStack(rerr)
-			}
-
-			// Re-attempt this backup by calling it with the same information.
-			return b.Backup(dir)
-		}
-
-		// If there was some error with the archive, just go ahead and ensure the backup
-		// is completely destroyed at this point. Ignore any errors from this function.
-		os.Remove(b.Path())
-
-		return errors.WithStack(err)
-	}
-
-	return nil
+func (b *LocalBackup) Backup(included *IncludedFiles, prefix string) error {
+	a := &Archive{
+		TrimPrefix: prefix,
+		Files:      included,
+	}
+
+	err := a.Create(b.Path(), context.Background())
+
+	return err
 }

 // Return the size of the generated backup.
@@ -162,4 +151,4 @@ func (b *LocalBackup) ensureLocalBackupLocation() error {
 	}

 	return nil
 }
@@ -16,7 +16,7 @@ func (s *S3Backup) Identifier() string {
 	return s.Uuid
 }

-func (s *S3Backup) Backup(dir string) error {
+func (s *S3Backup) Backup(included *IncludedFiles, prefix string) error {
 	panic("implement me")
 }

server/backup/included.go (new file, 31 lines)
@@ -0,0 +1,31 @@
+package backup
+
+import (
+	"os"
+	"sync"
+)
+
+type IncludedFiles struct {
+	sync.RWMutex
+	files map[string]*os.FileInfo
+}
+
+// Pushes an additional file or folder onto the struct.
+func (i *IncludedFiles) Push(info *os.FileInfo, p string) {
+	i.Lock()
+	defer i.Unlock()
+
+	if i.files == nil {
+		i.files = make(map[string]*os.FileInfo)
+	}
+
+	i.files[p] = info
+}
+
+// Returns all of the files that were marked as being included.
+func (i *IncludedFiles) All() map[string]*os.FileInfo {
+	i.RLock()
+	defer i.RUnlock()
+
+	return i.files
+}
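Note: the embedded RWMutex is what makes IncludedFiles safe to share with concurrent callers. A contrived sketch (same package, hypothetical helper) of the access pattern it protects:

// Push takes the write lock, so concurrent goroutines may add entries safely;
// All takes the read lock for the final iteration.
func demoConcurrentPush(infos map[string]*os.FileInfo) *IncludedFiles {
	inc := new(IncludedFiles)

	var wg sync.WaitGroup
	for p, i := range infos {
		wg.Add(1)
		p, i := p, i // copy the loop variables captured by the closure
		go func() {
			defer wg.Done()
			inc.Push(i, p)
		}()
	}
	wg.Wait()

	return inc
}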
@@ -9,6 +9,8 @@ import (
 	"github.com/gabriel-vasile/mimetype"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/config"
+	"github.com/pterodactyl/wings/server/backup"
+	ignore "github.com/sabhiram/go-gitignore"
 	"go.uber.org/zap"
 	"io"
 	"io/ioutil"
@@ -561,3 +563,43 @@ func (fs *Filesystem) EnsureDataDirectory() error {

 	return nil
 }
+
+// Given a directory, iterate through all of the files and folders within it and determine
+// if they should be included in the output based on an array of ignored matches. This uses
+// standard .gitignore formatting to make that determination.
+//
+// If no ignored files are passed through you'll get the entire directory listing.
+func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.IncludedFiles, error) {
+	cleaned, err := fs.SafePath(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	w := fs.NewWalker()
+	ctx := context.Background()
+
+	i, err := ignore.CompileIgnoreLines(ignored...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Walk through all of the files and directories on a server. This callback only returns
+	// files found, and will keep walking deeper and deeper into directories.
+	inc := new(backup.IncludedFiles)
+	if err := w.Walk(cleaned, ctx, func(f os.FileInfo, p string) bool {
+		// Avoid unnecessary parsing if there are no ignored files; nothing will match anyway,
+		// so there is no reason to call the function.
+		if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(p, fs.Path()+"/")) {
+			inc.Push(&f, p)
+		}
+
+		// We can't just abort if the path is technically ignored. It is possible there is a nested
+		// file or folder that should not be excluded, so in this case we need to just keep going
+		// until we get to a final state.
+		return true
+	}); err != nil {
+		return nil, err
+	}
+
+	return inc, nil
+}
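Note: MatchesPath applies standard .gitignore pattern semantics to the server-relative path, which is why the walker trims fs.Path() before matching. A small sketch of what typical patterns do; the patterns, paths, and helper are hypothetical, and it assumes the same ignore import plus fmt:

func demoIgnoreMatching() error {
	i, err := ignore.CompileIgnoreLines("*.log", "backups/", "node_modules/")
	if err != nil {
		return err
	}

	for _, p := range []string{
		"logs/latest.log",    // matched by "*.log" -> excluded from the backup
		"backups/old.tar.gz", // inside an ignored directory -> excluded
		"world/level.dat",    // nothing matches -> kept
	} {
		fmt.Printf("%s ignored: %v\n", p, i.MatchesPath(p))
	}

	return nil
}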
@@ -38,6 +38,7 @@ func (fw *FileWalker) Walk(dir string, ctx context.Context, callback func (os.Fi

 	for _, f := range files {
 		if f.IsDir() {
+			fi := f
 			p := filepath.Join(cleaned, f.Name())
 			// Recursively call this function to continue digging through the directory tree within
 			// a separate goroutine. If the context is canceled abort this process.
@@ -49,7 +50,7 @@ func (fw *FileWalker) Walk(dir string, ctx context.Context, callback func (os.Fi
 			// If the callback returns true, go ahead and keep walking deeper. This allows
 			// us to programmatically continue deeper into directories, or stop digging
 			// if that pathway knows it needs nothing else.
-			if callback(f, p) {
+			if callback(fi, p) {
 				return fw.Walk(p, ctx, callback)
 			}

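Note: the `fi := f` copy added above is the classic Go loop-variable capture fix (Go versions before 1.22 reuse a single variable across iterations), so a closure created in the loop body can otherwise observe a later iteration's value. A self-contained reproduction, independent of this codebase:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"a", "b", "c"} {
		wg.Add(1)
		n := name // the fix: without this copy the goroutine may print "c" three times
		go func() {
			defer wg.Done()
			fmt.Println(n)
		}()
	}
	wg.Wait()
}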