Fix import cycle issue
@@ -1,224 +0,0 @@
package filesystem

import (
	"archive/tar"
	"emperror.dev/errors"
	"github.com/apex/log"
	"github.com/juju/ratelimit"
	"github.com/karrick/godirwalk"
	"github.com/klauspost/pgzip"
	"github.com/pterodactyl/wings/config"
	"github.com/sabhiram/go-gitignore"
	"io"
	"os"
	"path/filepath"
	"strings"
	"sync"
)

const memory = 4 * 1024

var pool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, memory)
		return b
	},
}

type Archive struct {
	// BasePath is the absolute path to create the archive from where Files and Ignore are
	// relative to.
	BasePath string

	// Ignore is a gitignore string (most likely read from a file) of files to ignore
	// from the archive.
	Ignore string

	// Files specifies the files to archive, this takes priority over the Ignore option, if
	// unspecified, all files in the BasePath will be archived unless Ignore is set.
	Files []string
}
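
For orientation, the struct above is driven through its Create method defined just below; a minimal usage sketch with hypothetical paths (the paths are illustrative, not part of this diff):

func exampleArchive() error {
	a := &Archive{
		BasePath: "/srv/data/server", // hypothetical absolute server root
		Ignore:   "*.log\n.cache/\n", // optional gitignore-style rules
	}
	// Create walks BasePath and writes a gzipped tarball to the destination path.
	return a.Create("/tmp/example-archive.tar.gz")
}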

// Creates an archive at dst with all of the files defined in the included files struct.
func (a *Archive) Create(dst string) error {
	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer f.Close()

	// Select a writer based off of the WriteLimit configuration option. If there is no
	// write limit, use the file as the writer.
	var writer io.Writer
	if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
		// Token bucket with a capacity of "writeLimit" MiB, adding "writeLimit" MiB/s
		// and then wrap the file writer with the token bucket limiter.
		writer = ratelimit.Writer(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
	} else {
		writer = f
	}

	// Create a new gzip writer around the file.
	gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
	_ = gw.SetConcurrency(1<<20, 1)
	defer gw.Close()

	// Create a new tar writer around the gzip writer.
	tw := tar.NewWriter(gw)
	defer tw.Close()

	// Configure godirwalk.
	options := &godirwalk.Options{
		FollowSymbolicLinks: false,
		Unsorted:            true,
		Callback:            a.callback(tw),
	}

	// If we're specifically looking for only certain files, or have requested
	// that certain files be ignored we'll update the callback function to reflect
	// that request.
	if len(a.Files) == 0 && len(a.Ignore) > 0 {
		i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)

		options.Callback = a.callback(tw, func(_ string, rp string) error {
			if i.MatchesPath(rp) {
				return godirwalk.SkipThis
			}

			return nil
		})
	} else if len(a.Files) > 0 {
		options.Callback = a.withFilesCallback(tw)
	}

	// Recursively walk the path we are archiving.
	return godirwalk.Walk(a.BasePath, options)
}
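
The writer selection at the top of Create builds the chain file → optional token-bucket limiter → pgzip → tar. A standalone sketch of just the throttling step, using the same juju/ratelimit calls as the code above with a hypothetical 10 MiB/s limit:

// limitedWriter throttles writes to roughly rate bytes per second, with a burst
// capacity of one second's worth of writes; a non-positive rate disables limiting.
func limitedWriter(w io.Writer, rate int64) io.Writer {
	if rate <= 0 {
		return w
	}
	return ratelimit.Writer(w, ratelimit.NewBucketWithRate(float64(rate), rate))
}

// e.g. limitedWriter(f, 10*1024*1024) for a hypothetical 10 MiB/s cap.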

// Callback function used to determine if a given file should be included in the archive
// being generated.
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
	return func(path string, de *godirwalk.Dirent) error {
		// Skip directories because we are walking them recursively.
		if de.IsDir() {
			return nil
		}

		relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))

		// Call the additional options passed to this callback function. If any of them return
		// a non-nil error we will exit immediately.
		for _, opt := range opts {
			if err := opt(path, relative); err != nil {
				return err
			}
		}

		// Add the file to the archive, if it is nested in a directory,
		// the directory will be automatically "created" in the archive.
		return a.addToArchive(path, relative, tw)
	}
}

// Pushes only files defined in the Files key to the final archive.
func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirwalk.Dirent) error {
	return a.callback(tw, func(p string, rp string) error {
		for _, f := range a.Files {
			// If the given path doesn't match, or doesn't have the same prefix, continue
			// to the next item in the loop.
			if p != f && !strings.HasPrefix(p, f) {
				continue
			}

			// Once we have a match return a nil value here so that the loop stops and the
			// call to this function will correctly include the file in the archive. If there
			// are no matches we'll never make it to this line, and the final error returned
			// will be the godirwalk.SkipThis error.
			return nil
		}

		return godirwalk.SkipThis
	})
}

// Adds a given file path to the final archive being created.
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
	// Lstat the file, this will give us the same information as Stat except that it will not
	// follow a symlink to its target automatically. This is important to avoid including
	// files that exist outside the server root unintentionally in the backup.
	s, err := os.Lstat(p)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return errors.WrapIff(err, "failed executing os.Lstat on '%s'", rp)
	}

	// Resolve the symlink target if the file is a symlink.
	var target string
	if s.Mode()&os.ModeSymlink != 0 {
		// Read the target of the symlink. If there are any errors we will dump them out to
		// the logs, but we're not going to stop the backup. There are far too many cases of
		// symlinks causing all sorts of unnecessary pain in this process. Sucks to suck if
		// it doesn't work.
		target, err = os.Readlink(s.Name())
		if err != nil {
			// Ignore the not exist errors specifically, since there's nothing important about that.
			if !os.IsNotExist(err) {
				log.WithField("path", rp).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
			}
			return nil
		}
	}

	// Get the tar FileInfoHeader in order to add the file to the archive.
	header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
	if err != nil {
		return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", rp)
	}

	// Fix the header name if the file is not a symlink.
	if s.Mode()&os.ModeSymlink == 0 {
		header.Name = rp
	}

	// Write the tar FileInfoHeader to the archive.
	if err := w.WriteHeader(header); err != nil {
		return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", rp)
	}

	// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
	if header.Size < 1 {
		return nil
	}

	// If the buffer size is larger than the file size, create a smaller buffer to hold the file.
	var buf []byte
	if header.Size < memory {
		buf = make([]byte, header.Size)
	} else {
		// Get a fixed-size buffer from the pool to save on allocations.
		buf = pool.Get().([]byte)
		defer func() {
			buf = make([]byte, memory)
			pool.Put(buf)
		}()
	}

	// Open the file.
	f, err := os.Open(p)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return errors.WrapIff(err, "failed to open '%s' for copying", header.Name)
	}
	defer f.Close()

	// Copy the file's contents to the archive using our buffer.
	if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
		return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
	}

	return nil
}
@@ -6,16 +6,23 @@ import (
	"path"
	"path/filepath"
	"strings"
	"sync/atomic"
	"time"

	"github.com/mholt/archiver/v3"
	"github.com/pterodactyl/wings/server/backup"
	"github.com/pterodactyl/wings/system"
)

// Compresses all of the files matching the given paths in the specified directory. This function
// also supports passing nested paths to only compress certain files and folders when working in
// a larger directory. This effectively creates a local backup, but rather than ignoring specific
// files and folders, it takes an allow-list of files and folders.
// CompressFiles compresses all of the files matching the given paths in the
// specified directory. This function also supports passing nested paths to only
// compress certain files and folders when working in a larger directory. This
// effectively creates a local backup, but rather than ignoring specific files
// and folders, it takes an allow-list of files and folders.
//
// All paths are relative to the dir that is passed in as the first argument, and the compressed
// file will be placed at that location named `archive-{date}.tar.gz`.
// All paths are relative to the dir that is passed in as the first argument,
// and the compressed file will be placed at that location named
// `archive-{date}.tar.gz`.
func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
	cleanedRootDir, err := fs.SafePath(dir)
	if err != nil {
@@ -32,7 +39,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
		return nil, err
	}

	a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
	a := &backup.Archive{BasePath: cleanedRootDir, Files: cleaned}
	d := path.Join(
		cleanedRootDir,
		fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
@@ -57,3 +64,86 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {

	return f, nil
}

// SpaceAvailableForDecompression looks through a given archive and determines
// if decompressing it would put the server over its allocated disk space limit.
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
	// Don't waste time trying to determine this if we know the server will have the space for
	// it since there is no limit.
	if fs.MaxDisk() <= 0 {
		return nil
	}

	source, err := fs.SafePath(filepath.Join(dir, file))
	if err != nil {
		return err
	}

	// Get the cached size in a parallel process so that if it is not cached we are not
	// waiting an unnecessary amount of time on this call.
	dirSize, err := fs.DiskUsage(false)

	var size int64
	// Walk over the archive and figure out just how large the final output would be from unarchiving it.
	err = archiver.Walk(source, func(f archiver.File) error {
		if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
			return &Error{code: ErrCodeDiskSpace}
		}
		return nil
	})
	if err != nil {
		if strings.HasPrefix(err.Error(), "format ") {
			return &Error{code: ErrCodeUnknownArchive}
		}
		return err
	}
	return err
}

// DecompressFile will decompress a file in a given directory by using the
// archiver tool to infer the file type and go from there. This will walk over
// all of the files within the given archive and ensure that there is not a
// zip-slip attack being attempted by validating that the final path is within
// the server data directory.
func (fs *Filesystem) DecompressFile(dir string, file string) error {
	source, err := fs.SafePath(filepath.Join(dir, file))
	if err != nil {
		return err
	}
	// Ensure that the source archive actually exists on the system.
	if _, err := os.Stat(source); err != nil {
		return err
	}

	// Walk over all of the files spinning up an additional go-routine for each file we've encountered
	// and then extract that file from the archive and write it to the disk. If any part of this process
	// encounters an error the entire process will be stopped.
	err = archiver.Walk(source, func(f archiver.File) error {
		// Don't waste time with directories, we don't need to create them if they have no contents, and
		// we will ensure the directory exists when opening the file for writing anyways.
		if f.IsDir() {
			return nil
		}
		name, err := system.ExtractArchiveSourceName(f, dir)
		if err != nil {
			return WrapError(err, filepath.Join(dir, f.Name()))
		}
		p := filepath.Join(dir, name)
		// If it is ignored, just don't do anything with the file and skip over it.
		if err := fs.IsIgnored(p); err != nil {
			return nil
		}
		if err := fs.Writefile(p, f); err != nil {
			return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
		}
		return nil
	})
	if err != nil {
		if strings.HasPrefix(err.Error(), "format ") {
			return &Error{code: ErrCodeUnknownArchive}
		}
		return err
	}
	return nil
}
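
Callers typically pair the two methods added above, checking space before extracting; a minimal sketch assuming a hypothetical *Filesystem value and archive name (illustrative only):

func exampleExtract(fs *Filesystem) error {
	// Refuse to extract when the unpacked contents would push the server past its disk limit.
	if err := fs.SpaceAvailableForDecompression("/", "example.tar.gz"); err != nil {
		return err
	}
	// Extract the archive into the server's root directory, skipping ignored paths.
	return fs.DecompressFile("/", "example.tar.gz")
}
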
@@ -1,118 +0,0 @@
package filesystem

import (
	"archive/tar"
	"archive/zip"
	"compress/gzip"
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"sync/atomic"

	"emperror.dev/errors"
	"github.com/mholt/archiver/v3"
)

// SpaceAvailableForDecompression looks through a given archive and determines
// if decompressing it would put the server over its allocated disk space limit.
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
	// Don't waste time trying to determine this if we know the server will have the space for
	// it since there is no limit.
	if fs.MaxDisk() <= 0 {
		return nil
	}

	source, err := fs.SafePath(filepath.Join(dir, file))
	if err != nil {
		return err
	}

	// Get the cached size in a parallel process so that if it is not cached we are not
	// waiting an unnecessary amount of time on this call.
	dirSize, err := fs.DiskUsage(false)

	var size int64
	// Walk over the archive and figure out just how large the final output would be from unarchiving it.
	err = archiver.Walk(source, func(f archiver.File) error {
		if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
			return &Error{code: ErrCodeDiskSpace}
		}
		return nil
	})
	if err != nil {
		if strings.HasPrefix(err.Error(), "format ") {
			return &Error{code: ErrCodeUnknownArchive}
		}
		return err
	}
	return err
}

// DecompressFile will decompress a file in a given directory by using the
// archiver tool to infer the file type and go from there. This will walk over
// all of the files within the given archive and ensure that there is not a
// zip-slip attack being attempted by validating that the final path is within
// the server data directory.
func (fs *Filesystem) DecompressFile(dir string, file string) error {
	source, err := fs.SafePath(filepath.Join(dir, file))
	if err != nil {
		return err
	}
	// Ensure that the source archive actually exists on the system.
	if _, err := os.Stat(source); err != nil {
		return err
	}

	// Walk over all of the files spinning up an additional go-routine for each file we've encountered
	// and then extract that file from the archive and write it to the disk. If any part of this process
	// encounters an error the entire process will be stopped.
	err = archiver.Walk(source, func(f archiver.File) error {
		// Don't waste time with directories, we don't need to create them if they have no contents, and
		// we will ensure the directory exists when opening the file for writing anyways.
		if f.IsDir() {
			return nil
		}
		name, err := ExtractArchiveSourceName(f, dir)
		if err != nil {
			return err
		}
		p := filepath.Join(dir, name)
		// If it is ignored, just don't do anything with the file and skip over it.
		if err := fs.IsIgnored(p); err != nil {
			return nil
		}
		if err := fs.Writefile(p, f); err != nil {
			return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
		}
		return nil
	})
	if err != nil {
		if strings.HasPrefix(err.Error(), "format ") {
			return &Error{code: ErrCodeUnknownArchive}
		}
		return err
	}
	return nil
}

// ExtractArchiveSourceName looks for the provided archiver.File's name if it is
// a type that is supported, otherwise it returns an error to the caller.
func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) {
	switch s := f.Sys().(type) {
	case *tar.Header:
		name = s.Name
	case *gzip.Header:
		name = s.Name
	case *zip.FileHeader:
		name = s.Name
	default:
		err = &Error{
			code:     ErrCodeUnknownError,
			resolved: filepath.Join(dir, f.Name()),
			err:      errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())),
		}
	}
	return name, err
}
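
The zip-slip validation mentioned in the DecompressFile comment above amounts to confirming that the joined destination path still resolves inside the server root. A generic containment check of that kind, shown for illustration only (this is not the actual SafePath implementation):

// withinRoot reports whether p, once cleaned, still resolves to a location
// inside root, rejecting entries such as "../../etc/passwd".
func withinRoot(root, p string) bool {
	rel, err := filepath.Rel(root, filepath.Clean(p))
	if err != nil {
		return false
	}
	return rel != ".." && !strings.HasPrefix(rel, ".."+string(filepath.Separator))
}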