package filesystem

import (
	"context"
	"fmt"
	"io"
	iofs "io/fs"
	"path"
	"path/filepath"
	"strings"
	"sync/atomic"
	"time"

	"emperror.dev/errors"
	"github.com/klauspost/compress/zip"
	"github.com/mholt/archiver/v4"

	"github.com/pterodactyl/wings/internal/ufs"
	"github.com/pterodactyl/wings/server/filesystem/archiverext"
)

// CompressFiles compresses all the files matching the given paths in the
// specified directory. This function also supports passing nested paths to
// only compress certain files and folders when working in a larger directory.
// This effectively creates a local backup, but rather than ignoring specific
// files and folders, it takes an allow-list of files and folders.
//
// All paths are relative to the dir that is passed in as the first argument,
// and the compressed file will be placed at that location, named
// `archive-{date}.tar.gz`.
func (fs *Filesystem) CompressFiles(dir string, paths []string) (ufs.FileInfo, error) {
	a := &Archive{Filesystem: fs, BaseDirectory: dir, Files: paths}
	d := path.Join(
		dir,
		fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
	)
	f, err := fs.unixFS.OpenFile(d, ufs.O_WRONLY|ufs.O_CREATE, 0o644)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	cw := ufs.NewCountedWriter(f)
	if err := a.Stream(context.Background(), cw); err != nil {
		return nil, err
	}
	if !fs.unixFS.CanFit(cw.BytesWritten()) {
		_ = fs.unixFS.Remove(d)
		return nil, newFilesystemError(ErrCodeDiskSpace, nil)
	}
	fs.unixFS.Add(cw.BytesWritten())
	return f.Stat()
}
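
// A minimal usage sketch (hypothetical caller; assumes `fs` is an initialized
// *Filesystem rooted at the server's data directory, and that the listed
// paths exist):
//
//	info, err := fs.CompressFiles("/", []string{"world", "server.properties"})
//	if err != nil {
//		return err
//	}
//	log.Printf("created %s (%d bytes)", info.Name(), info.Size())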

// archiverFileSystem opens the file at path p and wraps it in an iofs.FS
// suited to its format: zip archives get a zip.Reader, other archive formats
// an archiver.ArchiveFS, and single-file compression formats an
// archiverext.FileFS.
func (fs *Filesystem) archiverFileSystem(ctx context.Context, p string) (iofs.FS, error) {
	f, err := fs.unixFS.Open(p)
	if err != nil {
		return nil, err
	}
	// Do not use defer to close `f`; it will likely be used later.

	format, _, err := archiver.Identify(filepath.Base(p), f)
	if err != nil && !errors.Is(err, archiver.ErrNoMatch) {
		_ = f.Close()
		return nil, err
	}

	// Reset the file reader.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		_ = f.Close()
		return nil, err
	}

	info, err := f.Stat()
	if err != nil {
		_ = f.Close()
		return nil, err
	}

	if format != nil {
		switch ff := format.(type) {
		case archiver.Zip:
			// zip.Reader is more performant than ArchiveFS: it caches content
			// information, and because it reads through an io.ReaderAt it can
			// open several entries concurrently, which ArchiveFS cannot.
			// zip.Reader also does not suffer from issues #330 and #310
			// according to local testing (though those should be fixed anyway).
			return zip.NewReader(f, info.Size())
		case archiver.Archival:
			return archiver.ArchiveFS{Stream: io.NewSectionReader(f, 0, info.Size()), Format: ff, Context: ctx}, nil
		case archiver.Compression:
			return archiverext.FileFS{File: f, Compression: ff}, nil
		}
	}
	_ = f.Close()
	return nil, archiver.ErrNoMatch
}

// SpaceAvailableForDecompression looks through a given archive and determines
// if decompressing it would put the server over its allocated disk space limit.
func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir string, file string) error {
	// Don't waste time trying to determine this if we know the server will
	// have the space for it, since there is no limit.
	if fs.MaxDisk() <= 0 {
		return nil
	}

	fsys, err := fs.archiverFileSystem(ctx, filepath.Join(dir, file))
	if err != nil {
		if errors.Is(err, archiver.ErrNoMatch) {
			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}

	// Walk every entry in the archive, summing the uncompressed sizes and
	// bailing out as soon as the running total can no longer fit on the disk.
	var size atomic.Int64
	return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		select {
		case <-ctx.Done():
			// Stop walking if the context is canceled.
			return ctx.Err()
		default:
			info, err := d.Info()
			if err != nil {
				return err
			}
			if !fs.unixFS.CanFit(size.Add(info.Size())) {
				return newFilesystemError(ErrCodeDiskSpace, nil)
			}
			return nil
		}
	})
}
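
// A likely call pattern (sketch; hypothetical caller): verify the decompressed
// contents will fit on the disk before actually extracting them.
//
//	if err := fs.SpaceAvailableForDecompression(ctx, dir, file); err != nil {
//		return err
//	}
//	if err := fs.DecompressFile(ctx, dir, file); err != nil {
//		return err
//	}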

// DecompressFile will decompress a file in a given directory by using the
// archiver tool to infer the file type and go from there. This will walk over
// all the files within the given archive and ensure that there is not a
// zip-slip attack being attempted by validating that the final path is within
// the server data directory.
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
	f, err := fs.unixFS.Open(filepath.Join(dir, file))
	if err != nil {
		return err
	}
	defer f.Close()

	// Identify the type of archive we are dealing with.
	format, input, err := archiver.Identify(filepath.Base(file), f)
	if err != nil {
		if errors.Is(err, archiver.ErrNoMatch) {
			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}

	return fs.extractStream(ctx, extractStreamOptions{
		FileName:  file,
		Directory: dir,
		Format:    format,
		Reader:    input,
	})
}
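
// For illustration, the zip-slip guard referenced in the doc comment above
// boils down to a root check like this (sketch only; the real validation
// lives in the Filesystem's path handling, not here, and `dataDir` /
// `nameInArchive` are hypothetical names):
//
//	dest := filepath.Clean(filepath.Join(dataDir, nameInArchive))
//	if dest != dataDir && !strings.HasPrefix(dest, dataDir+string(os.PathSeparator)) {
//		return errors.New("archive entry escapes the data directory")
//	}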

// ExtractStreamUnsafe extracts an archive read directly from r into the given
// directory, identifying the stream's format as if it were a file named
// `archive.tar.gz`.
func (fs *Filesystem) ExtractStreamUnsafe(ctx context.Context, dir string, r io.Reader) error {
	format, input, err := archiver.Identify("archive.tar.gz", r)
	if err != nil {
		if errors.Is(err, archiver.ErrNoMatch) {
			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}
	return fs.extractStream(ctx, extractStreamOptions{
		Directory: dir,
		Format:    format,
		Reader:    input,
	})
}
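
// Streaming sketch (hypothetical caller; assumes `resp` is an *http.Response
// whose body is a gzipped tarball, e.g. during a server transfer):
//
//	if err := fs.ExtractStreamUnsafe(ctx, "/", resp.Body); err != nil {
//		return err
//	}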

type extractStreamOptions struct {
	// The directory to extract the archive to.
	Directory string
	// File name of the archive.
	FileName string
	// Format of the archive.
	Format archiver.Format
	// Reader for the archive.
	Reader io.Reader
}

// extractStream extracts the archive described by opts into its target
// directory, enforcing the server's disk quota and ignore rules as it goes.
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
	// See if the format is a multi-file archive, such as a tar or a zip.
	ex, ok := opts.Format.(archiver.Extractor)
	if !ok {
		// If not, check if it's a single-file compression format, such as
		// .log.gz, .sql.gz, and so on.
		de, ok := opts.Format.(archiver.Decompressor)
		if !ok {
			return nil
		}

		// Strip the compression suffix.
		p := filepath.Join(opts.Directory, strings.TrimSuffix(opts.FileName, opts.Format.Name()))

		// Make sure the destination is not ignored; if it is, skip it silently.
		if err := fs.IsIgnored(p); err != nil {
			return nil
		}

		reader, err := de.OpenReader(opts.Reader)
		if err != nil {
			return err
		}
		defer reader.Close()

		// Open the file for creation/writing.
		f, err := fs.unixFS.OpenFile(p, ufs.O_WRONLY|ufs.O_CREATE, 0o644)
		if err != nil {
			return err
		}
		defer f.Close()

		// Read in 4 KB chunks so the disk quota is enforced while the
		// decompressed data is being written, not after the fact.
		buf := make([]byte, 4096)
		for {
			n, err := reader.Read(buf)
			if n > 0 {
				// Check the quota before writing the chunk.
				if quotaErr := fs.HasSpaceFor(int64(n)); quotaErr != nil {
					return quotaErr
				}

				// Write the chunk.
				if _, writeErr := f.Write(buf[:n]); writeErr != nil {
					return writeErr
				}

				// Add the chunk to the quota.
				fs.addDisk(int64(n))
			}

			if err != nil {
				// io.EOF is expected once the stream has been exhausted.
				if errors.Is(err, io.EOF) {
					break
				}

				// Return any other error.
				return err
			}
		}

		return nil
	}

	// Decompress and extract the archive.
	return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
		if f.IsDir() {
			return nil
		}
		p := filepath.Join(opts.Directory, f.NameInArchive)
		// If it is ignored, just don't do anything with the file and skip over it.
		if err := fs.IsIgnored(p); err != nil {
			return nil
		}
		r, err := f.Open()
		if err != nil {
			return err
		}
		defer r.Close()
		if err := fs.Write(p, r, f.Size(), f.Mode()); err != nil {
			return wrapError(err, opts.FileName)
		}
		// Update the file modification time to the one set in the archive.
		if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
			return wrapError(err, opts.FileName)
		}
		return nil
	})
}