Support unarchiving in a better fashion with zip-slip protections and size checking

Dane Everitt
2020-07-15 21:16:08 -07:00
parent ae46add8ef
commit a635cdd6b2
3 changed files with 183 additions and 40 deletions


@@ -7,7 +7,6 @@ import (
"encoding/json"
"fmt"
"github.com/gabriel-vasile/mimetype"
"github.com/mholt/archiver/v3"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/server/backup"
@@ -165,34 +164,11 @@ func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
func (fs *Filesystem) HasSpaceAvailable() bool {
space := fs.Server.Build.DiskSpace
// If we have a match in the cache, use that value in the return. No need to perform an expensive
// disk operation, even if this is an empty value.
if x, exists := fs.Server.Cache.Get("disk_used"); exists {
fs.Server.Resources.Disk = x.(int64)
// This check is here to ensure that true is always returned if the server has unlimited disk space.
// See the end of this method for more information (the other `if space <= 0`).
if space <= 0 {
return true
}
return (x.(int64) / 1000.0 / 1000.0) <= space
}
// If there is no size it's either because there is no data (in which case running this function
// will have effectively no impact), or there is nothing in the cache, in which case we need to
// grab the size of their data directory. This is a taxing operation, so we want to store it in
// the cache once we've gotten it.
size, err := fs.DirectorySize("/")
size, err := fs.getCachedDiskUsage()
if err != nil {
fs.Server.Log().WithField("error", err).Warn("failed to determine root server directory size")
}
// Always cache the size, even if there is an error. We want to always return that value
// so that we don't cause an endless loop of determining the disk size if there is a temporary
// error encountered.
fs.Server.Cache.Set("disk_used", size, time.Second*60)
// Determine if their folder size, in bytes, is smaller than the amount of space they've
// been allocated.
fs.Server.Resources.Disk = size
@@ -209,6 +185,29 @@ func (fs *Filesystem) HasSpaceAvailable() bool {
return (size / 1000.0 / 1000.0) <= space
}
// Internal helper function to allow other parts of the codebase to check the total used disk space
// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
// is no longer a cached value.
func (fs *Filesystem) getCachedDiskUsage() (int64, error) {
if x, exists := fs.Server.Cache.Get("disk_used"); exists {
return x.(int64), nil
}
// If there is no size it's either because there is no data (in which case running this function
// will have effectively no impact), or there is nothing in the cache, in which case we need to
// grab the size of their data directory. This is a taxing operation, so we want to store it in
// the cache once we've gotten it.
size, err := fs.DirectorySize("/")
// Always cache the size, even if there is an error. We want to always return that value
// so that we don't cause an endless loop of determining the disk size if there is a temporary
// error encountered.
fs.Server.Cache.Set("disk_used", size, time.Second*60)
return size, err
}
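Since the comment above says this helper exists so other parts of the codebase can check used disk space, a minimal usage sketch follows. Only the fields already visible in this diff (Server.Build.DiskSpace, Server.Log(), the cache) come from the source; the hasSpaceFor name, its signature, and its existence are assumptions made purely for illustration.
// hasSpaceFor is a hypothetical caller of the new helper (not part of this
// commit): would writing `extra` more bytes push the server past its limit?
func (fs *Filesystem) hasSpaceFor(extra int64) bool {
	// Build.DiskSpace is the limit in megabytes; <= 0 means unlimited.
	space := fs.Server.Build.DiskSpace
	if space <= 0 {
		return true
	}
	used, err := fs.getCachedDiskUsage()
	if err != nil {
		// Mirror HasSpaceAvailable: log the failure but keep going with the
		// returned value so a transient error doesn't hard-fail the check.
		fs.Server.Log().WithField("error", err).Warn("failed to determine root server directory size")
	}
	// Same bytes-to-megabytes conversion HasSpaceAvailable uses.
	return (used+extra)/1000.0/1000.0 <= space
}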
// Determines the directory size of a given location by running parallel tasks to iterate
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
// on locations with tons of files, so it is recommended that you cache the output.
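DirectorySize itself is outside this hunk, so purely as an illustration of the "parallel tasks" idea the comment describes (the repository's real implementation may be structured differently), a concurrent walk could look like the sketch below. The directorySize name and shape are assumptions; it only needs io/ioutil, os, path/filepath, sync, and sync/atomic from the standard library.
// directorySize is an illustrative sketch, not the commit's implementation:
// each top-level directory is walked in its own goroutine and the per-file
// byte counts are accumulated atomically.
func directorySize(root string) (int64, error) {
	entries, err := ioutil.ReadDir(root)
	if err != nil {
		return 0, err
	}
	var total int64
	var wg sync.WaitGroup
	for _, entry := range entries {
		if !entry.IsDir() {
			atomic.AddInt64(&total, entry.Size())
			continue
		}
		wg.Add(1)
		go func(dir string) {
			defer wg.Done()
			// Per-file errors are ignored to keep the sketch short; a real
			// implementation should surface or log them.
			filepath.Walk(dir, func(_ string, info os.FileInfo, err error) error {
				if err == nil && !info.IsDir() {
					atomic.AddInt64(&total, info.Size())
				}
				return nil
			})
		}(filepath.Join(root, entry.Name()))
	}
	wg.Wait()
	return total, nil
}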
@@ -743,16 +742,3 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
return a.Create(d, context.Background())
}
// Decompress a file in a given directory by using the archiver tool to infer the file
// type and go from there.
func (fs *Filesystem) DecompressFile(dir string, file string) error {
source, err := fs.SafePath(filepath.Join(dir, file))
if err != nil {
return errors.WithStack(err)
}
dest := strings.TrimSuffix(source, filepath.Base(source))
return archiver.Unarchive(source, dest)
}
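The replacement for this function lives in one of the other changed files and is not shown in this diff. As a hedged sketch of the two protections the commit message names, a zip-slip path check plus an uncompressed-size check before anything is written, a version built on archiver.Walk could look roughly like the following. Apart from the mholt/archiver/v3 Walk API, every name here is an assumption rather than the commit's actual code.
// decompressFileSafely is an illustrative sketch (not the commit's code) of a
// zip-slip aware replacement for the removed DecompressFile. It assumes the
// github.com/mholt/archiver/v3 Walk API plus archive/tar, archive/zip, errors,
// io, os, path/filepath, and strings from the standard library.
func decompressFileSafely(source string, dest string, spaceAvailable int64) error {
	// First pass: sum the uncompressed size of every entry so the whole
	// archive can be refused before a single byte is written.
	var wanted int64
	if err := archiver.Walk(source, func(f archiver.File) error {
		wanted += f.Size()
		return nil
	}); err != nil {
		return err
	}
	if spaceAvailable > 0 && wanted > spaceAvailable {
		return errors.New("archive is larger than the space available on this server")
	}
	cleanDest := filepath.Clean(dest)
	// Second pass: extract, rejecting any entry whose resolved path escapes
	// the destination directory (the zip-slip attack, e.g. "../../etc/passwd").
	return archiver.Walk(source, func(f archiver.File) error {
		// The entry's path inside the archive lives on the format-specific
		// header; f.Name() alone only yields the base name.
		name := f.Name()
		switch h := f.Header.(type) {
		case zip.FileHeader:
			name = h.Name
		case *zip.FileHeader:
			name = h.Name
		case *tar.Header:
			name = h.Name
		}
		target := filepath.Join(cleanDest, name) // Join also cleans the path.
		if target != cleanDest && !strings.HasPrefix(target, cleanDest+string(os.PathSeparator)) {
			return errors.New("archive entry resolves to a path outside the destination directory")
		}
		if f.IsDir() {
			return os.MkdirAll(target, 0755)
		}
		if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
			return err
		}
		out, err := os.OpenFile(target, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, f)
		return err
	})
}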