Merge branch 'develop' of https://github.com/pterodactyl/wings into develop

This commit is contained in:
Dane Everitt 2020-09-22 20:41:16 -07:00
commit 522c6c17e4
No known key found for this signature in database
GPG Key ID: EEA66103B3D71F53
10 changed files with 52 additions and 15 deletions

View File

@ -70,8 +70,8 @@ func (r *PanelRequest) ValidateSftpCredentials(request SftpAuthRequest) (*SftpAu
if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 { if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"subsystem": "sftp", "subsystem": "sftp",
"username": request.User, "username": request.User,
"ip": request.IP, "ip": request.IP,
}).Warn(r.Error().String()) }).Warn(r.Error().String())
return nil, new(sftpInvalidCredentialsError) return nil, new(sftpInvalidCredentialsError)

View File

@ -63,7 +63,7 @@ type Configuration struct {
// AllowedMounts is a list of allowed host-system mount points. // AllowedMounts is a list of allowed host-system mount points.
// This is required to have the "Server Mounts" feature work properly. // This is required to have the "Server Mounts" feature work properly.
AllowedMounts []string `json:"allowed_mounts" yaml:"allowed_mounts"` AllowedMounts []string `json:"-" yaml:"allowed_mounts"`
// AllowedOrigins is a list of allowed request origins. // AllowedOrigins is a list of allowed request origins.
// The Panel URL is automatically allowed, this is only needed for adding // The Panel URL is automatically allowed, this is only needed for adding

View File

@ -364,7 +364,7 @@ func (e *Environment) ensureImageExists(image string) error {
log.WithFields(log.Fields{ log.WithFields(log.Fields{
"image": image, "image": image,
"container_id": e.Id, "container_id": e.Id,
"error": errors.New(err.Error()), "err": err.Error(),
}).Warn("unable to pull requested image from remote source, however the image exists locally") }).Warn("unable to pull requested image from remote source, however the image exists locally")
// Okay, we found a matching container image, in that case just go ahead and return // Okay, we found a matching container image, in that case just go ahead and return

View File

@ -46,4 +46,4 @@ func (cp *CallbackPool) index(v reflect.Value) int {
} }
return -1 return -1
} }

View File

@ -341,6 +341,16 @@ func postServerDecompressFiles(c *gin.Context) {
hasSpace, err := s.Filesystem.SpaceAvailableForDecompression(data.RootPath, data.File) hasSpace, err := s.Filesystem.SpaceAvailableForDecompression(data.RootPath, data.File)
if err != nil { if err != nil {
// Handle an unknown format error.
if errors.Is(err, server.ErrUnknownArchiveFormat) {
s.Log().WithField("error", err).Warn("failed to decompress file due to unknown format")
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "unknown archive format",
})
return
}
TrackedServerError(err, s).AbortWithServerError(c) TrackedServerError(err, s).AbortWithServerError(c)
return return
} }

View File

@ -38,9 +38,9 @@ type Handler struct {
} }
var ( var (
ErrJwtNotPresent = errors.New("jwt: no jwt present") ErrJwtNotPresent = errors.New("jwt: no jwt present")
ErrJwtNoConnectPerm = errors.New("jwt: missing connect permission") ErrJwtNoConnectPerm = errors.New("jwt: missing connect permission")
ErrJwtUuidMismatch = errors.New("jwt: server uuid mismatch") ErrJwtUuidMismatch = errors.New("jwt: server uuid mismatch")
) )
func IsJwtError(err error) bool { func IsJwtError(err error) bool {

View File

@ -600,7 +600,15 @@ func (fs *Filesystem) Copy(p string) error {
base := filepath.Base(cleaned) base := filepath.Base(cleaned)
relative := strings.TrimSuffix(strings.TrimPrefix(cleaned, fs.Path()), base) relative := strings.TrimSuffix(strings.TrimPrefix(cleaned, fs.Path()), base)
extension := filepath.Ext(base) extension := filepath.Ext(base)
name := strings.TrimSuffix(base, filepath.Ext(base)) name := strings.TrimSuffix(base, extension)
// Ensure that ".tar" is also counted as a part of the file extension.
// There might be a better way to handle this for other double file extensions,
// but this is a good workaround for now.
if strings.HasSuffix(name, ".tar") {
extension = ".tar" + extension
name = strings.TrimSuffix(name, ".tar")
}
// Begin looping up to 50 times to try and create a unique copy file name. This will take // Begin looping up to 50 times to try and create a unique copy file name. This will take
// an input of "file.txt" and generate "file copy.txt". If that name is already taken, it will // an input of "file.txt" and generate "file copy.txt". If that name is already taken, it will
@ -943,7 +951,7 @@ func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
} }
type fileOpener struct { type fileOpener struct {
busy uint busy uint
} }
// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file // Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
@ -966,4 +974,4 @@ func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File,
return f, err return f, err
} }
} }

View File

@ -10,9 +10,12 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"strings"
"sync/atomic" "sync/atomic"
) )
var ErrUnknownArchiveFormat = errors.New("filesystem: unknown archive format")
// Look through a given archive and determine if decompressing it would put the server over // Look through a given archive and determine if decompressing it would put the server over
// its allocated disk space limit. // its allocated disk space limit.
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (bool, error) { func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (bool, error) {
@ -35,14 +38,21 @@ func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (b
var max = fs.Server.DiskSpace() * 1000.0 * 1000.0 var max = fs.Server.DiskSpace() * 1000.0 * 1000.0
// Walk over the archive and figure out just how large the final output would be from unarchiving it. // Walk over the archive and figure out just how large the final output would be from unarchiving it.
err = archiver.Walk(source, func(f archiver.File) error { err = archiver.Walk(source, func(f archiver.File) error {
if atomic.AddInt64(&size, f.Size()) + dirSize > max { if atomic.AddInt64(&size, f.Size())+dirSize > max {
return errors.WithStack(ErrNotEnoughDiskSpace) return errors.WithStack(ErrNotEnoughDiskSpace)
} }
return nil return nil
}) })
if err != nil {
if strings.HasPrefix(err.Error(), "format ") {
return false, errors.WithStack(ErrUnknownArchiveFormat)
}
return err == nil, errors.WithStack(err) return false, errors.WithStack(err)
}
return true, errors.WithStack(err)
} }
// Decompress a file in a given directory by using the archiver tool to infer the file // Decompress a file in a given directory by using the archiver tool to infer the file
@ -63,7 +73,7 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
// Walk over all of the files spinning up an additional go-routine for each file we've encountered // Walk over all of the files spinning up an additional go-routine for each file we've encountered
// and then extract that file from the archive and write it to the disk. If any part of this process // and then extract that file from the archive and write it to the disk. If any part of this process
// encounters an error the entire process will be stopped. // encounters an error the entire process will be stopped.
return archiver.Walk(source, func(f archiver.File) error { err = archiver.Walk(source, func(f archiver.File) error {
// Don't waste time with directories, we don't need to create them if they have no contents, and // Don't waste time with directories, we don't need to create them if they have no contents, and
// we will ensure the directory exists when opening the file for writing anyways. // we will ensure the directory exists when opening the file for writing anyways.
if f.IsDir() { if f.IsDir() {
@ -90,4 +100,13 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
return errors.Wrap(fs.Writefile(p, f), "could not extract file from archive") return errors.Wrap(fs.Writefile(p, f), "could not extract file from archive")
}) })
if err != nil {
if strings.HasPrefix(err.Error(), "format ") {
return errors.WithStack(ErrUnknownArchiveFormat)
}
return errors.WithStack(err)
}
return nil
} }

View File

@ -23,7 +23,7 @@ var dockerEvents = []string{
func (s *Server) StartEventListeners() { func (s *Server) StartEventListeners() {
console := func(e events.Event) { console := func(e events.Event) {
t := s.Throttler() t := s.Throttler()
err := t.Increment(func () { err := t.Increment(func() {
s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.") s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.")
}) })

View File

@ -17,4 +17,4 @@ func (ab *AtomicBool) Set(v bool) {
func (ab *AtomicBool) Get() bool { func (ab *AtomicBool) Get() bool {
return atomic.LoadUint32(&ab.flag) == 1 return atomic.LoadUint32(&ab.flag) == 1
} }