Merge branch 'develop' of https://github.com/pterodactyl/wings into develop
commit 522c6c17e4
@@ -70,8 +70,8 @@ func (r *PanelRequest) ValidateSftpCredentials(request SftpAuthRequest) (*SftpAu
 		if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 {
 			log.WithFields(log.Fields{
 				"subsystem": "sftp",
-				"username": request.User,
-				"ip": request.IP,
+				"username":  request.User,
+				"ip":        request.IP,
 			}).Warn(r.Error().String())

 			return nil, new(sftpInvalidCredentialsError)
@@ -63,7 +63,7 @@ type Configuration struct {

 	// AllowedMounts is a list of allowed host-system mount points.
 	// This is required to have the "Server Mounts" feature work properly.
-	AllowedMounts []string `json:"allowed_mounts" yaml:"allowed_mounts"`
+	AllowedMounts []string `json:"-" yaml:"allowed_mounts"`

 	// AllowedOrigins is a list of allowed request origins.
 	// The Panel URL is automatically allowed, this is only needed for adding
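A note on the AllowedMounts change above: a struct field tagged `json:"-"` is skipped entirely by encoding/json, while the yaml tag still lets the value be read from the configuration file. A minimal sketch under those assumptions (the gopkg.in/yaml.v2 import is illustrative, not taken from this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/yaml.v2"
)

type Configuration struct {
	// Omitted from any JSON output, but still populated from the YAML config.
	AllowedMounts []string `json:"-" yaml:"allowed_mounts"`
}

func main() {
	var c Configuration
	_ = yaml.Unmarshal([]byte("allowed_mounts:\n  - /mnt/data\n"), &c)

	out, _ := json.Marshal(c)
	fmt.Println(string(out))       // {} — the field never reaches JSON consumers
	fmt.Println(c.AllowedMounts)   // [/mnt/data]
}
```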
@@ -364,7 +364,7 @@ func (e *Environment) ensureImageExists(image string) error {
 				log.WithFields(log.Fields{
 					"image":        image,
 					"container_id": e.Id,
-					"error":        errors.New(err.Error()),
+					"err":          err.Error(),
 				}).Warn("unable to pull requested image from remote source, however the image exists locally")

 				// Okay, we found a matching container image, in that case just go ahead and return
@@ -46,4 +46,4 @@ func (cp *CallbackPool) index(v reflect.Value) int {
 	}

 	return -1
-}
+}
@@ -341,6 +341,16 @@ func postServerDecompressFiles(c *gin.Context) {

 	hasSpace, err := s.Filesystem.SpaceAvailableForDecompression(data.RootPath, data.File)
 	if err != nil {
+		// Handle an unknown format error.
+		if errors.Is(err, server.ErrUnknownArchiveFormat) {
+			s.Log().WithField("error", err).Warn("failed to decompress file due to unknown format")
+
+			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+				"error": "unknown archive format",
+			})
+			return
+		}
+
 		TrackedServerError(err, s).AbortWithServerError(c)
 		return
 	}
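For the decompress route above: the handler distinguishes a bad archive from a real failure by matching a sentinel error with errors.Is, which keeps working through wrappers (such as errors.WithStack) as long as each wrapper implements Unwrap. A minimal sketch of that pattern; the doDecompress helper and its messages are illustrative, not from the repository:

```go
package main

import (
	"errors"
	"fmt"
)

// Sentinel mirroring server.ErrUnknownArchiveFormat from the hunk above.
var ErrUnknownArchiveFormat = errors.New("filesystem: unknown archive format")

// doDecompress stands in for the filesystem call; it wraps the sentinel so
// callers can still match it with errors.Is after wrapping.
func doDecompress(file string) error {
	return fmt.Errorf("decompress %s: %w", file, ErrUnknownArchiveFormat)
}

func main() {
	err := doDecompress("archive.xyz")
	if errors.Is(err, ErrUnknownArchiveFormat) {
		fmt.Println("reject with 400: unknown archive format")
		return
	}
	fmt.Println("treat as an internal server error")
}
```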
@@ -38,9 +38,9 @@ type Handler struct {
 }

 var (
-	ErrJwtNotPresent = errors.New("jwt: no jwt present")
+	ErrJwtNotPresent    = errors.New("jwt: no jwt present")
 	ErrJwtNoConnectPerm = errors.New("jwt: missing connect permission")
-	ErrJwtUuidMismatch = errors.New("jwt: server uuid mismatch")
+	ErrJwtUuidMismatch  = errors.New("jwt: server uuid mismatch")
 )

 func IsJwtError(err error) bool {
@@ -600,7 +600,15 @@ func (fs *Filesystem) Copy(p string) error {
 	base := filepath.Base(cleaned)
 	relative := strings.TrimSuffix(strings.TrimPrefix(cleaned, fs.Path()), base)
 	extension := filepath.Ext(base)
-	name := strings.TrimSuffix(base, filepath.Ext(base))
+	name := strings.TrimSuffix(base, extension)

+	// Ensure that ".tar" is also counted as part of the file extension.
+	// There might be a better way to handle this for other double file extensions,
+	// but this is a good workaround for now.
+	if strings.HasSuffix(name, ".tar") {
+		extension = ".tar" + extension
+		name = strings.TrimSuffix(name, ".tar")
+	}
+
 	// Begin looping up to 50 times to try and create a unique copy file name. This will take
 	// an input of "file.txt" and generate "file copy.txt". If that name is already taken, it will
@@ -943,7 +951,7 @@ func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
 }

 type fileOpener struct {
-	busy        uint
+	busy uint
 }

 // Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
@@ -966,4 +974,4 @@ func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File,

 		return f, err
 	}
-}
+}
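On the Copy() hunk at the top of this file: filepath.Ext only returns the last extension, so without the extra check a copy of "backup.tar.gz" would be named "backup.tar copy.gz". A standalone sketch of the same splitting logic; splitName is a hypothetical helper, the real code does this inline before its copy-name loop:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// splitName mirrors the extension handling in the Copy() hunk above: treat
// ".tar.<ext>" as one extension so "backup.tar.gz" copies to
// "backup copy.tar.gz" rather than "backup.tar copy.gz".
func splitName(base string) (name, extension string) {
	extension = filepath.Ext(base)
	name = strings.TrimSuffix(base, extension)

	if strings.HasSuffix(name, ".tar") {
		extension = ".tar" + extension
		name = strings.TrimSuffix(name, ".tar")
	}
	return name, extension
}

func main() {
	for _, f := range []string{"file.txt", "backup.tar.gz", "world.tar"} {
		name, ext := splitName(f)
		fmt.Printf("%s -> %q + %q (copy: %q)\n", f, name, ext, name+" copy"+ext)
	}
}
```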
@@ -10,9 +10,12 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
+	"strings"
 	"sync/atomic"
 )

+var ErrUnknownArchiveFormat = errors.New("filesystem: unknown archive format")
+
 // Look through a given archive and determine if decompressing it would put the server over
 // its allocated disk space limit.
 func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (bool, error) {
@@ -35,14 +38,21 @@ func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (b
 	var max = fs.Server.DiskSpace() * 1000.0 * 1000.0
 	// Walk over the archive and figure out just how large the final output would be from unarchiving it.
 	err = archiver.Walk(source, func(f archiver.File) error {
-		if atomic.AddInt64(&size, f.Size()) + dirSize > max {
+		if atomic.AddInt64(&size, f.Size())+dirSize > max {
 			return errors.WithStack(ErrNotEnoughDiskSpace)
 		}

 		return nil
 	})
+	if err != nil {
+		if strings.HasPrefix(err.Error(), "format ") {
+			return false, errors.WithStack(ErrUnknownArchiveFormat)
+		}

-	return err == nil, errors.WithStack(err)
+		return false, errors.WithStack(err)
+	}
+
+	return true, errors.WithStack(err)
 }

 // Decompress a file in a given directory by using the archiver tool to infer the file
@@ -63,7 +73,7 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
 	// Walk over all of the files spinning up an additional go-routine for each file we've encountered
 	// and then extract that file from the archive and write it to the disk. If any part of this process
 	// encounters an error the entire process will be stopped.
-	return archiver.Walk(source, func(f archiver.File) error {
+	err = archiver.Walk(source, func(f archiver.File) error {
 		// Don't waste time with directories, we don't need to create them if they have no contents, and
 		// we will ensure the directory exists when opening the file for writing anyways.
 		if f.IsDir() {
@@ -90,4 +100,13 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {

 		return errors.Wrap(fs.Writefile(p, f), "could not extract file from archive")
 	})
+	if err != nil {
+		if strings.HasPrefix(err.Error(), "format ") {
+			return errors.WithStack(ErrUnknownArchiveFormat)
+		}
+
+		return errors.WithStack(err)
+	}
+
+	return nil
 }
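For context on the SpaceAvailableForDecompression hunk: the walk keeps an atomically updated running total of the uncompressed file sizes and fails fast once that total plus the current directory usage would pass the server's limit (DiskSpace() appears to be in megabytes, hence the * 1000 * 1000). A self-contained sketch of that accounting, with the archiver walk replaced by a plain loop and every name other than ErrNotEnoughDiskSpace being illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var ErrNotEnoughDiskSpace = errors.New("filesystem: not enough disk space")

// hasSpaceFor mimics the accounting above: accumulate extracted file sizes
// atomically (the real callback may run concurrently) and bail out as soon as
// total + dirSize would exceed the allowed maximum, in bytes.
func hasSpaceFor(fileSizes []int64, dirSize, max int64) (bool, error) {
	var size int64
	for _, s := range fileSizes {
		if atomic.AddInt64(&size, s)+dirSize > max {
			return false, ErrNotEnoughDiskSpace
		}
	}
	return true, nil
}

func main() {
	// 100 MB limit, 40 MB already used, archive contents of 30 MB + 50 MB.
	ok, err := hasSpaceFor([]int64{30_000_000, 50_000_000}, 40_000_000, 100*1000*1000)
	fmt.Println(ok, err) // false filesystem: not enough disk space
}
```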
@@ -23,7 +23,7 @@ var dockerEvents = []string{
 func (s *Server) StartEventListeners() {
 	console := func(e events.Event) {
 		t := s.Throttler()
-		err := t.Increment(func () {
+		err := t.Increment(func() {
 			s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.")
 		})

@@ -17,4 +17,4 @@ func (ab *AtomicBool) Set(v bool) {

 func (ab *AtomicBool) Get() bool {
 	return atomic.LoadUint32(&ab.flag) == 1
-}
+}
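The last hunk only touches the file's trailing brace, but AtomicBool itself is a small pattern worth spelling out: a bool stored in a uint32 so it can be read and written through sync/atomic without a mutex. A minimal sketch of that pattern, assuming a flag field and a Set built on StoreUint32 (only Get's body actually appears in the hunk above):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// AtomicBool wraps a uint32 so the flag can be flipped and read concurrently
// without a mutex.
type AtomicBool struct {
	flag uint32
}

func (ab *AtomicBool) Set(v bool) {
	var i uint32
	if v {
		i = 1
	}
	atomic.StoreUint32(&ab.flag, i)
}

func (ab *AtomicBool) Get() bool {
	return atomic.LoadUint32(&ab.flag) == 1
}

func main() {
	var b AtomicBool
	b.Set(true)
	fmt.Println(b.Get()) // true
}
```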