wings/router/router_server_files.go

package router
import (
"context"
"mime/multipart"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/router/downloader"
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/server/filesystem"
"golang.org/x/sync/errgroup"
)
// Returns the contents of a file on the server.
func getServerFileContents(c *gin.Context) {
s := ExtractServer(c)
f := c.Query("file")
p := "/" + strings.TrimLeft(f, "/")
st, err := s.Filesystem().Stat(p)
if err != nil {
WithError(c, err)
return
}
c.Header("X-Mime-Type", st.Mimetype)
c.Header("Content-Length", strconv.Itoa(int(st.Info.Size())))
// If a download parameter is included in the URL, go ahead and attach the necessary headers
// so that the file can be downloaded.
if c.Query("download") != "" {
c.Header("Content-Disposition", "attachment; filename="+st.Info.Name())
c.Header("Content-Type", "application/octet-stream")
}
// TODO(dane): should probably come up with a different approach here. If an error is encountered
// by this Readfile call you'll end up causing a (recovered) panic in the program because so many
// headers have already been set. We should probably add a RawReadfile that just returns the file
// to be read and then we can stream from that safely without error.
//
// Until that becomes a problem though I'm just going to leave this how it is. The panic is recovered
// and a normal 500 error is returned to the client to my knowledge. It is also very unlikely to
// happen since we're doing so much before this point that would normally throw an error if there
// was a problem with the file.
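//
// As a purely illustrative sketch of that idea (RawReadfile is hypothetical and does not
// exist on the Filesystem type), the handler could eventually look something like:
//
//	rc, err := s.Filesystem().RawReadfile(p)
//	if err != nil {
//		WithError(c, err)
//		return
//	}
//	defer rc.Close()
//	// Only set the response headers once the file is known to be readable, then stream it.
//	io.Copy(c.Writer, rc)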
if err := s.Filesystem().Readfile(p, c.Writer); err != nil {
WithError(c, err)
return
}
c.Writer.Flush()
}
// Returns the contents of a directory for a server.
func getServerListDirectory(c *gin.Context) {
s := ExtractServer(c)
dir := c.Query("directory")
if stats, err := s.Filesystem().ListDirectory(dir); err != nil {
WithError(c, err)
} else {
c.JSON(http.StatusOK, stats)
}
}
type renameFile struct {
To string `json:"to"`
From string `json:"from"`
}
// Renames (or moves) files for a server.
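// An illustrative request body, based on the struct tags below (values are examples only):
//
//	{"root": "/", "files": [{"from": "old-name.txt", "to": "new-name.txt"}]}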
func putServerRenameFiles(c *gin.Context) {
s := GetServer(c.Param("server"))
var data struct {
Root string `json:"root"`
Files []renameFile `json:"files"`
}
// BindJSON sends a 400 if the request fails; all we need to do is return.
if err := c.BindJSON(&data); err != nil {
return
}
if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files to move or rename were provided.",
})
return
}
g, ctx := errgroup.WithContext(context.Background())
// Loop over the array of files passed in and perform the move or rename action against each.
for _, p := range data.Files {
pf := path.Join(data.Root, p.From)
pt := path.Join(data.Root, p.To)
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
if err := s.Filesystem().Rename(pf, pt); err != nil {
// Return nil if the error is a "does not exist" error.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return err
}
return nil
}
})
}
if err := g.Wait(); err != nil {
if errors.Is(err, os.ErrExist) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot move or rename file, destination already exists.",
})
return
}
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.Status(http.StatusNoContent)
}
// Copies a server file.
func postServerCopyFile(c *gin.Context) {
s := GetServer(c.Param("server"))
var data struct {
Location string `json:"location"`
}
// BindJSON sends a 400 if the request fails; all we need to do is return.
if err := c.BindJSON(&data); err != nil {
return
}
if err := s.Filesystem().Copy(data.Location); err != nil {
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.Status(http.StatusNoContent)
}
// Deletes files from a server.
func postServerDeleteFiles(c *gin.Context) {
s := GetServer(c.Param("server"))
var data struct {
Root string `json:"root"`
Files []string `json:"files"`
}
if err := c.BindJSON(&data); err != nil {
return
}
if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files were specified for deletion.",
})
return
}
g, ctx := errgroup.WithContext(context.Background())
// Loop over the array of files passed in and delete them. If any of the file deletions
// fail, just abort the process entirely.
for _, p := range data.Files {
pi := path.Join(data.Root, p)
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return s.Filesystem().Delete(pi)
}
})
}
if err := g.Wait(); err != nil {
NewServerError(err, s).Abort(c)
return
}
c.Status(http.StatusNoContent)
}
// Writes the contents of the request to a file on a server.
func postServerWriteFile(c *gin.Context) {
s := GetServer(c.Param("server"))
f := c.Query("file")
f = "/" + strings.TrimLeft(f, "/")
if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil {
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
})
return
}
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.Status(http.StatusNoContent)
}
// Returns all of the currently in-progress file downloads and their progress. The
// progress is also pushed out via websocket events, allowing a client to call this
// endpoint once to get the current downloads and then listen for targeted websocket
// events with the progress of each download.
func getServerPullingFiles(c *gin.Context) {
s := ExtractServer(c)
c.JSON(http.StatusOK, gin.H{
"downloads": downloader.ByServer(s.Id()),
})
}
// Writes the contents of the remote URL to a file on a server.
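// An illustrative request body, based on the struct tags below (values are examples only):
//
//	{"url": "https://example.com/backup.tar.gz", "directory": "/"}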
func postServerPullRemoteFile(c *gin.Context) {
s := ExtractServer(c)
var data struct {
URL string `binding:"required" json:"url"`
Directory string `binding:"required,omitempty" json:"directory"`
}
if err := c.BindJSON(&data); err != nil {
return
}
u, err := url.Parse(data.URL)
if err != nil {
if e, ok := err.(*url.Error); ok {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "An error occurred while parsing that URL: " + e.Err.Error(),
})
return
}
WithError(c, err)
return
}
if err := s.Filesystem().HasSpaceErr(true); err != nil {
WithError(c, err)
return
}
// Do not allow more than three remote file downloads to be in progress at one time.
if len(downloader.ByServer(s.Id())) >= 3 {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "This server has reached its limit of 3 simultaneous remote file downloads at once. Please wait for one to complete before trying again.",
})
return
}
dl := downloader.New(s, downloader.DownloadRequest{
URL: u,
Directory: data.Directory,
})
// Execute this pull in a separate goroutine since it may take a long time to complete.
go func() {
s.Log().WithField("download_id", dl.Identifier).WithField("url", u.String()).Info("starting pull of remote file to disk")
if err := dl.Execute(); err != nil {
s.Log().WithField("download_id", dl.Identifier).WithField("error", err).Error("failed to pull remote file")
} else {
s.Log().WithField("download_id", dl.Identifier).Info("completed pull of remote file")
}
}()
c.JSON(http.StatusAccepted, gin.H{
"identifier": dl.Identifier,
})
}
// Stops a remote file download if it exists and belongs to this server.
func deleteServerPullRemoteFile(c *gin.Context) {
s := ExtractServer(c)
if dl := downloader.ByID(c.Param("download")); dl != nil && dl.BelongsTo(s) {
dl.Cancel()
}
c.Status(http.StatusNoContent)
}
// Create a directory on a server.
func postServerCreateDirectory(c *gin.Context) {
s := GetServer(c.Param("server"))
var data struct {
Name string `json:"name"`
Path string `json:"path"`
}
// BindJSON sends a 400 if the request fails; all we need to do is return.
if err := c.BindJSON(&data); err != nil {
return
}
if err := s.Filesystem().CreateDirectory(data.Name, data.Path); err != nil {
if err.Error() == "not a directory" {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Part of the path being created is not a directory (ENOTDIR).",
})
return
}
NewServerError(err, s).Abort(c)
return
}
c.Status(http.StatusNoContent)
}
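// Compresses the requested files for a server into a single tar.gz archive and returns
// the stat details of the newly created archive.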
func postServerCompressFiles(c *gin.Context) {
s := GetServer(c.Param("server"))
var data struct {
RootPath string `json:"root"`
Files []string `json:"files"`
}
if err := c.BindJSON(&data); err != nil {
return
}
if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files were passed through to be compressed.",
})
return
}
if !s.Filesystem().HasSpaceAvailable(true) {
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
"error": "This server does not have enough available disk space to generate a compressed archive.",
})
return
}
f, err := s.Filesystem().CompressFiles(data.RootPath, data.Files)
if err != nil {
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.JSON(http.StatusOK, &filesystem.Stat{
Info: f,
Mimetype: "application/tar+gzip",
})
}
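// Decompresses an archive on the server into the given root path, first checking that
// the server has enough disk space available to hold the extracted contents.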
func postServerDecompressFiles(c *gin.Context) {
s := GetServer(c.Param("server"))
var data struct {
RootPath string `json:"root"`
File string `json:"file"`
}
if err := c.BindJSON(&data); err != nil {
return
}
hasSpace, err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File)
if err != nil {
// Handle an unknown format error.
if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) {
s.Log().WithField("error", err).Warn("failed to decompress file due to unknown format")
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "unknown archive format",
})
return
}
NewServerError(err, s).Abort(c)
return
}
if !hasSpace {
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
"error": "This server does not have enough available disk space to decompress this archive.",
})
return
}
if err := s.Filesystem().DecompressFile(data.RootPath, data.File); err != nil {
if errors.Is(err, os.ErrNotExist) {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested archive was not found.",
})
return
}
// If the file is busy for some reason just return a nicer error to the user since there is not
// much we specifically can do. They'll need to stop the running server process in order to overwrite
// a file like this.
if strings.Contains(err.Error(), "text file busy") {
s.Log().WithField("error", err).Warn("failed to decompress file due to busy text file")
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "One or more files this archive is attempting to overwrite are currently in use by another process. Please try again.",
})
return
}
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.Status(http.StatusNoContent)
}
type chmodFile struct {
File string `json:"file"`
Mode string `json:"mode"`
}
var errInvalidFileMode = errors.New("invalid file mode")
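// Updates the permission bits (mode) of one or more files for a server. Modes are passed
// as octal strings and parsed with strconv.ParseUint below, so an illustrative request
// body (values are examples only) would be:
//
//	{"root": "/", "files": [{"file": "server.jar", "mode": "0644"}]}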
func postServerChmodFile(c *gin.Context) {
s := GetServer(c.Param("server"))
var data struct {
Root string `json:"root"`
Files []chmodFile `json:"files"`
}
if err := c.BindJSON(&data); err != nil {
log.Debug(err.Error())
return
}
if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files to chmod were provided.",
})
return
}
g, ctx := errgroup.WithContext(context.Background())
// Loop over the array of files passed in and update the mode of each.
for _, p := range data.Files {
p := p // copy the loop variable; the goroutine below must not capture the shared iteration variable
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
mode, err := strconv.ParseUint(p.Mode, 8, 32)
if err != nil {
return errInvalidFileMode
}
if err := s.Filesystem().Chmod(path.Join(data.Root, p.File), os.FileMode(mode)); err != nil {
// Return nil if the error is a "does not exist" error.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return err
}
return nil
}
})
}
if err := g.Wait(); err != nil {
if errors.Is(err, errInvalidFileMode) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Invalid file mode.",
})
return
}
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.Status(http.StatusNoContent)
}
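// Handles a file upload for a server. The signed upload token from the query string is
// validated first, and then every file in the "files" field of the multipart form data
// is written into the requested directory.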
func postServerUploadFiles(c *gin.Context) {
token := tokens.UploadPayload{}
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
NewTrackedError(err).Abort(c)
return
}
s := GetServer(token.ServerUuid)
if s == nil || !token.IsUniqueRequest() {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested resource was not found on this server.",
})
return
}
form, err := c.MultipartForm()
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Failed to get multipart form data from request.",
})
return
}
headers, ok := form.File["files"]
if !ok {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "No files were found on the request body.",
})
return
}
directory := c.Query("directory")
var totalSize int64
for _, header := range headers {
totalSize += header.Size
}
for _, header := range headers {
p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename))
if err != nil {
NewServerError(err, s).AbortFilesystemError(c)
return
}
// We run this in a different method so I can use defer without any of
// the consequences caused by calling it in a loop.
if err := handleFileUpload(p, s, header); err != nil {
NewServerError(err, s).AbortFilesystemError(c)
return
}
}
}
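// handleFileUpload writes a single uploaded file to the given path on the server's
// filesystem. It is split into its own function so the file handle can be closed with
// defer on each iteration of the upload loop above.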
func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader) error {
file, err := header.Open()
if err != nil {
return err
}
defer file.Close()
if err := s.Filesystem().Writefile(p, file); err != nil {
return err
}
return nil
}