package router

import (
	"bufio"
	"context"
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"

	"emperror.dev/errors"
	"github.com/apex/log"
	"github.com/gin-gonic/gin"
	"golang.org/x/sync/errgroup"

	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/router/downloader"
	"github.com/pterodactyl/wings/router/middleware"
	"github.com/pterodactyl/wings/router/tokens"
	"github.com/pterodactyl/wings/server"
	"github.com/pterodactyl/wings/server/filesystem"
)

// getServerFileContents returns the contents of a file on the server.
func getServerFileContents(c *gin.Context) {
	s := middleware.ExtractServer(c)
	p := "/" + strings.TrimLeft(c.Query("file"), "/")
	f, st, err := s.Filesystem().File(p)
	if err != nil {
		middleware.CaptureAndAbort(c, err)
		return
	}
	defer f.Close()

	c.Header("X-Mime-Type", st.Mimetype)
	c.Header("Content-Length", strconv.Itoa(int(st.Size())))
	// If a download parameter is included in the URL go ahead and attach the necessary headers
	// so that the file can be downloaded.
	if c.Query("download") != "" {
		c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
		c.Header("Content-Type", "application/octet-stream")
	}
	defer c.Writer.Flush()
	// If you don't do a limited reader here you will trigger a panic on write when
	// a different server process writes content to the file after you've already
	// determined the file size. This could lead to some weird content output but
	// it would technically be accurate based on the content at the time of the request.
	//
	// "http: wrote more than the declared Content-Length"
	//
	// @see https://github.com/pterodactyl/panel/issues/3131
	r := io.LimitReader(f, st.Size())
	if _, err = bufio.NewReader(r).WriteTo(c.Writer); err != nil {
		// Pretty sure this will unleash chaos on the response, but it's a risk we can
		// take since a panic will at least be recovered and this should be incredibly
		// rare?
		middleware.CaptureAndAbort(c, err)
		return
	}
}

// Returns the contents of a directory for a server.
func getServerListDirectory(c *gin.Context) {
	s := ExtractServer(c)
	dir := c.Query("directory")
	if stats, err := s.Filesystem().ListDirectory(dir); err != nil {
		WithError(c, err)
	} else {
		c.JSON(http.StatusOK, stats)
	}
}

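// renameFile represents a single rename (or move) operation, with both paths
// relative to the root directory provided in the request body.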
type renameFile struct {
	To   string `json:"to"`
	From string `json:"from"`
}

// Renames (or moves) files for a server.
func putServerRenameFiles(c *gin.Context) {
	s := ExtractServer(c)

	var data struct {
		Root  string       `json:"root"`
		Files []renameFile `json:"files"`
	}
	// BindJSON sends 400 if the request fails, all we need to do is return
	if err := c.BindJSON(&data); err != nil {
		return
	}

	if len(data.Files) == 0 {
		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
			"error": "No files to move or rename were provided.",
		})
		return
	}

	g, ctx := errgroup.WithContext(c.Request.Context())
	// Loop over the array of files passed in and perform the move or rename action against each.
	for _, p := range data.Files {
		pf := path.Join(data.Root, p.From)
		pt := path.Join(data.Root, p.To)

		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				fs := s.Filesystem()
				// Ignore renames on a file that is on the denylist (both as the rename from or
				// the rename to value).
				if err := fs.IsIgnored(pf, pt); err != nil {
					return err
				}
				if err := fs.Rename(pf, pt); err != nil {
					// Return nil if the error is a not-exists error.
					// NOTE: os.IsNotExist() does not work if the error is wrapped.
					if errors.Is(err, os.ErrNotExist) {
						return nil
					}
					return err
				}
				return nil
			}
		})
	}

	if err := g.Wait(); err != nil {
		if errors.Is(err, os.ErrExist) {
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
				"error": "Cannot move or rename file, destination already exists.",
			})
			return
		}

		NewServerError(err, s).AbortFilesystemError(c)
		return
	}

	c.Status(http.StatusNoContent)
}

// Copies a server file.
func postServerCopyFile(c *gin.Context) {
	s := ExtractServer(c)

	var data struct {
		Location string `json:"location"`
	}
	// BindJSON sends 400 if the request fails, all we need to do is return
	if err := c.BindJSON(&data); err != nil {
		return
	}

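	// Deny copies of any file that is matched by the server's denylist.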
	if err := s.Filesystem().IsIgnored(data.Location); err != nil {
		NewServerError(err, s).Abort(c)
		return
	}
	if err := s.Filesystem().Copy(data.Location); err != nil {
		NewServerError(err, s).AbortFilesystemError(c)
		return
	}

	c.Status(http.StatusNoContent)
}

// Deletes files from a server.
func postServerDeleteFiles(c *gin.Context) {
	s := ExtractServer(c)

	var data struct {
		Root  string   `json:"root"`
		Files []string `json:"files"`
	}

	if err := c.BindJSON(&data); err != nil {
		return
	}

	if len(data.Files) == 0 {
		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
			"error": "No files were specified for deletion.",
		})
		return
	}

	g, ctx := errgroup.WithContext(context.Background())

	// Loop over the array of files passed in and delete them. If any of the file deletions
	// fail just abort the process entirely.
	for _, p := range data.Files {
		pi := path.Join(data.Root, p)

		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				return s.Filesystem().Delete(pi)
			}
		})
	}

	if err := g.Wait(); err != nil {
		NewServerError(err, s).Abort(c)
		return
	}

	c.Status(http.StatusNoContent)
}

// Writes the contents of the request to a file on a server.
func postServerWriteFile(c *gin.Context) {
	s := ExtractServer(c)

	f := c.Query("file")
	f = "/" + strings.TrimLeft(f, "/")

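	// Deny writes to any file that is matched by the server's denylist.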
	if err := s.Filesystem().IsIgnored(f); err != nil {
		NewServerError(err, s).Abort(c)
		return
	}
	if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil {
		if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
				"error": "Cannot write file, name conflicts with an existing directory by the same name.",
			})
			return
		}

		NewServerError(err, s).AbortFilesystemError(c)
		return
	}

	c.Status(http.StatusNoContent)
}

// Returns all of the currently in-progress file downloads and their current download
// progress. The progress is also pushed out via a websocket event allowing you to just
// call this once to get current downloads, and then listen to targeted websocket events
// with the current progress for everything.
func getServerPullingFiles(c *gin.Context) {
	s := ExtractServer(c)
	c.JSON(http.StatusOK, gin.H{
		"downloads": downloader.ByServer(s.ID()),
	})
}

// Writes the contents of the remote URL to a file on a server.
func postServerPullRemoteFile(c *gin.Context) {
	s := ExtractServer(c)
	var data struct {
		// Deprecated: use RootPath ("root") instead.
		Directory  string `binding:"required_without=RootPath,omitempty" json:"directory"`
		RootPath   string `binding:"required_without=Directory,omitempty" json:"root"`
		URL        string `binding:"required" json:"url"`
		FileName   string `json:"file_name"`
		UseHeader  bool   `json:"use_header"`
		Foreground bool   `json:"foreground"`
	}
	if err := c.BindJSON(&data); err != nil {
		return
	}

	// Handle the deprecated Directory field in the struct until it is removed.
	if data.Directory != "" && data.RootPath == "" {
		data.RootPath = data.Directory
	}

	u, err := url.Parse(data.URL)
	if err != nil {
		if e, ok := err.(*url.Error); ok {
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
				"error": "An error occurred while parsing that URL: " + e.Err.Error(),
			})
			return
		}
		WithError(c, err)
		return
	}

	if err := s.Filesystem().HasSpaceErr(true); err != nil {
		WithError(c, err)
		return
	}
	// Do not allow more than three simultaneous remote file downloads at one time.
	if len(downloader.ByServer(s.ID())) >= 3 {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"error": "This server has reached its limit of 3 simultaneous remote file downloads at once. Please wait for one to complete before trying again.",
		})
		return
	}

	dl := downloader.New(s, downloader.DownloadRequest{
		Directory: data.RootPath,
		URL:       u,
		FileName:  data.FileName,
		UseHeader: data.UseHeader,
	})

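	// Wrap the pull in a closure so it can either run in the background or, when the
	// foreground flag is set, block the request until the download finishes.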
	download := func() error {
		s.Log().WithField("download_id", dl.Identifier).WithField("url", u.String()).Info("starting pull of remote file to disk")
		if err := dl.Execute(); err != nil {
			s.Log().WithField("download_id", dl.Identifier).WithField("error", err).Error("failed to pull remote file")
			return err
		} else {
			s.Log().WithField("download_id", dl.Identifier).Info("completed pull of remote file")
		}
		return nil
	}
	if !data.Foreground {
		go func() {
			_ = download()
		}()
		c.JSON(http.StatusAccepted, gin.H{
			"identifier": dl.Identifier,
		})
		return
	}

	if err := download(); err != nil {
		NewServerError(err, s).Abort(c)
		return
	}

	st, err := s.Filesystem().Stat(dl.Path())
	if err != nil {
		NewServerError(err, s).AbortFilesystemError(c)
		return
	}
	c.JSON(http.StatusOK, &st)
}

// Stops a remote file download if it exists and belongs to this server.
func deleteServerPullRemoteFile(c *gin.Context) {
	s := ExtractServer(c)
	if dl := downloader.ByID(c.Param("download")); dl != nil && dl.BelongsTo(s) {
		dl.Cancel()
	}
	c.Status(http.StatusNoContent)
}

// Create a directory on a server.
func postServerCreateDirectory(c *gin.Context) {
	s := ExtractServer(c)

	var data struct {
		Name string `json:"name"`
		Path string `json:"path"`
	}
	// BindJSON sends 400 if the request fails, all we need to do is return
	if err := c.BindJSON(&data); err != nil {
		return
	}

	if err := s.Filesystem().CreateDirectory(data.Name, data.Path); err != nil {
		if err.Error() == "not a directory" {
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
				"error": "Part of the path being created is not a directory (ENOTDIR).",
			})
			return
		}

		NewServerError(err, s).Abort(c)
		return
	}

	c.Status(http.StatusNoContent)
}

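// Compresses the requested files for a server into a single archive and responds
// with the stat details of the resulting file.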
func postServerCompressFiles(c *gin.Context) {
	s := ExtractServer(c)

	var data struct {
		RootPath string   `json:"root"`
		Files    []string `json:"files"`
	}

	if err := c.BindJSON(&data); err != nil {
		return
	}

	if len(data.Files) == 0 {
		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
			"error": "No files were passed through to be compressed.",
		})
		return
	}

	if !s.Filesystem().HasSpaceAvailable(true) {
		c.AbortWithStatusJSON(http.StatusConflict, gin.H{
			"error": "This server does not have enough available disk space to generate a compressed archive.",
		})
		return
	}

	f, err := s.Filesystem().CompressFiles(data.RootPath, data.Files)
	if err != nil {
		NewServerError(err, s).AbortFilesystemError(c)
		return
	}

	c.JSON(http.StatusOK, &filesystem.Stat{
		FileInfo: f,
		Mimetype: "application/tar+gzip",
	})
}

// postServerDecompressFiles receives the HTTP request and starts the process
// of unpacking an archive that exists on the server into the provided RootPath
// for the server.
func postServerDecompressFiles(c *gin.Context) {
	var data struct {
		RootPath string `json:"root"`
		File     string `json:"file"`
	}
	if err := c.BindJSON(&data); err != nil {
		return
	}

	s := middleware.ExtractServer(c)
	lg := middleware.ExtractLogger(c).WithFields(log.Fields{"root_path": data.RootPath, "file": data.File})
	lg.Debug("checking if space is available for file decompression")
	err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File)
	if err != nil {
		if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) {
			lg.WithField("error", err).Warn("failed to decompress file: unknown archive format")
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The archive provided is in a format Wings does not understand."})
			return
		}
		middleware.CaptureAndAbort(c, err)
		return
	}

	lg.Info("starting file decompression")
	if err := s.Filesystem().DecompressFile(data.RootPath, data.File); err != nil {
		// If the file is busy for some reason just return a nicer error to the user since there is not
		// much we specifically can do. They'll need to stop the running server process in order to overwrite
		// a file like this.
		if strings.Contains(err.Error(), "text file busy") {
			lg.WithField("error", errors.WithStackIf(err)).Warn("failed to decompress file: text file busy")
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
				"error": "One or more files this archive is attempting to overwrite are currently in use by another process. Please try again.",
			})
			return
		}
		middleware.CaptureAndAbort(c, err)
		return
	}
	c.Status(http.StatusNoContent)
}

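// chmodFile represents a single permissions change; Mode is expected to be an
// octal string (for example "0755") and is parsed in base 8.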
type chmodFile struct {
	File string `json:"file"`
	Mode string `json:"mode"`
}

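// errInvalidFileMode is returned when a provided mode string cannot be parsed
// as an octal value.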
var errInvalidFileMode = errors.New("invalid file mode")

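// Updates the file mode (permissions) for one or more files on a server.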
func postServerChmodFile(c *gin.Context) {
	s := ExtractServer(c)

	var data struct {
		Root  string      `json:"root"`
		Files []chmodFile `json:"files"`
	}

	if err := c.BindJSON(&data); err != nil {
		log.Debug(err.Error())
		return
	}

	if len(data.Files) == 0 {
		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
			"error": "No files to chmod were provided.",
		})
		return
	}

	g, ctx := errgroup.WithContext(context.Background())

	// Loop over the array of files passed in and perform the chmod action against each.
	for _, p := range data.Files {
		p := p // capture the loop variable so each goroutine operates on its own copy
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				mode, err := strconv.ParseUint(p.Mode, 8, 32)
				if err != nil {
					return errInvalidFileMode
				}

				if err := s.Filesystem().Chmod(path.Join(data.Root, p.File), os.FileMode(mode)); err != nil {
					// Return nil if the error is a not-exists error.
					// NOTE: os.IsNotExist() does not work if the error is wrapped.
					if errors.Is(err, os.ErrNotExist) {
						return nil
					}

					return err
				}

				return nil
			}
		})
	}

	if err := g.Wait(); err != nil {
		if errors.Is(err, errInvalidFileMode) {
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
				"error": "Invalid file mode.",
			})
			return
		}

		NewServerError(err, s).AbortFilesystemError(c)
		return
	}

	c.Status(http.StatusNoContent)
}

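// Handles file uploads for a server. The request must include a valid upload
// token identifying the target server; every file in the multipart form is then
// written into the requested directory.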
func postServerUploadFiles(c *gin.Context) {
	manager := middleware.ExtractManager(c)

	token := tokens.UploadPayload{}
	if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
		NewTrackedError(err).Abort(c)
		return
	}

	s, ok := manager.Get(token.ServerUuid)
	if !ok || !token.IsUniqueRequest() {
		c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
			"error": "The requested resource was not found on this server.",
		})
		return
	}

	form, err := c.MultipartForm()
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"error": "Failed to get multipart form data from request.",
		})
		return
	}

	headers, ok := form.File["files"]
	if !ok {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"error": "No files were found on the request body.",
		})
		return
	}

	directory := c.Query("directory")

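	// The configured upload limit is expressed in megabytes, so convert it to bytes
	// before comparing it against the size of each uploaded part.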
	maxFileSize := config.Get().Api.UploadLimit
	maxFileSizeBytes := maxFileSize * 1024 * 1024
	var totalSize int64
	for _, header := range headers {
		if header.Size > maxFileSizeBytes {
			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
				"error": "File " + header.Filename + " is larger than the maximum file upload size of " + strconv.FormatInt(maxFileSize, 10) + " MB.",
			})
			return
		}
		totalSize += header.Size
	}

	for _, header := range headers {
		p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename))
		if err != nil {
			NewServerError(err, s).Abort(c)
			return
		}

		// We run this in a different method so I can use defer without any of
		// the consequences caused by calling it in a loop.
		if err := handleFileUpload(p, s, header); err != nil {
			NewServerError(err, s).Abort(c)
			return
		}
	}
}

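// handleFileUpload writes a single uploaded file to the given path on the server,
// rejecting anything that is matched by the server's denylist.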
func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader) error {
	file, err := header.Open()
	if err != nil {
		return err
	}
	defer file.Close()

	if err := s.Filesystem().IsIgnored(p); err != nil {
		return err
	}
	if err := s.Filesystem().Writefile(p, file); err != nil {
		return err
	}

	return nil
}