Merge branch 'dane/backup-restore' into develop

commit ba6cec9615
Author: Dane Everitt
Date:   2021-01-30 18:51:35 -08:00
GPG Key ID: EEA66103B3D71F53 (no known key found for this signature in database)
19 changed files with 517 additions and 303 deletions


@@ -36,14 +36,25 @@ type BackupRequest struct {
     Successful bool `json:"successful"`
 }
 
-// Notifies the panel that a specific backup has been completed and is now
-// available for a user to view and download.
+// SendBackupStatus notifies the panel that a specific backup has been completed
+// and is now available for a user to view and download.
 func (r *Request) SendBackupStatus(backup string, data BackupRequest) error {
     resp, err := r.Post(fmt.Sprintf("/backups/%s", backup), data)
     if err != nil {
         return err
     }
     defer resp.Body.Close()
+    return resp.Error()
+}
+
+// SendRestorationStatus triggers a request to the Panel to notify it that a
+// restoration has been completed and the server should be marked as being
+// activated again.
+func (r *Request) SendRestorationStatus(backup string, successful bool) error {
+    resp, err := r.Post(fmt.Sprintf("/backups/%s/restore", backup), D{"successful": successful})
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
     return resp.Error()
 }
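
For reference, a minimal usage sketch of the new helper (the backup UUID is a placeholder; the deferred variant of this exact call appears in server.RestoreBackup further down in this commit):

    // Tell the Panel the restoration finished. This results in a POST to
    // /backups/<uuid>/restore on the Panel's remote API with {"successful": true}.
    if err := api.New().SendRestorationStatus("00000000-0000-0000-0000-000000000000", true); err != nil {
        log.WithField("error", err).Error("failed to notify Panel of restoration status")
    }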


@ -2,6 +2,11 @@ package docker
import ( import (
"context" "context"
"fmt"
"io"
"sync"
"emperror.dev/errors"
"github.com/apex/log" "github.com/apex/log"
"github.com/docker/docker/api/types" "github.com/docker/docker/api/types"
"github.com/docker/docker/client" "github.com/docker/docker/client"
@ -9,8 +14,6 @@ import (
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events" "github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system" "github.com/pterodactyl/wings/system"
"io"
"sync"
) )
type Metadata struct { type Metadata struct {
@ -187,3 +190,26 @@ func (e *Environment) SetImage(i string) {
e.meta.Image = i e.meta.Image = i
} }
func (e *Environment) State() string {
return e.st.Load()
}
// SetState sets the state of the environment. This emits an event that server's
// can hook into to take their own actions and track their own state based on
// the environment.
func (e *Environment) SetState(state string) {
if state != environment.ProcessOfflineState &&
state != environment.ProcessStartingState &&
state != environment.ProcessRunningState &&
state != environment.ProcessStoppingState {
panic(errors.New(fmt.Sprintf("invalid server state received: %s", state)))
}
// Emit the event to any listeners that are currently registered.
if e.State() != state {
// If the state changed make sure we update the internal tracking to note that.
e.st.Store(state)
e.Events().Publish(environment.StateChangeEvent, state)
}
}
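
A quick sketch of the contract SetState now enforces (demoSetState is a hypothetical helper; env is any *Environment from this package):

    func demoSetState(env *Environment) {
        env.SetState(environment.ProcessStartingState) // changed: publishes environment.StateChangeEvent
        env.SetState(environment.ProcessStartingState) // unchanged: no event is published
        env.SetState("restarting")                     // not a recognized process state: panics
    }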


@ -1,29 +0,0 @@
package docker
import (
"emperror.dev/errors"
"fmt"
"github.com/pterodactyl/wings/environment"
)
func (e *Environment) State() string {
return e.st.Load()
}
// Sets the state of the environment. This emits an event that server's can hook into to
// take their own actions and track their own state based on the environment.
func (e *Environment) SetState(state string) {
if state != environment.ProcessOfflineState &&
state != environment.ProcessStartingState &&
state != environment.ProcessRunningState &&
state != environment.ProcessStoppingState {
panic(errors.New(fmt.Sprintf("invalid server state received: %s", state)))
}
// Emit the event to any listeners that are currently registered.
if e.State() != state {
// If the state changed make sure we update the internal tracking to note that.
e.st.Store(state)
e.Events().Publish(environment.StateChangeEvent, state)
}
}


@@ -96,6 +96,7 @@ func Configure(m *server.Manager) *gin.Engine {
             backup := server.Group("/backup")
             {
                 backup.POST("", postServerBackup)
+                backup.POST("/:backup/restore", postServerRestoreBackup)
                 backup.DELETE("/:backup", deleteServerBackup)
             }
         }


@@ -1,64 +1,164 @@
 package router
 
 import (
-    "fmt"
     "net/http"
     "os"
+    "strings"
 
     "emperror.dev/errors"
+    "github.com/apex/log"
     "github.com/gin-gonic/gin"
+
+    "github.com/pterodactyl/wings/router/middleware"
     "github.com/pterodactyl/wings/server"
     "github.com/pterodactyl/wings/server/backup"
 )
 
-// Backs up a server.
+// postServerBackup performs a backup against a given server instance using the
+// provided backup adapter.
 func postServerBackup(c *gin.Context) {
-    s := ExtractServer(c)
-    data := &backup.Request{}
-    // BindJSON sends 400 if the request fails, all we need to do is return
+    s := middleware.ExtractServer(c)
+    logger := middleware.ExtractLogger(c)
+
+    var data struct {
+        Adapter backup.AdapterType `json:"adapter"`
+        Uuid    string             `json:"uuid"`
+        Ignore  string             `json:"ignore"`
+    }
     if err := c.BindJSON(&data); err != nil {
         return
     }
 
     var adapter backup.BackupInterface
-    var err error
-
     switch data.Adapter {
     case backup.LocalBackupAdapter:
-        adapter, err = data.NewLocalBackup()
+        adapter = backup.NewLocal(data.Uuid, data.Ignore)
     case backup.S3BackupAdapter:
-        adapter, err = data.NewS3Backup()
+        adapter = backup.NewS3(data.Uuid, data.Ignore)
     default:
-        err = errors.New(fmt.Sprintf("unknown backup adapter [%s] provided", data.Adapter))
-    }
-
-    if err != nil {
-        NewServerError(err, s).Abort(c)
+        middleware.CaptureAndAbort(c, errors.New("router/backups: provided adapter is not valid: "+string(data.Adapter)))
         return
     }
 
-    // Attach the server ID to the backup log output for easier parsing.
+    // Attach the server ID and the request ID to the adapter log context for easier
+    // parsing in the logs.
     adapter.WithLogContext(map[string]interface{}{
-        "server": s.Id(),
+        "server":     s.Id(),
+        "request_id": c.GetString("request_id"),
     })
 
-    go func(b backup.BackupInterface, serv *server.Server) {
-        if err := serv.Backup(b); err != nil {
-            serv.Log().WithField("error", errors.WithStackIf(err)).Error("failed to generate backup for server")
+    go func(b backup.BackupInterface, s *server.Server, logger *log.Entry) {
+        if err := s.Backup(b); err != nil {
+            logger.WithField("error", errors.WithStackIf(err)).Error("router: failed to generate server backup")
         }
-    }(adapter, s)
+    }(adapter, s, logger)
 
     c.Status(http.StatusAccepted)
 }
 
-// Deletes a local backup of a server. If the backup is not found on the machine just return
-// a 404 error. The service calling this endpoint can make its own decisions as to how it wants
-// to handle that response.
-func deleteServerBackup(c *gin.Context) {
-    s := ExtractServer(c)
+// postServerRestoreBackup handles restoring a backup for a server by downloading
+// or finding the given backup on the system and then unpacking the archive into
+// the server's data directory. If the TruncateDirectory field is provided and
+// is true all of the files will be deleted for the server.
+//
+// This endpoint will block until the backup is fully restored, allowing a
+// spinner to be displayed in the Panel UI.
+//
+// TODO: stop the server if it is running; internally mark it as suspended
+func postServerRestoreBackup(c *gin.Context) {
+    s := middleware.ExtractServer(c)
+    logger := middleware.ExtractLogger(c)
+
+    var data struct {
+        Adapter           backup.AdapterType `binding:"required,oneof=wings s3" json:"adapter"`
+        TruncateDirectory bool               `json:"truncate_directory"`
+        // A UUID is always required for this endpoint, however the download URL
+        // is only present when the given adapter type is s3.
+        DownloadUrl string `json:"download_url"`
+    }
+    if err := c.BindJSON(&data); err != nil {
+        return
+    }
+    if data.Adapter == backup.S3BackupAdapter && data.DownloadUrl == "" {
+        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The download_url field is required when the backup adapter is set to S3."})
+        return
+    }
+
+    logger.Info("processing server backup restore request")
+    if data.TruncateDirectory {
+        logger.Info(`received "truncate_directory" flag in request: deleting server files`)
+        if err := s.Filesystem().TruncateRootDirectory(); err != nil {
+            middleware.CaptureAndAbort(c, err)
+            return
+        }
+    }
+
+    // Now that we've cleaned up the data directory if necessary, grab the backup file
+    // and attempt to restore it into the server directory.
+    if data.Adapter == backup.LocalBackupAdapter {
+        b, _, err := backup.LocateLocal(c.Param("backup"))
+        if err != nil {
+            middleware.CaptureAndAbort(c, err)
+            return
+        }
+        go func(s *server.Server, b backup.BackupInterface, logger *log.Entry) {
+            logger.Info("starting restoration process for server backup using local driver")
+            if err := s.RestoreBackup(b, nil); err != nil {
+                logger.WithField("error", err).Error("failed to restore local backup to server")
+            }
+            s.Events().Publish(server.DaemonMessageEvent, "Completed server restoration from local backup.")
+            s.Events().Publish(server.BackupRestoreCompletedEvent, "")
+            logger.Info("completed server restoration from local backup")
+        }(s, b, logger)
+        c.Status(http.StatusAccepted)
+        return
+    }
+
+    // Since this is not a local backup we need to stream the archive and then
+    // parse over the contents as we go in order to restore it to the server.
+    client := http.Client{}
+    logger.Info("downloading backup from remote location...")
+    // TODO: this will hang if there is an issue. We can't use c.Request.Context() (or really any)
+    // since it will be canceled when the request is closed which happens quickly since we push
+    // this into the background.
+    //
+    // For now I'm just using the server context so at least the request is canceled if
+    // the server gets deleted.
+    req, err := http.NewRequestWithContext(s.Context(), http.MethodGet, data.DownloadUrl, nil)
+    if err != nil {
+        middleware.CaptureAndAbort(c, err)
+        return
+    }
+    res, err := client.Do(req)
+    if err != nil {
+        middleware.CaptureAndAbort(c, err)
+        return
+    }
+    // Don't allow content types that we know are going to give us problems.
+    if res.Header.Get("Content-Type") == "" || !strings.Contains("application/x-gzip application/gzip", res.Header.Get("Content-Type")) {
+        res.Body.Close()
+        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+            "error": "The provided backup link is not a supported content type. \"" + res.Header.Get("Content-Type") + "\" is not application/x-gzip.",
+        })
+        return
+    }
+
+    go func(s *server.Server, uuid string, logger *log.Entry) {
+        logger.Info("starting restoration process for server backup using S3 driver")
+        if err := s.RestoreBackup(backup.NewS3(uuid, ""), res.Body); err != nil {
+            logger.WithField("error", errors.WithStack(err)).Error("failed to restore remote S3 backup to server")
+        }
+        s.Events().Publish(server.DaemonMessageEvent, "Completed server restoration from S3 backup.")
+        s.Events().Publish(server.BackupRestoreCompletedEvent, "")
+        logger.Info("completed server restoration from S3 backup")
+    }(s, c.Param("backup"), logger)
+
+    c.Status(http.StatusAccepted)
+}
+
+// deleteServerBackup deletes a local backup of a server. If the backup is not
+// found on the machine just return a 404 error. The service calling this
+// endpoint can make its own decisions as to how it wants to handle that
+// response.
+func deleteServerBackup(c *gin.Context) {
     b, _, err := backup.LocateLocal(c.Param("backup"))
     if err != nil {
         // Just return from the function at this point if the backup was not located.
@@ -68,20 +168,15 @@ func deleteServerBackup(c *gin.Context) {
             })
             return
         }
-        NewServerError(err, s).Abort(c)
+        middleware.CaptureAndAbort(c, err)
         return
     }
-    if err := b.Remove(); err != nil {
-        // I'm not entirely sure how likely this is to happen, however if we did manage to locate
-        // the backup previously and it is now missing when we go to delete, just treat it as having
-        // been successful, rather than returning a 404.
-        if !errors.Is(err, os.ErrNotExist) {
-            NewServerError(err, s).Abort(c)
-            return
-        }
-    }
+    // I'm not entirely sure how likely this is to happen, however if we did manage to
+    // locate the backup previously and it is now missing when we go to delete, just
+    // treat it as having been successful, rather than returning a 404.
+    if err := b.Remove(); err != nil && !errors.Is(err, os.ErrNotExist) {
+        middleware.CaptureAndAbort(c, err)
+        return
+    }
     c.Status(http.StatusNoContent)
 }
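
To exercise the new restore route, a client POSTs a small JSON body to the backup's /restore path and then watches the websocket for the completion event. A sketch in Go (host, token, and both UUIDs are placeholders; the /api/servers prefix assumes the existing router layout):

    body := strings.NewReader(`{"adapter": "wings", "truncate_directory": true}`)
    req, _ := http.NewRequest(http.MethodPost,
        "https://wings.example.com:8080/api/servers/<server-uuid>/backup/<backup-uuid>/restore", body)
    req.Header.Set("Authorization", "Bearer <node-token>")
    req.Header.Set("Content-Type", "application/json")
    res, err := http.DefaultClient.Do(req)
    if err == nil {
        fmt.Println(res.Status) // expect "202 Accepted"
    }
    // A "backup restore completed" websocket event fires when the restoration finishes.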


@@ -45,6 +45,7 @@ var e = []string{
     server.InstallCompletedEvent,
     server.DaemonMessageEvent,
     server.BackupCompletedEvent,
+    server.BackupRestoreCompletedEvent,
     server.TransferLogsEvent,
     server.TransferStatusEvent,
 }


@@ -1,12 +1,15 @@
 package server
 
 import (
+    "io"
     "io/ioutil"
     "os"
 
     "emperror.dev/errors"
     "github.com/apex/log"
+    "github.com/docker/docker/client"
     "github.com/pterodactyl/wings/api"
+    "github.com/pterodactyl/wings/environment"
     "github.com/pterodactyl/wings/server/backup"
 )
 
@@ -50,9 +53,9 @@ func (s *Server) getServerwideIgnoredFiles() (string, error) {
     return string(b), nil
 }
 
-// Performs a server backup and then emits the event over the server websocket. We
-// let the actual backup system handle notifying the panel of the status, but that
-// won't emit a websocket event.
+// Backup performs a server backup and then emits the event over the server
+// websocket. We let the actual backup system handle notifying the panel of the
+// status, but that won't emit a websocket event.
 func (s *Server) Backup(b backup.BackupInterface) error {
     ignored := b.Ignored()
     if b.Ignored() == "" {
@@ -108,3 +111,49 @@ func (s *Server) Backup(b backup.BackupInterface) error {
 
     return nil
 }
+
+// RestoreBackup calls the Restore function on the provided backup. Once this
+// restoration is completed an event is emitted to the websocket to notify the
+// Panel that it has been completed.
+//
+// In addition to the websocket event an API call is triggered to notify the
+// Panel of the new state.
+func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (err error) {
+    s.Config().SetSuspended(true)
+    // Local backups will not pass a reader through to this function, so check first
+    // to make sure it is a valid reader before trying to close it.
+    defer func() {
+        s.Config().SetSuspended(false)
+        if reader != nil {
+            reader.Close()
+        }
+    }()
+    // Send an API call to the Panel as soon as this function is done running so that
+    // the Panel is informed of the restoration status of this backup.
+    defer func() {
+        if rerr := api.New().SendRestorationStatus(b.Identifier(), err == nil); rerr != nil {
+            s.Log().WithField("error", rerr).WithField("backup", b.Identifier()).Error("failed to notify Panel of backup restoration status")
+        }
+    }()
+    // Don't try to restore the server until we have completely stopped the running
+    // instance, otherwise you'll likely hit all types of write errors due to the
+    // server being suspended.
+    if s.Environment.State() != environment.ProcessOfflineState {
+        if err = s.Environment.WaitForStop(120, false); err != nil {
+            if !client.IsErrNotFound(err) {
+                return errors.WrapIf(err, "server/backup: restore: failed to wait for container stop")
+            }
+        }
+    }
+    // Attempt to restore the backup to the server by running through each entry
+    // in the file one at a time and writing them to the disk.
+    s.Log().Debug("starting file writing process for backup restoration")
+    err = b.Restore(reader, func(file string, r io.Reader) error {
+        s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
+        return s.Filesystem().Writefile(file, r)
+    })
+    return errors.WithStackIf(err)
+}
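
The Panel notification above works because err is a named return value, so the deferred closure observes whatever error RestoreBackup ultimately returns. A stripped-down sketch of the same pattern (report and doWork are hypothetical stand-ins):

    func restore() (err error) {
        // Runs after restore returns; err holds the final return value.
        defer func() { report(err == nil) }()
        if err = doWork(); err != nil {
            return err // report receives false
        }
        return nil // report receives true
    }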


@@ -1,7 +1,13 @@
-package filesystem
+package backup
 
 import (
     "archive/tar"
+    "io"
+    "os"
+    "path/filepath"
+    "strings"
+    "sync"
+
     "emperror.dev/errors"
     "github.com/apex/log"
     "github.com/juju/ratelimit"
@@ -9,11 +15,6 @@ import (
     "github.com/klauspost/pgzip"
     "github.com/pterodactyl/wings/config"
     "github.com/sabhiram/go-gitignore"
-    "io"
-    "os"
-    "path/filepath"
-    "strings"
-    "sync"
 )
 
 const memory = 4 * 1024
@@ -39,7 +40,8 @@ type Archive struct {
     Files []string
 }
 
-// Creates an archive at dst with all of the files defined in the included files struct.
+// Create creates an archive at dst with all of the files defined in the
+// included files struct.
 func (a *Archive) Create(dst string) error {
     f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
     if err != nil {


@@ -3,13 +3,14 @@ package backup
 import (
     "crypto/sha1"
     "encoding/hex"
-    "github.com/apex/log"
-    "github.com/pterodactyl/wings/api"
-    "github.com/pterodactyl/wings/config"
     "io"
     "os"
     "path"
     "sync"
+
+    "github.com/apex/log"
+    "github.com/pterodactyl/wings/api"
+    "github.com/pterodactyl/wings/config"
 )
 
 type AdapterType string
@@ -19,13 +20,17 @@ const (
     S3BackupAdapter AdapterType = "s3"
 )
 
+// RestoreCallback is a generic restoration callback that exists for both local
+// and remote backups allowing the files to be restored.
+type RestoreCallback func(file string, r io.Reader) error
+
 type ArchiveDetails struct {
     Checksum     string `json:"checksum"`
     ChecksumType string `json:"checksum_type"`
     Size         int64  `json:"size"`
 }
 
-// Returns a request object.
+// ToRequest returns a request object.
 func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
     return api.BackupRequest{
         Checksum: ad.Checksum,
@@ -50,35 +55,33 @@ type Backup struct {
 
 // noinspection GoNameStartsWithPackageName
 type BackupInterface interface {
-    // Returns the UUID of this backup as tracked by the panel instance.
+    // Identifier returns the UUID of this backup as tracked by the panel
+    // instance.
     Identifier() string
-
-    // Attaches additional context to the log output for this backup.
+    // WithLogContext attaches additional context to the log output for this
+    // backup.
     WithLogContext(map[string]interface{})
-
-    // Generates a backup in whatever the configured source for the specific
-    // implementation is.
+    // Generate creates a backup in whatever the configured source for the
+    // specific implementation is.
     Generate(string, string) (*ArchiveDetails, error)
-
-    // Returns the ignored files for this backup instance.
+    // Ignored returns the ignored files for this backup instance.
     Ignored() string
-
-    // Returns a SHA1 checksum for the generated backup.
+    // Checksum returns a SHA1 checksum for the generated backup.
     Checksum() ([]byte, error)
-
-    // Returns the size of the generated backup.
+    // Size returns the size of the generated backup.
     Size() (int64, error)
-
-    // Returns the path to the backup on the machine. This is not always the final
-    // storage location of the backup, simply the location we're using to store
-    // it until it is moved to the final spot.
+    // Path returns the path to the backup on the machine. This is not always
+    // the final storage location of the backup, simply the location we're using
+    // to store it until it is moved to the final spot.
     Path() string
-
-    // Returns details about the archive.
+    // Details returns details about the archive.
     Details() *ArchiveDetails
-
-    // Removes a backup file.
+    // Remove removes a backup file.
     Remove() error
+    // Restore is called when a backup is ready to be restored to the disk from
+    // the given source. Not every backup implementation will support this nor
+    // will every implementation require a reader be provided.
+    Restore(reader io.Reader, callback RestoreCallback) error
 }
 
 func (b *Backup) Identifier() string {
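
Because RestoreCallback is a plain function type, the write target is entirely up to the caller. As an illustration, a hypothetical dry-run callback can tally an archive without touching the disk (b is a *LocalBackup here, which ignores the reader argument):

    var files, size int64
    err := b.Restore(nil, func(file string, r io.Reader) error {
        n, err := io.Copy(ioutil.Discard, r) // drain the entry instead of writing it
        files++
        size += n
        return err
    })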


@@ -2,8 +2,11 @@ package backup
 
 import (
     "errors"
-    "github.com/pterodactyl/wings/server/filesystem"
+    "io"
     "os"
+
+    "github.com/mholt/archiver/v3"
+    "github.com/pterodactyl/wings/system"
 )
 
 type LocalBackup struct {
@@ -12,8 +15,18 @@ type LocalBackup struct {
 
 var _ BackupInterface = (*LocalBackup)(nil)
 
-// Locates the backup for a server and returns the local path. This will obviously only
-// work if the backup was created as a local backup.
+func NewLocal(uuid string, ignore string) *LocalBackup {
+    return &LocalBackup{
+        Backup{
+            Uuid:    uuid,
+            Ignore:  ignore,
+            adapter: LocalBackupAdapter,
+        },
+    }
+}
+
+// LocateLocal finds the backup for a server and returns the local path. This
+// will obviously only work if the backup was created as a local backup.
 func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
     b := &LocalBackup{
         Backup{
@@ -34,20 +47,20 @@ func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
     return b, st, nil
 }
 
-// Removes a backup from the system.
+// Remove removes a backup from the system.
 func (b *LocalBackup) Remove() error {
     return os.Remove(b.Path())
 }
 
-// Attaches additional context to the log output for this backup.
+// WithLogContext attaches additional context to the log output for this backup.
 func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
     b.logContext = c
 }
 
-// Generates a backup of the selected files and pushes it to the defined location
-// for this instance.
+// Generate generates a backup of the selected files and pushes it to the
+// defined location for this instance.
 func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
-    a := &filesystem.Archive{
+    a := &Archive{
         BasePath: basePath,
         Ignore:   ignore,
     }
@@ -60,3 +73,18 @@ func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error)
 
     return b.Details(), nil
 }
+
+// Restore will walk over the archive and call the callback function for each
+// file encountered.
+func (b *LocalBackup) Restore(_ io.Reader, callback RestoreCallback) error {
+    return archiver.Walk(b.Path(), func(f archiver.File) error {
+        if f.IsDir() {
+            return nil
+        }
+        name, err := system.ExtractArchiveSourceName(f, "/")
+        if err != nil {
+            return err
+        }
+        return callback(name, f)
+    })
+}


@@ -1,42 +0,0 @@
-package backup
-
-import (
-    "errors"
-    "fmt"
-)
-
-type Request struct {
-    Adapter AdapterType `json:"adapter"`
-    Uuid    string      `json:"uuid"`
-    Ignore  string      `json:"ignore"`
-}
-
-// Generates a new local backup struct.
-func (r *Request) NewLocalBackup() (*LocalBackup, error) {
-    if r.Adapter != LocalBackupAdapter {
-        return nil, errors.New(fmt.Sprintf("cannot create local backup using [%s] adapter", r.Adapter))
-    }
-
-    return &LocalBackup{
-        Backup{
-            Uuid:    r.Uuid,
-            Ignore:  r.Ignore,
-            adapter: LocalBackupAdapter,
-        },
-    }, nil
-}
-
-// Generates a new S3 backup struct.
-func (r *Request) NewS3Backup() (*S3Backup, error) {
-    if r.Adapter != S3BackupAdapter {
-        return nil, errors.New(fmt.Sprintf("cannot create s3 backup using [%s] adapter", r.Adapter))
-    }
-
-    return &S3Backup{
-        Backup: Backup{
-            Uuid:    r.Uuid,
-            Ignore:  r.Ignore,
-            adapter: S3BackupAdapter,
-        },
-    }, nil
-}


@@ -1,13 +1,17 @@
 package backup
 
 import (
+    "archive/tar"
+    "compress/gzip"
     "fmt"
-    "github.com/pterodactyl/wings/api"
-    "github.com/pterodactyl/wings/server/filesystem"
     "io"
     "net/http"
     "os"
     "strconv"
+
+    "github.com/juju/ratelimit"
+    "github.com/pterodactyl/wings/api"
+    "github.com/pterodactyl/wings/config"
 )
 
 type S3Backup struct {
@@ -16,22 +20,32 @@ type S3Backup struct {
 
 var _ BackupInterface = (*S3Backup)(nil)
 
-// Removes a backup from the system.
+func NewS3(uuid string, ignore string) *S3Backup {
+    return &S3Backup{
+        Backup{
+            Uuid:    uuid,
+            Ignore:  ignore,
+            adapter: S3BackupAdapter,
+        },
+    }
+}
+
+// Remove removes a backup from the system.
 func (s *S3Backup) Remove() error {
     return os.Remove(s.Path())
 }
 
-// Attaches additional context to the log output for this backup.
+// WithLogContext attaches additional context to the log output for this backup.
 func (s *S3Backup) WithLogContext(c map[string]interface{}) {
     s.logContext = c
 }
 
-// Generates a new backup on the disk, moves it into the S3 bucket via the provided
-// presigned URL, and then deletes the backup from the disk.
+// Generate creates a new backup on the disk, moves it into the S3 bucket via
+// the provided presigned URL, and then deletes the backup from the disk.
 func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
     defer s.Remove()
 
-    a := &filesystem.Archive{
+    a := &Archive{
         BasePath: basePath,
         Ignore:   ignore,
     }
@@ -139,3 +153,40 @@ func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) error {
 
     return nil
 }
+
+// Restore will read from the provided reader assuming that it is a gzipped
+// tar reader. When a file is encountered in the archive the callback function
+// will be triggered. If the callback returns an error the entire process is
+// stopped, otherwise this function will run until all files have been written.
+//
+// File contents are streamed from the archive and written to the disk
+// sequentially as each regular file entry is encountered.
+func (s *S3Backup) Restore(r io.Reader, callback RestoreCallback) error {
+    reader := r
+    // Steal the logic we use for making backups which will be applied when restoring
+    // this specific backup. This allows us to prevent overloading the disk unintentionally.
+    if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
+        reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
+    }
+    gr, err := gzip.NewReader(reader)
+    if err != nil {
+        return err
+    }
+    defer gr.Close()
+    tr := tar.NewReader(gr)
+    for {
+        header, err := tr.Next()
+        if err != nil {
+            if err == io.EOF {
+                break
+            }
+            return err
+        }
+        if header.Typeflag == tar.TypeReg {
+            if err := callback(header.Name, tr); err != nil {
+                return err
+            }
+        }
+    }
+    return nil
+}
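
A self-contained way to see this in action is to feed Restore a gzipped tar built in memory (the UUID is a placeholder, and the sketch assumes the wings config has been loaded so the write-limit lookup succeeds):

    var buf bytes.Buffer
    gw := gzip.NewWriter(&buf)
    tw := tar.NewWriter(gw)
    data := []byte("restored contents")
    _ = tw.WriteHeader(&tar.Header{Name: "motd.txt", Typeflag: tar.TypeReg, Mode: 0644, Size: int64(len(data))})
    _, _ = tw.Write(data)
    _ = tw.Close()
    _ = gw.Close()

    b := backup.NewS3("00000000-0000-0000-0000-000000000000", "")
    err := b.Restore(&buf, func(file string, r io.Reader) error {
        fmt.Println("restoring:", file) // prints: restoring: motd.txt
        _, err := io.Copy(ioutil.Discard, r)
        return err
    })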


@@ -14,6 +14,7 @@ const (
     ConsoleOutputEvent          = "console output"
     StatusEvent                 = "status"
     StatsEvent                  = "stats"
+    BackupRestoreCompletedEvent = "backup restore completed"
     BackupCompletedEvent        = "backup completed"
     TransferLogsEvent           = "transfer logs"
     TransferStatusEvent         = "transfer status"


@@ -6,16 +6,23 @@ import (
     "path"
     "path/filepath"
     "strings"
+    "sync/atomic"
     "time"
+
+    "github.com/mholt/archiver/v3"
+
+    "github.com/pterodactyl/wings/server/backup"
+    "github.com/pterodactyl/wings/system"
 )
 
-// Compresses all of the files matching the given paths in the specified directory. This function
-// also supports passing nested paths to only compress certain files and folders when working in
-// a larger directory. This effectively creates a local backup, but rather than ignoring specific
-// files and folders, it takes an allow-list of files and folders.
+// CompressFiles compresses all of the files matching the given paths in the
+// specified directory. This function also supports passing nested paths to only
+// compress certain files and folders when working in a larger directory. This
+// effectively creates a local backup, but rather than ignoring specific files
+// and folders, it takes an allow-list of files and folders.
 //
-// All paths are relative to the dir that is passed in as the first argument, and the compressed
-// file will be placed at that location named `archive-{date}.tar.gz`.
+// All paths are relative to the dir that is passed in as the first argument,
+// and the compressed file will be placed at that location named
+// `archive-{date}.tar.gz`.
 func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
     cleanedRootDir, err := fs.SafePath(dir)
     if err != nil {
@@ -32,7 +39,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
         return nil, err
     }
 
-    a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
+    a := &backup.Archive{BasePath: cleanedRootDir, Files: cleaned}
     d := path.Join(
         cleanedRootDir,
         fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
@@ -57,3 +64,84 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
 
     return f, nil
 }
+
+// SpaceAvailableForDecompression looks through a given archive and determines
+// if decompressing it would put the server over its allocated disk space limit.
+func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
+    // Don't waste time trying to determine this if we know the server will have the space for
+    // it since there is no limit.
+    if fs.MaxDisk() <= 0 {
+        return nil
+    }
+
+    source, err := fs.SafePath(filepath.Join(dir, file))
+    if err != nil {
+        return err
+    }
+
+    // Get the cached size in a parallel process so that if it is not cached we are not
+    // waiting an unnecessary amount of time on this call.
+    dirSize, err := fs.DiskUsage(false)
+
+    var size int64
+    // Walk over the archive and figure out just how large the final output would be from unarchiving it.
+    err = archiver.Walk(source, func(f archiver.File) error {
+        if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
+            return &Error{code: ErrCodeDiskSpace}
+        }
+        return nil
+    })
+    if err != nil {
+        if strings.HasPrefix(err.Error(), "format ") {
+            return &Error{code: ErrCodeUnknownArchive}
+        }
+        return err
+    }
+    return err
+}
+
+// DecompressFile will decompress a file in a given directory by using the
+// archiver tool to infer the file type and go from there. This will walk over
+// all of the files within the given archive and ensure that there is not a
+// zip-slip attack being attempted by validating that the final path is within
+// the server data directory.
+func (fs *Filesystem) DecompressFile(dir string, file string) error {
+    source, err := fs.SafePath(filepath.Join(dir, file))
+    if err != nil {
+        return err
+    }
+    // Ensure that the source archive actually exists on the system.
+    if _, err := os.Stat(source); err != nil {
+        return err
+    }
+
+    // Walk all of the files in the archiver file and write them to the disk. If any
+    // directory is encountered it will be skipped since we handle creating any missing
+    // directories automatically when writing files.
+    err = archiver.Walk(source, func(f archiver.File) error {
+        if f.IsDir() {
+            return nil
+        }
+        name, err := system.ExtractArchiveSourceName(f, dir)
+        if err != nil {
+            return WrapError(err, filepath.Join(dir, f.Name()))
+        }
+        p := filepath.Join(dir, name)
+        // If it is ignored, just don't do anything with the file and skip over it.
+        if err := fs.IsIgnored(p); err != nil {
+            return nil
+        }
+        if err := fs.Writefile(p, f); err != nil {
+            return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
+        }
+        return nil
+    })
+    if err != nil {
+        if strings.HasPrefix(err.Error(), "format ") {
+            return &Error{code: ErrCodeUnknownArchive}
+        }
+        return err
+    }
+    return nil
+}


@@ -1,111 +0,0 @@
-package filesystem
-
-import (
-    "archive/tar"
-    "archive/zip"
-    "compress/gzip"
-    "fmt"
-    "os"
-    "path/filepath"
-    "reflect"
-    "strings"
-    "sync/atomic"
-
-    "emperror.dev/errors"
-    "github.com/mholt/archiver/v3"
-)
-
-// SpaceAvailableForDecompression looks through a given archive and determines
-// if decompressing it would put the server over its allocated disk space limit.
-func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
-    // Don't waste time trying to determine this if we know the server will have the space for
-    // it since there is no limit.
-    if fs.MaxDisk() <= 0 {
-        return nil
-    }
-
-    source, err := fs.SafePath(filepath.Join(dir, file))
-    if err != nil {
-        return err
-    }
-
-    // Get the cached size in a parallel process so that if it is not cached we are not
-    // waiting an unnecessary amount of time on this call.
-    dirSize, err := fs.DiskUsage(false)
-
-    var size int64
-    // Walk over the archive and figure out just how large the final output would be from unarchiving it.
-    err = archiver.Walk(source, func(f archiver.File) error {
-        if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
-            return &Error{code: ErrCodeDiskSpace}
-        }
-        return nil
-    })
-    if err != nil {
-        if strings.HasPrefix(err.Error(), "format ") {
-            return &Error{code: ErrCodeUnknownArchive}
-        }
-        return err
-    }
-    return err
-}
-
-// DecompressFile will decompress a file in a given directory by using the
-// archiver tool to infer the file type and go from there. This will walk over
-// all of the files within the given archive and ensure that there is not a
-// zip-slip attack being attempted by validating that the final path is within
-// the server data directory.
-func (fs *Filesystem) DecompressFile(dir string, file string) error {
-    source, err := fs.SafePath(filepath.Join(dir, file))
-    if err != nil {
-        return err
-    }
-    // Ensure that the source archive actually exists on the system.
-    if _, err := os.Stat(source); err != nil {
-        return err
-    }
-
-    // Walk over all of the files spinning up an additional go-routine for each file we've encountered
-    // and then extract that file from the archive and write it to the disk. If any part of this process
-    // encounters an error the entire process will be stopped.
-    err = archiver.Walk(source, func(f archiver.File) error {
-        // Don't waste time with directories, we don't need to create them if they have no contents, and
-        // we will ensure the directory exists when opening the file for writing anyways.
-        if f.IsDir() {
-            return nil
-        }
-
-        var name string
-        switch s := f.Sys().(type) {
-        case *tar.Header:
-            name = s.Name
-        case *gzip.Header:
-            name = s.Name
-        case *zip.FileHeader:
-            name = s.Name
-        default:
-            return &Error{
-                code:     ErrCodeUnknownError,
-                resolved: filepath.Join(dir, f.Name()),
-                err:      errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())),
-            }
-        }
-
-        p := filepath.Join(dir, name)
-        // If it is ignored, just don't do anything with the file and skip over it.
-        if err := fs.IsIgnored(p); err != nil {
-            return nil
-        }
-        if err := fs.Writefile(p, f); err != nil {
-            return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
-        }
-        return nil
-    })
-    if err != nil {
-        if strings.HasPrefix(err.Error(), "format ") {
-            return &Error{code: ErrCodeUnknownArchive}
-        }
-        return err
-    }
-    return nil
-}


@@ -1,6 +1,7 @@
 package filesystem
 
 import (
+    "emperror.dev/errors"
     "github.com/apex/log"
     "github.com/karrick/godirwalk"
     "sync"
@@ -189,7 +190,7 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
         },
     })
 
-    return size, err
+    return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
 }
 
 // Helper function to determine if a server has space available for a file of a given size.


@@ -11,6 +11,7 @@ import (
     "strconv"
     "strings"
     "sync"
+    "sync/atomic"
     "time"
 
     "emperror.dev/errors"
@@ -89,12 +90,12 @@ func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
     }
     // If the error is not because it doesn't exist then we just need to bail at this point.
     if !errors.Is(err, os.ErrNotExist) {
-        return nil, err
+        return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file handle")
     }
     // Create the path leading up to the file we're trying to create, setting the final perms
     // on it as we go.
     if err := os.MkdirAll(filepath.Dir(cleaned), 0755); err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "server/filesystem: touch: failed to create directory tree")
     }
     if err := fs.Chown(filepath.Dir(cleaned)); err != nil {
         return nil, err
@@ -104,7 +105,7 @@ func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
     // Chown that file so that the permissions don't mess with things.
     f, err = o.open(cleaned, flag, 0644)
     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file with wait")
     }
     _ = fs.Chown(cleaned)
     return f, nil
@@ -124,7 +125,8 @@ func (fs *Filesystem) Readfile(p string, w io.Writer) error {
 }
 
 // Writefile writes a file to the system. If the file does not already exist one
-// will be created.
+// will be created. This will also properly recalculate the disk space used by
+// the server when writing new files or modifying existing ones.
 func (fs *Filesystem) Writefile(p string, r io.Reader) error {
     cleaned, err := fs.SafePath(p)
     if err != nil {
@@ -136,7 +138,7 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
     // to it and an empty file. We'll then write to it later on after this completes.
     stat, err := os.Stat(cleaned)
     if err != nil && !os.IsNotExist(err) {
-        return err
+        return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
     } else if err == nil {
         if stat.IsDir() {
             return &Error{code: ErrCodeIsDirectory, resolved: cleaned}
@@ -233,7 +235,7 @@ func (fs *Filesystem) Chown(path string) error {
 
     // Start by just chowning the initial path that we received.
     if err := os.Chown(cleaned, uid, gid); err != nil {
-        return err
+        return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
     }
 
     // If this is not a directory we can now return from the function, there is nothing
@@ -244,7 +246,7 @@ func (fs *Filesystem) Chown(path string) error {
     // If this was a directory, begin walking over its contents recursively and ensure that all
     // of the subfiles and directories get their permissions updated as well.
-    return godirwalk.Walk(cleaned, &godirwalk.Options{
+    err = godirwalk.Walk(cleaned, &godirwalk.Options{
         Unsorted: true,
         Callback: func(p string, e *godirwalk.Dirent) error {
             // Do not attempt to chmod a symlink. Go's os.Chown function will affect the symlink
@@ -261,6 +263,8 @@ func (fs *Filesystem) Chown(path string) error {
             return os.Chown(p, uid, gid)
         },
     })
+
+    return errors.Wrap(err, "server/filesystem: chown: failed to chown during walk function")
 }
 
 func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
@@ -365,8 +369,21 @@ func (fs *Filesystem) Copy(p string) error {
     return fs.Writefile(path.Join(relative, n), source)
 }
 
-// Deletes a file or folder from the system. Prevents the user from accidentally
-// (or maliciously) removing their root server data directory.
+// TruncateRootDirectory removes _all_ files and directories from a server's
+// data directory and resets the used disk space to zero.
+func (fs *Filesystem) TruncateRootDirectory() error {
+    if err := os.RemoveAll(fs.Path()); err != nil {
+        return err
+    }
+    if err := os.Mkdir(fs.Path(), 0755); err != nil {
+        return err
+    }
+    atomic.StoreInt64(&fs.diskUsed, 0)
+    return nil
+}
+
+// Delete removes a file or folder from the system. Prevents the user from
+// accidentally (or maliciously) removing their root server data directory.
 func (fs *Filesystem) Delete(p string) error {
     wg := sync.WaitGroup{}
     // This is one of the few (only?) places in the codebase where we're explicitly not using


@@ -7,6 +7,7 @@ import (
     "strings"
     "sync"
 
+    "emperror.dev/errors"
     "golang.org/x/sync/errgroup"
 )
 
@@ -41,7 +42,7 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
     // is truly pointing to.
     ep, err := filepath.EvalSymlinks(r)
     if err != nil && !os.IsNotExist(err) {
-        return "", err
+        return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
     } else if os.IsNotExist(err) {
         // The requested directory doesn't exist, so at this point we need to iterate up the
         // path chain until we hit a directory that _does_ exist and can be validated.


@@ -1,18 +1,23 @@
 package system
 
 import (
+    "archive/tar"
+    "archive/zip"
     "bufio"
     "bytes"
+    "compress/gzip"
     "context"
     "encoding/json"
     "fmt"
     "io"
+    "reflect"
     "strconv"
     "strings"
     "sync"
     "time"
 
     "emperror.dev/errors"
+    "github.com/mholt/archiver/v3"
 )
 
 var cr = []byte(" \r")
@@ -36,6 +41,22 @@ func MustInt(v string) int {
     return i
 }
 
+// ExtractArchiveSourceName looks for the provided archiver.File's name if it is
+// a type that is supported, otherwise it returns an error to the caller.
+func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) {
+    switch s := f.Sys().(type) {
+    case *tar.Header:
+        name = s.Name
+    case *gzip.Header:
+        name = s.Name
+    case *zip.FileHeader:
+        name = s.Name
+    default:
+        err = errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String()))
+    }
+    return name, err
+}
+
 func ScanReader(r io.Reader, callback func(line string)) error {
     br := bufio.NewReader(r)
     // Avoid constantly re-allocating memory when we're flooding lines through this