2020-04-17 20:46:36 +00:00
|
|
|
package backup
|
|
|
|
|
2020-04-27 00:20:26 +00:00
|
|
|
import (
|
2021-02-02 05:28:46 +00:00
|
|
|
"context"
|
2020-04-27 00:20:26 +00:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net/http"
|
|
|
|
"os"
|
|
|
|
"strconv"
|
2021-05-02 19:28:36 +00:00
|
|
|
"time"
|
|
|
|
|
|
|
|
"emperror.dev/errors"
|
|
|
|
"github.com/cenkalti/backoff/v4"
|
2021-01-19 05:20:58 +00:00
|
|
|
"github.com/juju/ratelimit"
|
2022-11-06 20:38:30 +00:00
|
|
|
"github.com/mholt/archiver/v4"
|
2021-07-15 21:37:38 +00:00
|
|
|
|
2021-01-19 05:20:58 +00:00
|
|
|
"github.com/pterodactyl/wings/config"
|
2021-02-02 05:28:46 +00:00
|
|
|
"github.com/pterodactyl/wings/remote"
|
2022-11-06 20:38:30 +00:00
|
|
|
"github.com/pterodactyl/wings/server/filesystem"
|
2020-04-27 00:20:26 +00:00
|
|
|
)
|
|
|
|
|
2020-04-17 20:46:36 +00:00
|
|
|
// S3Backup wraps a Backup and uploads the generated archive to an
// S3-compatible endpoint using presigned URLs provided by the Panel.
type S3Backup struct {
	Backup
}

// Compile-time assertion that S3Backup satisfies BackupInterface.
var _ BackupInterface = (*S3Backup)(nil)
|
2020-04-17 20:46:36 +00:00
|
|
|
|
2021-02-02 05:28:46 +00:00
|
|
|
func NewS3(client remote.Client, uuid string, ignore string) *S3Backup {
|
2021-01-18 05:05:51 +00:00
|
|
|
return &S3Backup{
|
|
|
|
Backup{
|
2021-02-02 05:28:46 +00:00
|
|
|
client: client,
|
2021-01-18 05:05:51 +00:00
|
|
|
Uuid: uuid,
|
|
|
|
Ignore: ignore,
|
|
|
|
adapter: S3BackupAdapter,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove removes a backup from the system.
//
// The archive only lives on the local disk while it is being created and
// uploaded, so removal is simply deleting the file at the backup's path.
func (s *S3Backup) Remove() error {
	return os.Remove(s.Path())
}
|
|
|
|
|
2021-01-18 05:05:51 +00:00
|
|
|
// WithLogContext attaches additional context to the log output for this backup.
// The fields in c are merged into every log line emitted via s.log().
func (s *S3Backup) WithLogContext(c map[string]interface{}) {
	s.logContext = c
}
|
|
|
|
|
2021-01-18 05:05:51 +00:00
|
|
|
// Generate creates a new backup on the disk, moves it into the S3 bucket via
|
|
|
|
// the provided presigned URL, and then deletes the backup from the disk.
|
2021-05-02 19:28:36 +00:00
|
|
|
func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
|
2020-04-27 00:20:26 +00:00
|
|
|
defer s.Remove()
|
2020-04-17 20:46:36 +00:00
|
|
|
|
2021-03-07 18:02:03 +00:00
|
|
|
a := &filesystem.Archive{
|
2020-12-25 19:52:57 +00:00
|
|
|
BasePath: basePath,
|
|
|
|
Ignore: ignore,
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
|
|
|
|
2021-05-02 19:28:36 +00:00
|
|
|
s.log().WithField("path", s.Path()).Info("creating backup for server")
|
2020-12-25 19:52:57 +00:00
|
|
|
if err := a.Create(s.Path()); err != nil {
|
2020-11-28 23:57:10 +00:00
|
|
|
return nil, err
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Info("created backup successfully")
|
2020-04-27 00:20:26 +00:00
|
|
|
|
2020-05-10 02:24:30 +00:00
|
|
|
rc, err := os.Open(s.Path())
|
2020-04-27 00:20:26 +00:00
|
|
|
if err != nil {
|
2021-05-02 19:28:36 +00:00
|
|
|
return nil, errors.Wrap(err, "backup: could not read archive from disk")
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
|
|
|
defer rc.Close()
|
|
|
|
|
2022-09-26 17:14:57 +00:00
|
|
|
parts, err := s.generateRemoteRequest(ctx, rc)
|
|
|
|
if err != nil {
|
2020-11-28 23:57:10 +00:00
|
|
|
return nil, err
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
2022-09-26 17:14:57 +00:00
|
|
|
ad, err := s.Details(ctx, parts)
|
2021-05-02 19:28:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
|
|
|
|
}
|
|
|
|
return ad, nil
|
2020-10-31 23:47:41 +00:00
|
|
|
}
|
|
|
|
|
2021-05-02 19:28:36 +00:00
|
|
|
// Restore will read from the provided reader assuming that it is a gzipped
|
|
|
|
// tar reader. When a file is encountered in the archive the callback function
|
|
|
|
// will be triggered. If the callback returns an error the entire process is
|
|
|
|
// stopped, otherwise this function will run until all files have been written.
|
|
|
|
//
|
|
|
|
// This restoration uses a workerpool to use up to the number of CPUs available
|
|
|
|
// on the machine when writing files to the disk.
|
|
|
|
func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCallback) error {
|
|
|
|
reader := r
|
|
|
|
// Steal the logic we use for making backups which will be applied when restoring
|
|
|
|
// this specific backup. This allows us to prevent overloading the disk unintentionally.
|
|
|
|
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
|
|
|
|
reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
|
|
|
|
}
|
2022-11-06 20:38:30 +00:00
|
|
|
if err := format.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error {
|
|
|
|
r, err := f.Open()
|
2021-05-02 19:28:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2022-11-06 20:38:30 +00:00
|
|
|
defer r.Close()
|
|
|
|
|
|
|
|
return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
|
|
|
|
}); err != nil {
|
|
|
|
return err
|
2021-05-02 19:28:36 +00:00
|
|
|
}
|
2020-10-31 23:47:41 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-05-10 02:24:30 +00:00
|
|
|
// Generates the remote S3 request and begins the upload.
|
2022-09-26 17:14:57 +00:00
|
|
|
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
|
2020-10-31 23:47:41 +00:00
|
|
|
defer rc.Close()
|
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Debug("attempting to get size of backup...")
|
2020-10-31 23:47:41 +00:00
|
|
|
size, err := s.Backup.Size()
|
2020-05-10 02:24:30 +00:00
|
|
|
if err != nil {
|
2022-09-26 17:14:57 +00:00
|
|
|
return nil, err
|
2020-05-10 02:24:30 +00:00
|
|
|
}
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("size", size).Debug("got size of backup")
|
2020-05-10 02:24:30 +00:00
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Debug("attempting to get S3 upload urls from Panel...")
|
2021-02-02 05:28:46 +00:00
|
|
|
urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
|
2020-10-31 23:47:41 +00:00
|
|
|
if err != nil {
|
2022-09-26 17:14:57 +00:00
|
|
|
return nil, err
|
2020-10-31 23:47:41 +00:00
|
|
|
}
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Debug("got S3 upload urls from the Panel")
|
|
|
|
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
|
2020-10-31 23:47:41 +00:00
|
|
|
|
2021-05-02 19:28:36 +00:00
|
|
|
uploader := newS3FileUploader(rc)
|
2020-10-31 23:47:41 +00:00
|
|
|
for i, part := range urls.Parts {
|
2020-11-01 17:30:25 +00:00
|
|
|
// Get the size for the current part.
|
|
|
|
var partSize int64
|
2020-12-28 00:16:40 +00:00
|
|
|
if i+1 < len(urls.Parts) {
|
2020-11-01 17:30:25 +00:00
|
|
|
partSize = urls.PartSize
|
2020-10-31 23:47:41 +00:00
|
|
|
} else {
|
2020-11-01 17:30:25 +00:00
|
|
|
// This is the remaining size for the last part,
|
|
|
|
// there is not a minimum size limit for the last part.
|
|
|
|
partSize = size - (int64(i) * urls.PartSize)
|
2020-10-31 23:47:41 +00:00
|
|
|
}
|
|
|
|
|
2020-11-01 17:30:25 +00:00
|
|
|
// Attempt to upload the part.
|
2022-09-26 17:14:57 +00:00
|
|
|
etag, err := uploader.uploadPart(ctx, part, partSize)
|
|
|
|
if err != nil {
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
|
2022-09-26 17:14:57 +00:00
|
|
|
return nil, err
|
2020-10-31 23:47:41 +00:00
|
|
|
}
|
2022-09-26 17:14:57 +00:00
|
|
|
uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
|
|
|
|
ETag: etag,
|
|
|
|
PartNumber: i + 1,
|
|
|
|
})
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
|
2020-10-31 23:47:41 +00:00
|
|
|
}
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
|
2020-05-10 02:24:30 +00:00
|
|
|
|
2022-09-26 17:14:57 +00:00
|
|
|
return uploader.uploadedParts, nil
|
2020-04-17 20:46:36 +00:00
|
|
|
}
|
2021-01-19 05:20:58 +00:00
|
|
|
|
2021-05-02 19:28:36 +00:00
|
|
|
// s3FileUploader streams the wrapped archive to S3 one part at a time,
// recording the ETag of every part that uploads successfully.
type s3FileUploader struct {
	io.ReadCloser
	// client is the HTTP client used for all part upload requests.
	client *http.Client
	// uploadedParts accumulates the ETag/part-number pairs returned by S3.
	uploadedParts []remote.BackupPart
}
|
|
|
|
|
|
|
|
// newS3FileUploader returns a new file uploader instance.
|
|
|
|
func newS3FileUploader(file io.ReadCloser) *s3FileUploader {
|
|
|
|
return &s3FileUploader{
|
|
|
|
ReadCloser: file,
|
|
|
|
// We purposefully use a super high timeout on this request since we need to upload
|
|
|
|
// a 5GB file. This assumes at worst a 10Mbps connection for uploading. While technically
|
|
|
|
// you could go slower we're targeting mostly hosted servers that should have 100Mbps
|
|
|
|
// connections anyways.
|
|
|
|
client: &http.Client{Timeout: time.Hour * 2},
|
2021-01-19 05:20:58 +00:00
|
|
|
}
|
2021-05-02 19:28:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// backoff returns a new expoential backoff implementation using a context that
|
|
|
|
// will also stop the backoff if it is canceled.
|
|
|
|
func (fu *s3FileUploader) backoff(ctx context.Context) backoff.BackOffContext {
|
|
|
|
b := backoff.NewExponentialBackOff()
|
|
|
|
b.Multiplier = 2
|
|
|
|
b.MaxElapsedTime = time.Minute
|
|
|
|
|
|
|
|
return backoff.WithContext(b, ctx)
|
|
|
|
}
|
|
|
|
|
|
|
|
// uploadPart attempts to upload a given S3 file part to the S3 system. If a
// 5xx error is returned from the endpoint this will continue with an exponential
// backoff to try and successfully upload the part.
//
// Once uploaded the ETag is returned to the caller.
func (fu *s3FileUploader) uploadPart(ctx context.Context, part string, size int64) (string, error) {
	r, err := http.NewRequestWithContext(ctx, http.MethodPut, part, nil)
	if err != nil {
		return "", errors.Wrap(err, "backup: could not create request for S3")
	}

	r.ContentLength = size
	r.Header.Add("Content-Length", strconv.Itoa(int(size)))
	r.Header.Add("Content-Type", "application/x-gzip")

	// Limit the reader to the size of the part.
	r.Body = Reader{Reader: io.LimitReader(fu.ReadCloser, size)}

	// NOTE(review): the request body is a one-shot reader over the shared
	// upload stream (no GetBody is set). If an attempt fails after partially
	// consuming the body, a backoff retry re-sends the same, now partially
	// drained, reader — the retried part could be truncated. Confirm whether
	// retries can ever fire with a partially read body in practice.
	var etag string
	err = backoff.Retry(func() error {
		res, err := fu.client.Do(r)
		if err != nil {
			// Context cancellation/timeout will never succeed on retry, so
			// stop the backoff loop immediately.
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return backoff.Permanent(err)
			}
			// Don't use a permanent error here, if there is a temporary resolution error with
			// the URL due to DNS issues we want to keep re-trying.
			return errors.Wrap(err, "backup: S3 HTTP request failed")
		}
		// The response payload is not needed; close the body so the
		// connection can be reused.
		_ = res.Body.Close()

		if res.StatusCode != http.StatusOK {
			err := errors.New(fmt.Sprintf("backup: failed to put S3 object: [HTTP/%d] %s", res.StatusCode, res.Status))
			// Only attempt a backoff retry if this error is because of a 5xx error from
			// the S3 endpoint. Any 4xx error should be treated as an error that a retry
			// would not fix.
			if res.StatusCode >= http.StatusInternalServerError {
				return err
			}
			return backoff.Permanent(err)
		}

		// Get the ETag from the uploaded part, this should be sent with the
		// CompleteMultipartUpload request.
		etag = res.Header.Get("ETag")

		return nil
	}, fu.backoff(ctx))

	if err != nil {
		// Unwrap permanent errors so callers see the underlying cause rather
		// than the backoff wrapper type.
		if v, ok := err.(*backoff.PermanentError); ok {
			return "", v.Unwrap()
		}
		return "", err
	}

	return etag, nil
}
|
|
|
|
|
|
|
|
// Reader provides a wrapper around an existing io.Reader
|
|
|
|
// but implements io.Closer in order to satisfy an io.ReadCloser.
|
|
|
|
type Reader struct {
|
|
|
|
io.Reader
|
|
|
|
}
|
|
|
|
|
|
|
|
func (Reader) Close() error {
|
2021-01-19 05:20:58 +00:00
|
|
|
return nil
|
|
|
|
}
|