2020-04-17 20:46:36 +00:00
|
|
|
package backup
|
|
|
|
|
2020-04-27 00:20:26 +00:00
|
|
|
import (
|
|
|
|
"fmt"
|
2020-10-31 23:47:41 +00:00
|
|
|
"github.com/pterodactyl/wings/api"
|
2020-12-25 19:52:57 +00:00
|
|
|
"github.com/pterodactyl/wings/server/filesystem"
|
2020-04-27 00:20:26 +00:00
|
|
|
"io"
|
|
|
|
"net/http"
|
|
|
|
"os"
|
|
|
|
"strconv"
|
|
|
|
)
|
|
|
|
|
2020-04-17 20:46:36 +00:00
|
|
|
// S3Backup is a server backup that is generated on the local disk and then
// uploaded to an S3-compatible object store using presigned part URLs
// provided by the Panel, rather than being retained locally.
type S3Backup struct {
	Backup
}

// Compile-time assertion that S3Backup implements BackupInterface.
var _ BackupInterface = (*S3Backup)(nil)
|
2020-04-17 20:46:36 +00:00
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
// Removes a backup from the system.
|
|
|
|
func (s *S3Backup) Remove() error {
|
|
|
|
return os.Remove(s.Path())
|
|
|
|
}
|
|
|
|
|
|
|
|
// WithLogContext attaches additional fields to the log output for this
// backup so that individual backup operations can be traced more easily.
func (s *S3Backup) WithLogContext(c map[string]interface{}) {
	s.logContext = c
}
|
|
|
|
|
2020-05-10 02:24:30 +00:00
|
|
|
// Generates a new backup on the disk, moves it into the S3 bucket via the provided
|
|
|
|
// presigned URL, and then deletes the backup from the disk.
|
2020-12-25 19:52:57 +00:00
|
|
|
func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
|
2020-04-27 00:20:26 +00:00
|
|
|
defer s.Remove()
|
2020-04-17 20:46:36 +00:00
|
|
|
|
2020-12-25 19:52:57 +00:00
|
|
|
a := &filesystem.Archive{
|
|
|
|
BasePath: basePath,
|
|
|
|
Ignore: ignore,
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Info("creating backup for server...")
|
2020-12-25 19:52:57 +00:00
|
|
|
if err := a.Create(s.Path()); err != nil {
|
2020-11-28 23:57:10 +00:00
|
|
|
return nil, err
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Info("created backup successfully")
|
2020-04-27 00:20:26 +00:00
|
|
|
|
2020-05-10 02:24:30 +00:00
|
|
|
rc, err := os.Open(s.Path())
|
2020-04-27 00:20:26 +00:00
|
|
|
if err != nil {
|
2020-11-28 23:57:10 +00:00
|
|
|
return nil, err
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
|
|
|
defer rc.Close()
|
|
|
|
|
2020-10-31 23:47:41 +00:00
|
|
|
if err := s.generateRemoteRequest(rc); err != nil {
|
2020-11-28 23:57:10 +00:00
|
|
|
return nil, err
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
|
|
|
|
2020-11-28 23:57:10 +00:00
|
|
|
return s.Details(), nil
|
2020-04-17 20:46:36 +00:00
|
|
|
}
|
|
|
|
|
2020-10-31 23:47:41 +00:00
|
|
|
// Reader provides a wrapper around an existing io.Reader
// but implements io.Closer in order to satisfy an io.ReadCloser.
type Reader struct {
	io.Reader
}

// Close is a no-op; it exists only so that a Reader can be used where an
// io.ReadCloser is required (e.g. as an http.Request body).
func (Reader) Close() error {
	return nil
}
|
|
|
|
|
2020-05-10 02:24:30 +00:00
|
|
|
// Generates the remote S3 request and begins the upload.
|
2020-10-31 23:47:41 +00:00
|
|
|
func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) error {
|
|
|
|
defer rc.Close()
|
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Debug("attempting to get size of backup...")
|
2020-10-31 23:47:41 +00:00
|
|
|
size, err := s.Backup.Size()
|
2020-05-10 02:24:30 +00:00
|
|
|
if err != nil {
|
2020-10-31 23:47:41 +00:00
|
|
|
return err
|
2020-05-10 02:24:30 +00:00
|
|
|
}
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("size", size).Debug("got size of backup")
|
2020-05-10 02:24:30 +00:00
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Debug("attempting to get S3 upload urls from Panel...")
|
2020-10-31 23:47:41 +00:00
|
|
|
urls, err := api.New().GetBackupRemoteUploadURLs(s.Backup.Uuid, size)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().Debug("got S3 upload urls from the Panel")
|
|
|
|
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
|
2020-10-31 23:47:41 +00:00
|
|
|
|
|
|
|
handlePart := func(part string, size int64) (string, error) {
|
|
|
|
r, err := http.NewRequest(http.MethodPut, part, nil)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
r.ContentLength = size
|
|
|
|
r.Header.Add("Content-Length", strconv.Itoa(int(size)))
|
2020-05-10 02:24:30 +00:00
|
|
|
r.Header.Add("Content-Type", "application/x-gzip")
|
2020-10-31 23:47:41 +00:00
|
|
|
|
2020-11-01 17:30:25 +00:00
|
|
|
// Limit the reader to the size of the part.
|
2020-12-06 20:56:17 +00:00
|
|
|
r.Body = Reader{Reader: io.LimitReader(rc, size)}
|
2020-10-31 23:47:41 +00:00
|
|
|
|
2020-11-01 17:30:25 +00:00
|
|
|
// This http request can block forever due to it not having a timeout,
|
|
|
|
// but we are uploading up to 5GB of data, so there is not really
|
|
|
|
// a good way to handle a timeout on this.
|
2020-10-31 23:47:41 +00:00
|
|
|
res, err := http.DefaultClient.Do(r)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
defer res.Body.Close()
|
|
|
|
|
2020-11-01 17:30:25 +00:00
|
|
|
// Handle non-200 status codes.
|
2020-10-31 23:47:41 +00:00
|
|
|
if res.StatusCode != http.StatusOK {
|
|
|
|
return "", fmt.Errorf("failed to put S3 object part, %d:%s", res.StatusCode, res.Status)
|
|
|
|
}
|
|
|
|
|
2020-11-01 17:30:25 +00:00
|
|
|
// Get the ETag from the uploaded part, this should be sent with the CompleteMultipartUpload request.
|
2020-10-31 23:47:41 +00:00
|
|
|
return res.Header.Get("ETag"), nil
|
2020-04-27 00:20:26 +00:00
|
|
|
}
|
2020-05-10 02:24:30 +00:00
|
|
|
|
2020-10-31 23:47:41 +00:00
|
|
|
for i, part := range urls.Parts {
|
2020-11-01 17:30:25 +00:00
|
|
|
// Get the size for the current part.
|
|
|
|
var partSize int64
|
2020-12-28 00:16:40 +00:00
|
|
|
if i+1 < len(urls.Parts) {
|
2020-11-01 17:30:25 +00:00
|
|
|
partSize = urls.PartSize
|
2020-10-31 23:47:41 +00:00
|
|
|
} else {
|
2020-11-01 17:30:25 +00:00
|
|
|
// This is the remaining size for the last part,
|
|
|
|
// there is not a minimum size limit for the last part.
|
|
|
|
partSize = size - (int64(i) * urls.PartSize)
|
2020-10-31 23:47:41 +00:00
|
|
|
}
|
|
|
|
|
2020-11-01 17:30:25 +00:00
|
|
|
// Attempt to upload the part.
|
2020-12-06 22:25:11 +00:00
|
|
|
if _, err := handlePart(part, partSize); err != nil {
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
|
2020-10-31 23:47:41 +00:00
|
|
|
return err
|
|
|
|
}
|
2020-12-27 19:54:18 +00:00
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
|
2020-10-31 23:47:41 +00:00
|
|
|
}
|
|
|
|
|
2020-12-28 00:16:40 +00:00
|
|
|
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
|
2020-05-10 02:24:30 +00:00
|
|
|
|
2020-10-31 23:47:41 +00:00
|
|
|
return nil
|
2020-04-17 20:46:36 +00:00
|
|
|
}
|