Abstract out shared backup functionality
parent 507d0100cf
commit b2797ed292
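
In short: the fields and helpers shared by the local and S3 adapters move onto a common Backup struct, the old Backup interface is renamed to BackupInterface, and its archive-producing method becomes Generate. A rough sketch of the resulting shape (simplified; the full definitions are in the diffs below):

    // Simplified sketch of the refactor, not the literal code from this commit.
    type Backup struct {
        Uuid         string   `json:"uuid"`
        IgnoredFiles []string `json:"ignored_files"`
    }
    // Identifier, Path, Size, Checksum, Details and Ignored become methods on *Backup.

    type BackupInterface interface {
        Identifier() string
        Generate(*IncludedFiles, string) error
        Ignored() []string
        Remove() error
        // ...remaining methods as defined in backup.go
    }

    // Adapters embed Backup and only add what is specific to them.
    type LocalBackup struct {
        Backup
    }

    type S3Backup struct {
        Backup
        PresignedUrl string
    }
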
@@ -17,7 +17,7 @@ func postServerBackup(c *gin.Context) {
 	data := &backup.Request{}
 	c.BindJSON(&data)
 
-	var adapter backup.Backup
+	var adapter backup.BackupInterface
 	var err error
 
 	switch data.Adapter {
@@ -35,7 +35,7 @@ func postServerBackup(c *gin.Context) {
 		return
 	}
 
-	go func(b backup.Backup, serv *server.Server) {
+	go func(b backup.BackupInterface, serv *server.Server) {
 		if err := serv.Backup(b); err != nil {
 			zap.S().Errorw("failed to generate backup for server", zap.Error(err))
 		}
@@ -79,14 +79,14 @@ func (s *Server) GetIncludedBackupFiles(ignored []string) (*backup.IncludedFiles
 // Performs a server backup and then emits the event over the server websocket. We
 // let the actual backup system handle notifying the panel of the status, but that
 // won't emit a websocket event.
-func (s *Server) Backup(b backup.Backup) error {
+func (s *Server) Backup(b backup.BackupInterface) error {
 	// Get the included files based on the root path and the ignored files provided.
 	inc, err := s.GetIncludedBackupFiles(b.Ignored())
 	if err != nil {
 		return errors.WithStack(err)
 	}
 
-	if err := b.Backup(inc, s.Filesystem.Path()); err != nil {
+	if err := b.Generate(inc, s.Filesystem.Path()); err != nil {
 		if notifyError := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); notifyError != nil {
 			zap.S().Warnw("failed to notify panel of failed backup state", zap.String("backup", b.Identifier()), zap.Error(err))
 		}
@@ -1,9 +1,16 @@
 package backup
 
 import (
-	"errors"
-	"fmt"
+	"crypto/sha256"
+	"encoding/hex"
+	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/api"
+	"github.com/pterodactyl/wings/config"
+	"go.uber.org/zap"
+	"io"
+	"os"
+	"path"
+	"sync"
 )
 
 const (
@@ -11,49 +18,38 @@ const (
 	S3BackupAdapter = "s3"
 )
 
-type Request struct {
-	Adapter string `json:"adapter"`
-	Uuid string `json:"uuid"`
+type ArchiveDetails struct {
+	Checksum string `json:"checksum"`
+	Size int64 `json:"size"`
+}
+
+// Returns a request object.
+func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
+	return api.BackupRequest{
+		Checksum: ad.Checksum,
+		Size: ad.Size,
+		Successful: successful,
+	}
+}
+
+type Backup struct {
+	// The UUID of this backup object. This must line up with a backup from
+	// the panel instance.
+	Uuid string `json:"uuid"`
+
+	// An array of files to ignore when generating this backup. This should be
+	// compatible with a standard .gitignore structure.
 	IgnoredFiles []string `json:"ignored_files"`
-	PresignedUrl string `json:"presigned_url"`
 }
 
-// Generates a new local backup struct.
-func (r *Request) NewLocalBackup() (*LocalBackup, error) {
-	if r.Adapter != LocalBackupAdapter {
-		return nil, errors.New(fmt.Sprintf("cannot create local backup using [%s] adapter", r.Adapter))
-	}
-
-	return &LocalBackup{
-		Uuid: r.Uuid,
-		IgnoredFiles: r.IgnoredFiles,
-	}, nil
-}
-
-// Generates a new S3 backup struct.
-func (r *Request) NewS3Backup() (*S3Backup, error) {
-	if r.Adapter != S3BackupAdapter {
-		return nil, errors.New(fmt.Sprintf("cannot create s3 backup using [%s] adapter", r.Adapter))
-	}
-
-	if len(r.PresignedUrl) == 0 {
-		return nil, errors.New("a valid presigned S3 upload URL must be provided to use the [s3] adapter")
-	}
-
-	return &S3Backup{
-		Uuid: r.Uuid,
-		IgnoredFiles: r.IgnoredFiles,
-		PresignedUrl: r.PresignedUrl,
-	}, nil
-}
-
-type Backup interface {
+// noinspection GoNameStartsWithPackageName
+type BackupInterface interface {
 	// Returns the UUID of this backup as tracked by the panel instance.
 	Identifier() string
 
 	// Generates a backup in whatever the configured source for the specific
 	// implementation is.
-	Backup(*IncludedFiles, string) error
+	Generate(*IncludedFiles, string) error
 
 	// Returns the ignored files for this backup instance.
 	Ignored() []string
@@ -76,16 +72,80 @@ type Backup interface {
 	Remove() error
 }
 
-type ArchiveDetails struct {
-	Checksum string `json:"checksum"`
-	Size int64 `json:"size"`
+func (b *Backup) Identifier() string {
+	return b.Uuid
 }
 
-// Returns a request object.
-func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
-	return api.BackupRequest{
-		Checksum: ad.Checksum,
-		Size: ad.Size,
-		Successful: successful,
-	}
+// Returns the path for this specific backup.
+func (b *Backup) Path() string {
+	return path.Join(config.Get().System.BackupDirectory, b.Identifier()+".tar.gz")
 }
+
+// Return the size of the generated backup.
+func (b *Backup) Size() (int64, error) {
+	st, err := os.Stat(b.Path())
+	if err != nil {
+		return 0, errors.WithStack(err)
+	}
+
+	return st.Size(), nil
+}
+
+// Returns the SHA256 checksum of a backup.
+func (b *Backup) Checksum() ([]byte, error) {
+	h := sha256.New()
+
+	f, err := os.Open(b.Path())
+	if err != nil {
+		return []byte{}, errors.WithStack(err)
+	}
+	defer f.Close()
+
+	if _, err := io.Copy(h, f); err != nil {
+		return []byte{}, errors.WithStack(err)
+	}
+
+	return h.Sum(nil), nil
+}
+
+// Returns details of the archive by utilizing two go-routines to get the checksum and
+// the size of the archive.
+func (b *Backup) Details() *ArchiveDetails {
+	wg := sync.WaitGroup{}
+	wg.Add(2)
+
+	var checksum string
+	// Calculate the checksum for the file.
+	go func() {
+		defer wg.Done()
+
+		resp, err := b.Checksum()
+		if err != nil {
+			zap.S().Errorw("failed to calculate checksum for backup", zap.String("backup", b.Uuid), zap.Error(err))
+		}
+
+		checksum = hex.EncodeToString(resp)
+	}()
+
+	var sz int64
+	go func() {
+		defer wg.Done()
+
+		if s, err := b.Size(); err != nil {
+			return
+		} else {
+			sz = s
+		}
+	}()
+
+	wg.Wait()
+
+	return &ArchiveDetails{
+		Checksum: checksum,
+		Size: sz,
+	}
+}
+
+func (b *Backup) Ignored() []string {
+	return b.IgnoredFiles
+}
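
A consequence of the shared struct: a new adapter only needs to embed Backup and supply the adapter-specific methods, since the promoted *Backup methods cover the rest of BackupInterface. A minimal sketch of a hypothetical extra adapter (FtpBackup, its Endpoint field, and the stub Generate body are illustrative assumptions, not part of this commit):

    package backup

    import "os"

    // Hypothetical adapter used only to illustrate the abstraction. Embedding
    // Backup provides Identifier, Path, Size, Checksum, Details and Ignored,
    // assuming (as the diff suggests) those cover the rest of BackupInterface.
    type FtpBackup struct {
        Backup

        // Where the finished archive would be pushed; assumed field.
        Endpoint string
    }

    // Compile-time check, matching the pattern used for the real adapters.
    var _ BackupInterface = (*FtpBackup)(nil)

    // Generate would build the archive and push it to Endpoint, mirroring
    // LocalBackup.Generate and S3Backup.Generate; the body is elided here.
    func (f *FtpBackup) Generate(included *IncludedFiles, prefix string) error {
        return nil
    }

    // Remove deletes the local copy of the archive, same as the other adapters.
    func (f *FtpBackup) Remove() error {
        return os.Remove(f.Path())
    }
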
@@ -2,35 +2,24 @@ package backup
 
 import (
 	"context"
-	"crypto/sha256"
-	"encoding/hex"
 	"github.com/pkg/errors"
-	"github.com/pterodactyl/wings/config"
-	"go.uber.org/zap"
-	"io"
 	"os"
-	"path"
-	"sync"
 )
 
 type LocalBackup struct {
-	// The UUID of this backup object. This must line up with a backup from
-	// the panel instance.
-	Uuid string `json:"uuid"`
-
-	// An array of files to ignore when generating this backup. This should be
-	// compatible with a standard .gitignore structure.
-	IgnoredFiles []string `json:"ignored_files"`
+	Backup
 }
 
-var _ Backup = (*LocalBackup)(nil)
+var _ BackupInterface = (*LocalBackup)(nil)
 
 // Locates the backup for a server and returns the local path. This will obviously only
 // work if the backup was created as a local backup.
 func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
 	b := &LocalBackup{
-		Uuid: uuid,
-		IgnoredFiles: nil,
+		Backup{
+			Uuid: uuid,
+			IgnoredFiles: nil,
+		},
 	}
 
 	st, err := os.Stat(b.Path())
@@ -45,32 +34,6 @@ func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
 	return b, st, nil
 }
 
-func (b *LocalBackup) Identifier() string {
-	return b.Uuid
-}
-
-// Returns the path for this specific backup.
-func (b *LocalBackup) Path() string {
-	return path.Join(config.Get().System.BackupDirectory, b.Uuid+".tar.gz")
-}
-
-// Returns the SHA256 checksum of a backup.
-func (b *LocalBackup) Checksum() ([]byte, error) {
-	h := sha256.New()
-
-	f, err := os.Open(b.Path())
-	if err != nil {
-		return []byte{}, errors.WithStack(err)
-	}
-	defer f.Close()
-
-	if _, err := io.Copy(h, f); err != nil {
-		return []byte{}, errors.WithStack(err)
-	}
-
-	return h.Sum(nil), nil
-}
-
 // Removes a backup from the system.
 func (b *LocalBackup) Remove() error {
 	return os.Remove(b.Path())
@@ -78,7 +41,7 @@ func (b *LocalBackup) Remove() error {
 
 // Generates a backup of the selected files and pushes it to the defined location
 // for this instance.
-func (b *LocalBackup) Backup(included *IncludedFiles, prefix string) error {
+func (b *LocalBackup) Generate(included *IncludedFiles, prefix string) error {
 	a := &Archive{
 		TrimPrefix: prefix,
 		Files: included,
@@ -88,55 +51,3 @@ func (b *LocalBackup) Backup(included *IncludedFiles, prefix string) error {
 
 	return err
 }
-
-// Return the size of the generated backup.
-func (b *LocalBackup) Size() (int64, error) {
-	st, err := os.Stat(b.Path())
-	if err != nil {
-		return 0, errors.WithStack(err)
-	}
-
-	return st.Size(), nil
-}
-
-// Returns details of the archive by utilizing two go-routines to get the checksum and
-// the size of the archive.
-func (b *LocalBackup) Details() *ArchiveDetails {
-	wg := sync.WaitGroup{}
-	wg.Add(2)
-
-	var checksum string
-	// Calculate the checksum for the file.
-	go func() {
-		defer wg.Done()
-
-		resp, err := b.Checksum()
-		if err != nil {
-			zap.S().Errorw("failed to calculate checksum for backup", zap.String("backup", b.Uuid), zap.Error(err))
-		}
-
-		checksum = hex.EncodeToString(resp)
-	}()
-
-	var sz int64
-	go func() {
-		defer wg.Done()
-
-		if s, err := b.Size(); err != nil {
-			return
-		} else {
-			sz = s
-		}
-	}()
-
-	wg.Wait()
-
-	return &ArchiveDetails{
-		Checksum: checksum,
-		Size: sz,
-	}
-}
-
-func (b *LocalBackup) Ignored() []string {
-	return b.IgnoredFiles
-}

server/backup/backup_request.go (new file, 46 lines)

@@ -0,0 +1,46 @@
+package backup
+
+import (
+	"fmt"
+	"github.com/pkg/errors"
+)
+
+type Request struct {
+	Adapter string `json:"adapter"`
+	Uuid string `json:"uuid"`
+	IgnoredFiles []string `json:"ignored_files"`
+	PresignedUrl string `json:"presigned_url"`
+}
+
+// Generates a new local backup struct.
+func (r *Request) NewLocalBackup() (*LocalBackup, error) {
+	if r.Adapter != LocalBackupAdapter {
+		return nil, errors.New(fmt.Sprintf("cannot create local backup using [%s] adapter", r.Adapter))
+	}
+
+	return &LocalBackup{
+		Backup{
+			Uuid: r.Uuid,
+			IgnoredFiles: r.IgnoredFiles,
+		},
+	}, nil
+}
+
+// Generates a new S3 backup struct.
+func (r *Request) NewS3Backup() (*S3Backup, error) {
+	if r.Adapter != S3BackupAdapter {
+		return nil, errors.New(fmt.Sprintf("cannot create s3 backup using [%s] adapter", r.Adapter))
+	}
+
+	if len(r.PresignedUrl) == 0 {
+		return nil, errors.New("a valid presigned S3 upload URL must be provided to use the [s3] adapter")
+	}
+
+	return &S3Backup{
+		Backup: Backup{
+			Uuid: r.Uuid,
+			IgnoredFiles: r.IgnoredFiles,
+		},
+		PresignedUrl: r.PresignedUrl,
+	}, nil
+}
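
The adapter switch in postServerBackup sits between the hunks shown earlier, so its body is not part of this diff; a plausible shape, assuming the handler simply maps the adapter constants onto the two constructors from backup_request.go (the helper name and the package name are illustrative):

    // Hypothetical helper showing how a caller can turn a bound backup.Request
    // into the BackupInterface that is handed to Server.Backup.
    package router // package name assumed

    import (
        "errors"

        "github.com/pterodactyl/wings/server/backup"
    )

    func adapterFromRequest(data *backup.Request) (backup.BackupInterface, error) {
        switch data.Adapter {
        case backup.LocalBackupAdapter:
            b, err := data.NewLocalBackup()
            if err != nil {
                return nil, err
            }
            return b, nil
        case backup.S3BackupAdapter:
            b, err := data.NewS3Backup()
            if err != nil {
                return nil, err
            }
            return b, nil
        default:
            return nil, errors.New("unsupported backup adapter: " + data.Adapter)
        }
    }
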
@@ -2,25 +2,15 @@ package backup
 
 import (
 	"context"
-	"crypto/sha256"
-	"fmt"
 	"github.com/pkg/errors"
-	"github.com/pterodactyl/wings/config"
 	"io"
 	"net/http"
 	"os"
-	"path"
 	"strconv"
 )
 
 type S3Backup struct {
-	// The UUID of this backup object. This must line up with a backup from
-	// the panel instance.
-	Uuid string
-
-	// An array of files to ignore when generating this backup. This should be
-	// compatible with a standard .gitignore structure.
-	IgnoredFiles []string
+	Backup
 
 	// The pre-signed upload endpoint for the generated backup. This must be
 	// provided otherwise this request will fail. This allows us to keep all
@@ -29,13 +19,9 @@ type S3Backup struct {
 	PresignedUrl string
 }
 
-var _ Backup = (*S3Backup)(nil)
+var _ BackupInterface = (*S3Backup)(nil)
 
-func (s *S3Backup) Identifier() string {
-	return s.Uuid
-}
-
-func (s *S3Backup) Backup(included *IncludedFiles, prefix string) error {
+func (s *S3Backup) Generate(included *IncludedFiles, prefix string) error {
 	defer s.Remove()
 
 	a := &Archive{
@@ -85,39 +71,6 @@ func (s *S3Backup) Backup(included *IncludedFiles, prefix string) error {
 	return nil
 }
 
-// Return the size of the generated backup.
-func (s *S3Backup) Size() (int64, error) {
-	st, err := os.Stat(s.Path())
-	if err != nil {
-		return 0, errors.WithStack(err)
-	}
-
-	return st.Size(), nil
-}
-
-// Returns the path for this specific backup. S3 backups are only stored on the disk
-// long enough for us to get the details we need before uploading them to S3.
-func (s *S3Backup) Path() string {
-	return path.Join(config.Get().System.BackupDirectory, s.Uuid+".tmp")
-}
-
-// Returns the SHA256 checksum of a backup.
-func (s *S3Backup) Checksum() ([]byte, error) {
-	h := sha256.New()
-
-	f, err := os.Open(s.Path())
-	if err != nil {
-		return []byte{}, errors.WithStack(err)
-	}
-	defer f.Close()
-
-	if _, err := io.Copy(h, f); err != nil {
-		return []byte{}, errors.WithStack(err)
-	}
-
-	return h.Sum(nil), nil
-}
-
 // Removes a backup from the system.
 func (s *S3Backup) Remove() error {
 	return os.Remove(s.Path())
@@ -129,7 +82,3 @@ func (s *S3Backup) Details() *ArchiveDetails {
 		Size: 1024,
 	}
 }
-
-func (s *S3Backup) Ignored() []string {
-	return s.IgnoredFiles
-}