Get general concept for backup restoration using a unified interface implemented
@@ -1,6 +1,7 @@
package server

import (
	"io"
	"io/ioutil"
	"os"

@@ -50,9 +51,9 @@ func (s *Server) getServerwideIgnoredFiles() (string, error) {
	return string(b), nil
}

// Performs a server backup and then emits the event over the server websocket. We
// let the actual backup system handle notifying the panel of the status, but that
// won't emit a websocket event.
// Backup performs a server backup and then emits the event over the server
// websocket. We let the actual backup system handle notifying the panel of the
// status, but that won't emit a websocket event.
func (s *Server) Backup(b backup.BackupInterface) error {
	ignored := b.Ignored()
	if b.Ignored() == "" {

@@ -108,3 +109,43 @@ func (s *Server) Backup(b backup.BackupInterface) error {

	return nil
}

// RestoreBackup calls the Restore function on the provided backup. Once this
// restoration is completed an event is emitted to the websocket to notify the
// Panel that it has been completed.
//
// In addition to the websocket event an API call is triggered to notify the
// Panel of the new state.
func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (err error) {
	s.Config().SetSuspended(true)
	// Local backups will not pass a reader through to this function, so check first
	// to make sure it is a valid reader before trying to close it.
	defer func() {
		s.Config().SetSuspended(false)
		if reader != nil {
			reader.Close()
		}
	}()
	// Don't try to restore the server until we have completely stopped the running
	// instance, otherwise you'll likely hit all types of write errors due to the
	// server being suspended.
	err = s.Environment.WaitForStop(120, false)
	if err != nil {
		return err
	}
	// Send an API call to the Panel as soon as this function is done running so that
	// the Panel is informed of the restoration status of this backup.
	defer func() {
		if err := api.New().SendRestorationStatus(b.Identifier(), err == nil); err != nil {
			s.Log().WithField("error", err).WithField("backup", b.Identifier()).Error("failed to notify Panel of backup restoration status")
		}
	}()

	// Attempt to restore the backup to the server by running through each entry
	// in the file one at a time and writing them to the disk.
	err = b.Restore(reader, func(file string, r io.Reader) error {
		return s.Filesystem().Writefile(file, r)
	})

	return err
}
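
A note on the deferred Panel notification above: it works because err is a named return value, so the deferred closure observes whatever error RestoreBackup ultimately returns, including the one produced by b.Restore at the very end. A minimal standalone sketch of that Go pattern (names and messages here are illustrative, not wings code):

package main

import (
	"errors"
	"fmt"
)

// run mimics the shape of RestoreBackup: a named error return plus a deferred
// closure that reports the outcome after the function has decided it.
func run(fail bool) (err error) {
	defer func() {
		// Runs after "return", so err already holds the value being returned.
		fmt.Println("notify panel, success:", err == nil)
	}()
	if fail {
		return errors.New("restore failed")
	}
	return nil
}

func main() {
	_ = run(true)  // prints: notify panel, success: false
	_ = run(false) // prints: notify panel, success: true
}
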
@@ -20,6 +20,10 @@ const (
	S3BackupAdapter AdapterType = "s3"
)

// RestoreCallback is a generic restoration callback that exists for both local
// and remote backups allowing the files to be restored.
type RestoreCallback func(file string, r io.Reader) error

type ArchiveDetails struct {
	Checksum     string `json:"checksum"`
	ChecksumType string `json:"checksum_type"`

@@ -51,35 +55,33 @@ type Backup struct {

// noinspection GoNameStartsWithPackageName
type BackupInterface interface {
	// Returns the UUID of this backup as tracked by the panel instance.
	// Identifier returns the UUID of this backup as tracked by the panel
	// instance.
	Identifier() string

	// Attaches additional context to the log output for this backup.
	// WithLogContext attaches additional context to the log output for this
	// backup.
	WithLogContext(map[string]interface{})

	// Generates a backup in whatever the configured source for the specific
	// implementation is.
	// Generate creates a backup in whatever the configured source for the
	// specific implementation is.
	Generate(string, string) (*ArchiveDetails, error)

	// Returns the ignored files for this backup instance.
	// Ignored returns the ignored files for this backup instance.
	Ignored() string

	// Returns a SHA1 checksum for the generated backup.
	// Checksum returns a SHA1 checksum for the generated backup.
	Checksum() ([]byte, error)

	// Returns the size of the generated backup.
	// Size returns the size of the generated backup.
	Size() (int64, error)

	// Returns the path to the backup on the machine. This is not always the final
	// storage location of the backup, simply the location we're using to store
	// it until it is moved to the final spot.
	// Path returns the path to the backup on the machine. This is not always
	// the final storage location of the backup, simply the location we're using
	// to store it until it is moved to the final spot.
	Path() string

	// Returns details about the archive.
	// Details returns details about the archive.
	Details() *ArchiveDetails

	// Removes a backup file.
	// Remove removes a backup file.
	Remove() error
	// Restore is called when a backup is ready to be restored to the disk from
	// the given source. Not every backup implementation will support this nor
	// will every implementation require a reader be provided.
	Restore(reader io.Reader, callback RestoreCallback) error
}

func (b *Backup) Identifier() string {
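
RestoreCallback is the whole contract between a backup implementation and its caller: given a file's path inside the archive and a reader for its contents, persist it somewhere. As a rough illustration of that contract, here is a hypothetical standard-library-only callback; wings itself routes writes through the server's Filesystem().Writefile instead, and writeTo/dest are made-up names:

package main

import (
	"io"
	"os"
	"path/filepath"
	"strings"
)

// writeTo returns a function with the RestoreCallback signature that writes
// every restored entry beneath dest, creating parent directories as needed.
func writeTo(dest string) func(file string, r io.Reader) error {
	return func(file string, r io.Reader) error {
		target := filepath.Join(dest, file)
		if err := os.MkdirAll(filepath.Dir(target), 0o755); err != nil {
			return err
		}
		f, err := os.Create(target)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(f, r)
		return err
	}
}

func main() {
	cb := writeTo(os.TempDir())
	_ = cb("example/server.properties", strings.NewReader("illustrative content"))
}
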
@@ -2,7 +2,11 @@ package backup

import (
	"errors"
	"io"
	"os"

	"github.com/mholt/archiver/v3"
	"github.com/pterodactyl/wings/system"
)

type LocalBackup struct {

@@ -70,3 +74,17 @@ func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error)
	return b.Details(), nil
}

// Restore will walk over the archive and call the callback function for each
// file encountered.
func (b *LocalBackup) Restore(_ io.Reader, callback RestoreCallback) error {
	return archiver.Walk(b.Path(), func(f archiver.File) error {
		if f.IsDir() {
			return nil
		}
		name, err := system.ExtractArchiveSourceName(f, "/")
		if err != nil {
			return err
		}
		return callback(name, f)
	})
}
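
Since LocalBackup.Restore ignores its reader argument and walks the archive already on disk at b.Path(), a caller only needs to supply the callback; this matches the note in RestoreBackup that local backups do not pass a reader through. Reusing the hypothetical writeTo sketch from earlier, usage would reduce to a fragment along these lines (b is assumed to be an existing *LocalBackup, and the target path is a placeholder):

// Fragment only, not runnable on its own.
if err := b.Restore(nil, writeTo("/srv/restore-target")); err != nil {
	log.Printf("local restore failed: %v", err)
}
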
@@ -1,13 +1,17 @@
package backup

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"

	"github.com/juju/ratelimit"
	"github.com/pterodactyl/wings/api"
	"github.com/pterodactyl/wings/config"
)

type S3Backup struct {

@@ -149,3 +153,40 @@ func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) error {

	return nil
}

// Restore will read from the provided reader assuming that it is a gzipped
// tar reader. When a file is encountered in the archive the callback function
// will be triggered. If the callback returns an error the entire process is
// stopped, otherwise this function will run until all files have been written.
//
// This restoration streams the archive and writes each file to the disk
// sequentially as it is read from the provided reader.
func (s *S3Backup) Restore(r io.Reader, callback RestoreCallback) error {
	reader := r
	// Steal the logic we use for making backups which will be applied when restoring
	// this specific backup. This allows us to prevent overloading the disk unintentionally.
	if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
		reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
	}
	gr, err := gzip.NewReader(reader)
	if err != nil {
		return err
	}
	defer gr.Close()
	tr := tar.NewReader(gr)
	for {
		header, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if header.Typeflag == tar.TypeReg {
			if err := callback(header.Name, tr); err != nil {
				return err
			}
		}
	}
	return nil
}
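
To make the streaming behaviour above concrete, here is a self-contained sketch that builds a tiny gzipped tar archive in memory and then replays the same gzip, tar, callback loop, acting only on regular file entries. Everything here is illustrative (the entry name is made up) and uses only the standard library; the rate limiting and the real callback are omitted:

package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// Build a one-file .tar.gz entirely in memory; error handling on the write
	// side is elided for brevity.
	var buf bytes.Buffer
	gw := gzip.NewWriter(&buf)
	tw := tar.NewWriter(gw)
	content := []byte("illustrative file contents")
	_ = tw.WriteHeader(&tar.Header{
		Name:     "config/server.properties",
		Mode:     0o644,
		Size:     int64(len(content)),
		Typeflag: tar.TypeReg,
	})
	_, _ = tw.Write(content)
	_ = tw.Close()
	_ = gw.Close()

	// Replay the same restore loop: gunzip, iterate tar entries, and hand each
	// regular file to a "callback" (here it just reports the name and size).
	gr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer gr.Close()
	tr := tar.NewReader(gr)
	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		if header.Typeflag != tar.TypeReg {
			continue
		}
		data, err := io.ReadAll(tr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("restored %s (%d bytes)\n", header.Name, len(data))
	}
}
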
@@ -115,12 +115,10 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
		return err
	}

	// Walk over all of the files spinning up an additional go-routine for each file we've encountered
	// and then extract that file from the archive and write it to the disk. If any part of this process
	// encounters an error the entire process will be stopped.
	// Walk all of the files in the archiver file and write them to the disk. If any
	// directory is encountered it will be skipped since we handle creating any missing
	// directories automatically when writing files.
	err = archiver.Walk(source, func(f archiver.File) error {
		// Don't waste time with directories, we don't need to create them if they have no contents, and
		// we will ensure the directory exists when opening the file for writing anyways.
		if f.IsDir() {
			return nil
		}