package backup

import (
	"archive/tar"
	"context"
	"io"
	"os"
	"strings"
	"sync"

	"github.com/apex/log"
	gzip "github.com/klauspost/pgzip"
	"github.com/remeh/sizedwaitgroup"
	"golang.org/x/sync/errgroup"
)

type Archive struct {
	sync.Mutex

	// TrimPrefix is stripped from the start of every file path so that entries in the
	// archive are relative to the server's data directory rather than absolute paths.
	TrimPrefix string

	// Files is the set of files (and their stat information) to write into the archive.
	Files *IncludedFiles
}

// Create builds a gzipped tar archive at dst containing all of the files defined in the
// included files struct, and returns the stat information for the finished archive file.
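//
// A minimal usage sketch, assuming an *IncludedFiles value (files) has already been
// populated by the caller; the prefix and paths below are illustrative only:
//
//	a := &Archive{
//		TrimPrefix: "/srv/daemon-data/<server-uuid>",
//		Files:      files,
//	}
//	if _, err := a.Create("/srv/backups/backup.tar.gz", context.Background()); err != nil {
//		// handle the error
//	}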
func (a *Archive) Create(dst string, ctx context.Context) (os.FileInfo, error) {
	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Stack the writers so that file contents flow tar -> gzip -> file on disk. The
	// deferred closes run in reverse order, flushing the tar footer and gzip trailer
	// before the file itself is closed.
	gzw := gzip.NewWriter(f)
	defer gzw.Close()

	tw := tar.NewWriter(gzw)
	defer tw.Close()

	// Allow at most 10 files to be written to the archive at the same time.
	wg := sizedwaitgroup.New(10)
	g, ctx := errgroup.WithContext(ctx)

	// Iterate over all of the files to be included and put them into the archive. This is
	// done in concurrent goroutines to speed things along. If an error is encountered at
	// any step, the entire process is aborted.
	for p, s := range a.Files.All() {
		if (*s).IsDir() {
			continue
		}

		// Re-assign the loop variables so that each goroutine captures its own copy.
		pa := p
		st := s

		g.Go(func() error {
			wg.Add()
			defer wg.Done()

			// If the context has already been cancelled there is no point writing this
			// file; bail out with the context's error instead.
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				return a.addToArchive(pa, st, tw)
			}
		})
	}

	// Block until all of the files have been processed.
	if err := g.Wait(); err != nil {
		f.Close()

		// Attempt to remove the archive if there is an error, and report to the logger
		// if that cleanup fails as well.
		if rerr := os.Remove(dst); rerr != nil && !os.IsNotExist(rerr) {
			log.WithField("location", dst).WithField("error", rerr).Warn("failed to delete corrupted backup archive")
		}

		return nil, err
	}

	// Explicitly close the tar and gzip writers before stating the file, otherwise data
	// still buffered in them will not have been flushed to disk and the reported size
	// would be short. The deferred closes above become no-ops once these succeed.
	if err := tw.Close(); err != nil {
		return nil, err
	}

	if err := gzw.Close(); err != nil {
		return nil, err
	}

	st, err := f.Stat()
	if err != nil {
		return nil, err
	}

	return st, nil
}

// addToArchive adds a single file to the existing tar archive writer.
func (a *Archive) addToArchive(p string, s *os.FileInfo, w *tar.Writer) error {
	f, err := os.Open(p)
	if err != nil {
		return err
	}
	defer f.Close()

	st := *s
	header := &tar.Header{
		// Trim the long server path from the name of the file so that the resulting
		// archive is exactly how the user would see it in the panel file manager.
		Name:    strings.TrimPrefix(p, a.TrimPrefix),
		Size:    st.Size(),
		Mode:    int64(st.Mode()),
		ModTime: st.ModTime(),
	}

	// These actions must occur sequentially, even if this function is called multiple
	// times in parallel. You'll get some nasty panics otherwise.
	a.Lock()
	defer a.Unlock()

	if err := w.WriteHeader(header); err != nil {
		return err
	}

	if _, err := io.Copy(w, f); err != nil {
		return err
	}

	return nil
}
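
// A cancellation sketch (illustrative only; the code that owns the context is assumed to
// live elsewhere in the daemon): Create wires the caller's context into an errgroup, so
// cancelling that context skips any files that have not started copying yet, removes the
// partial archive, and returns the context's error.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
//	defer cancel()
//	if _, err := a.Create("/srv/backups/backup.tar.gz", ctx); err != nil {
//		// err is ctx.Err() if the deadline was reached before the archive finished
//	}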