server(filesystem): fix inaccurate archive progress (#145)

Matthew Penner 2022-11-06 13:38:30 -07:00 committed by GitHub
parent 3337362955
commit eb4df39d14
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 191 additions and 144 deletions
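In short: when building a server archive, the byte counter used to sit between the tar writer and the gzip writer, so it measured the raw tar stream (entry headers and block padding included) rather than the file contents the progress total is derived from. The server/filesystem changes further down add a TarProgress wrapper so only the bytes written as entry contents are counted. A minimal stdlib sketch of the two wirings, using a hypothetical countingWriter in place of the wings Progress type:

package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
	"sync/atomic"
)

// countingWriter is a hypothetical stand-in for wings' Progress: it counts the
// bytes passed through Write and forwards them to the wrapped writer.
type countingWriter struct {
	n int64
	w io.Writer
}

func (c *countingWriter) Write(b []byte) (int, error) {
	atomic.AddInt64(&c.n, int64(len(b)))
	return c.w.Write(b)
}

func main() {
	f, _ := os.Create("/tmp/example.tar.gz")
	defer f.Close()
	gw := gzip.NewWriter(f)
	defer gw.Close()

	// Old wiring: the tar writer wrote into the counter, so tar headers and
	// 512-byte padding were counted on top of the actual file data.
	//   tw := tar.NewWriter(&countingWriter{w: gw})

	// New wiring: the tar writer is built normally and file contents are
	// written through the counter into it, so only entry data is counted.
	tw := tar.NewWriter(gw)
	defer tw.Close()
	cw := &countingWriter{w: tw}

	data := []byte("hello world")
	_ = tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0o644, Size: int64(len(data))})
	_, _ = cw.Write(data) // cw.n == 11, the same quantity a size-based total is computed from
}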

go.mod

@ -35,7 +35,7 @@ require (
github.com/klauspost/pgzip v1.2.5
github.com/magiconair/properties v1.8.6
github.com/mattn/go-colorable v0.1.13
github.com/mholt/archiver/v3 v3.5.1
github.com/mholt/archiver/v4 v4.0.0-alpha.7
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/sftp v1.13.5
@ -88,7 +88,7 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nwaples/rardecode v1.1.3 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
@ -103,9 +103,9 @@ require (
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect

go.sum

@ -95,7 +95,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@ -429,7 +428,6 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@ -544,7 +542,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
@ -606,8 +603,8 @@ github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88J
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mholt/archiver/v3 v3.5.1 h1:rDjOBX9JSF5BvoJGvjqK479aL70qh9DIpZCl+k7Clwo=
github.com/mholt/archiver/v3 v3.5.1/go.mod h1:e3dqJ7H78uzsRSEACH1joayhuSyhnonssnDhppzS1L4=
github.com/mholt/archiver/v4 v4.0.0-alpha.7 h1:xzByj8G8tj0Oq7ZYYU4+ixL/CVb5ruWCm0EZQ1PjOkE=
github.com/mholt/archiver/v4 v4.0.0-alpha.7/go.mod h1:Fs8qUkO74HHaidabihzYephJH8qmGD/nCP6tE5xC9BM=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
@ -639,9 +636,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/nwaples/rardecode v1.1.3 h1:cWCaZwfM5H7nAD6PyEdcVnczzV8i/JtotnyW/dD9lEc=
github.com/nwaples/rardecode v1.1.3/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
@ -694,7 +690,6 @@ github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrap
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@ -824,6 +819,8 @@ github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
@ -838,7 +835,6 @@ github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@ -856,8 +852,6 @@ github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=


@ -13,15 +13,13 @@ import (
"strconv"
"strings"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/config"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gin-gonic/gin"
"golang.org/x/sync/errgroup"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/router/downloader"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/tokens"
@ -442,7 +440,7 @@ func postServerDecompressFiles(c *gin.Context) {
s := middleware.ExtractServer(c)
lg := middleware.ExtractLogger(c).WithFields(log.Fields{"root_path": data.RootPath, "file": data.File})
lg.Debug("checking if space is available for file decompression")
err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File)
err := s.Filesystem().SpaceAvailableForDecompression(context.Background(), data.RootPath, data.File)
if err != nil {
if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) {
lg.WithField("error", err).Warn("failed to decompress file: unknown archive format")
@ -454,7 +452,7 @@ func postServerDecompressFiles(c *gin.Context) {
}
lg.Info("starting file decompression")
if err := s.Filesystem().DecompressFile(data.RootPath, data.File); err != nil {
if err := s.Filesystem().DecompressFile(context.Background(), data.RootPath, data.File); err != nil {
// If the file is busy for some reason just return a nicer error to the user since there is not
// much we specifically can do. They'll need to stop the running server process in order to overwrite
// a file like this.
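Both filesystem calls above now take a context.Context; the HTTP handler simply passes context.Background(), but the new signatures allow a caller to cancel or time-bound a walk over a very large archive. A sketch of such a caller (the helper itself is hypothetical; the method signatures are the ones introduced in this commit):

package example

import (
	"context"
	"time"

	"github.com/pterodactyl/wings/server"
)

// decompressWithTimeout is a hypothetical helper: it gives the disk-space check
// and the extraction a shared five-minute budget instead of an unbounded
// context.Background().
func decompressWithTimeout(s *server.Server, dir, file string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Walks the archive entries and fails early if extraction would exceed the disk limit.
	if err := s.Filesystem().SpaceAvailableForDecompression(ctx, dir, file); err != nil {
		return err
	}
	// Extracts the archive into dir; the walk stops once ctx is canceled.
	return s.Filesystem().DecompressFile(ctx, dir, file)
}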


@ -19,7 +19,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/juju/ratelimit"
"github.com/mholt/archiver/v3"
"github.com/mitchellh/colorstring"
"github.com/pterodactyl/wings/config"
@ -188,7 +187,7 @@ func postServerArchive(c *gin.Context) {
}
// Send the archive progress to the websocket every 5 seconds.
ctx, cancel := context.WithCancel(s.Context())
ctx2, cancel := context.WithCancel(s.Context())
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
@ -200,7 +199,7 @@ func postServerArchive(c *gin.Context) {
sendTransferLog("Archiving " + p.Progress(progressWidth))
}
}
}(ctx, a.Progress, time.NewTicker(5*time.Second))
}(ctx2, a.Progress, time.NewTicker(5*time.Second))
// Attempt to get an archive of the server.
if err := a.Create(getArchivePath(s.ID())); err != nil {
@ -422,9 +421,10 @@ func postTransfer(c *gin.Context) {
data.log().Info("writing transfer archive to disk...")
progress := filesystem.NewProgress(size)
progress.SetWriter(file)
// Send the archive progress to the websocket every 5 seconds.
ctx, cancel := context.WithCancel(ctx)
ctx2, cancel := context.WithCancel(ctx)
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
@ -436,7 +436,7 @@ func postTransfer(c *gin.Context) {
sendTransferLog("Downloading " + p.Progress(progressWidth))
}
}
}(ctx, progress, time.NewTicker(5*time.Second))
}(ctx2, progress, time.NewTicker(5*time.Second))
var reader io.Reader
downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
@ -448,7 +448,7 @@ func postTransfer(c *gin.Context) {
}
buf := make([]byte, 1024*4)
if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
if _, err := io.CopyBuffer(progress, reader, buf); err != nil {
_ = file.Close()
sendTransferLog("Failed while writing archive file to disk: " + err.Error())
@ -495,7 +495,7 @@ func postTransfer(c *gin.Context) {
sendTransferLog("Server environment has been created, extracting transfer archive..")
data.log().Info("server environment configured, extracting transfer archive")
if err := archiver.NewTarGz().Unarchive(data.path(), i.Server().Filesystem().Path()); err != nil {
if err := i.Server().Filesystem().DecompressFileUnsafe(ctx, "/", data.path()); err != nil {
// Un-archiving failed, delete the server's data directory.
if err := os.RemoveAll(i.Server().Filesystem().Path()); err != nil && !os.IsNotExist(err) {
data.log().WithField("error", err).Warn("failed to delete local server files directory")
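Instead of teeing the download through the counter on the read side, the transfer now attaches the destination file to the Progress value and copies into it directly, so the counter and the file always agree on how many bytes have landed on disk. A minimal sketch of that pattern, assuming only the NewProgress/SetWriter/Written API shown in this commit:

package example

import (
	"io"
	"os"

	"github.com/pterodactyl/wings/server/filesystem"
)

// copyWithProgress mirrors the download path above: Progress is used directly
// as the copy destination once a writer has been attached to it.
func copyWithProgress(dst *os.File, src io.Reader, total int64) (int64, error) {
	p := filesystem.NewProgress(total)
	p.SetWriter(dst) // every Write is counted, then forwarded to dst

	buf := make([]byte, 1024*4)
	if _, err := io.CopyBuffer(p, src, buf); err != nil {
		return p.Written(), err
	}
	return p.Written(), nil
}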


@ -24,7 +24,6 @@ func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, suc
"backup": uuid,
"error": err,
}).Error("failed to notify panel of backup status due to wings error")
return err
}
@ -127,7 +126,7 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
defer func() {
s.Config().SetSuspended(false)
if reader != nil {
reader.Close()
_ = reader.Close()
}
}()
// Send an API call to the Panel as soon as this function is done running so that
@ -142,7 +141,7 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
// instance, otherwise you'll likely hit all types of write errors due to the
// server being suspended.
if s.Environment.State() != environment.ProcessOfflineState {
if err = s.Environment.WaitForStop(s.Context(), time.Minute*2, false); err != nil {
if err = s.Environment.WaitForStop(s.Context(), 2*time.Minute, false); err != nil {
if !client.IsErrNotFound(err) {
return errors.WrapIf(err, "server/backup: restore: failed to wait for container stop")
}
@ -152,14 +151,19 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
// Attempt to restore the backup to the server by running through each entry
// in the file one at a time and writing them to the disk.
s.Log().Debug("starting file writing process for backup restoration")
err = b.Restore(s.Context(), reader, func(file string, r io.Reader, mode fs.FileMode, atime, mtime time.Time) error {
err = b.Restore(s.Context(), reader, func(file string, info fs.FileInfo, r io.ReadCloser) error {
defer r.Close()
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
if err := s.Filesystem().Writefile(file, r); err != nil {
return err
}
if err := s.Filesystem().Chmod(file, mode); err != nil {
if err := s.Filesystem().Chmod(file, info.Mode()); err != nil {
return err
}
atime := info.ModTime()
mtime := atime
return s.Filesystem().Chtimes(file, atime, mtime)
})
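The restore callback now receives the archive entry's fs.FileInfo and an io.ReadCloser instead of a bare reader plus mode and timestamps; since archiver/v4 entries only expose a modification time, it is reused for the access time as well. A hedged sketch of a standalone callback with the same shape as the one wired up above:

package example

import (
	"io"
	"io/fs"

	"github.com/pterodactyl/wings/server/backup"
	"github.com/pterodactyl/wings/server/filesystem"
)

// restoreToFilesystem builds a RestoreCallback (hypothetical helper) that writes
// each entry to the given filesystem and mirrors the permission and timestamp
// handling used by RestoreBackup above.
func restoreToFilesystem(fsys *filesystem.Filesystem) backup.RestoreCallback {
	return func(file string, info fs.FileInfo, r io.ReadCloser) error {
		defer r.Close()
		if err := fsys.Writefile(file, r); err != nil {
			return err
		}
		if err := fsys.Chmod(file, info.Mode()); err != nil {
			return err
		}
		mtime := info.ModTime()
		// Only a modification time is available, so it stands in for atime too.
		return fsys.Chtimes(file, mtime, mtime)
	}
}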


@ -8,16 +8,21 @@ import (
"io/fs"
"os"
"path"
"time"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/mholt/archiver/v4"
"golang.org/x/sync/errgroup"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
)
var format = archiver.CompressedArchive{
Compression: archiver.Gz{},
Archival: archiver.Tar{},
}
type AdapterType string
const (
@ -27,7 +32,7 @@ const (
// RestoreCallback is a generic restoration callback that exists for both local
// and remote backups allowing the files to be restored.
type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mtime time.Time) error
type RestoreCallback func(file string, info fs.FileInfo, r io.ReadCloser) error
// noinspection GoNameStartsWithPackageName
type BackupInterface interface {


@ -6,8 +6,10 @@ import (
"os"
"emperror.dev/errors"
"github.com/mholt/archiver/v3"
"github.com/juju/ratelimit"
"github.com/mholt/archiver/v4"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server/filesystem"
)
@ -79,16 +81,27 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
// Restore will walk over the archive and call the callback function for each
// file encountered.
func (b *LocalBackup) Restore(ctx context.Context, _ io.Reader, callback RestoreCallback) error {
	return archiver.Walk(b.Path(), func(f archiver.File) error {
		select {
		case <-ctx.Done():
			// Stop walking if the context is canceled.
			return archiver.ErrStopWalk
		default:
			if f.IsDir() {
				return nil
			}
			return callback(filesystem.ExtractNameFromArchive(f), f, f.Mode(), f.ModTime(), f.ModTime())
		}
	})
	f, err := os.Open(b.Path())
	if err != nil {
		return err
	}
	var reader io.Reader = f
	// Steal the logic we use for making backups which will be applied when restoring
	// this specific backup. This allows us to prevent overloading the disk unintentionally.
	if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
		reader = ratelimit.Reader(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
	}
	if err := format.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error {
		r, err := f.Open()
		if err != nil {
			return err
		}
		defer r.Close()
		return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
	}); err != nil {
		return err
	}
	return nil
}
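The restore path reuses the backup WriteLimit (configured in MiB) to throttle how quickly the archive is read back off disk, via a juju/ratelimit token bucket. A standalone sketch of that wrapping, with a hypothetical helper name:

package example

import (
	"io"
	"os"

	"github.com/juju/ratelimit"
)

// openThrottled opens path and, when limitMiB > 0, caps read throughput at
// roughly limitMiB megabytes per second, mirroring the WriteLimit handling in
// LocalBackup.Restore above. The second return value closes the underlying file.
func openThrottled(path string, limitMiB int) (io.Reader, func() error, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, nil, err
	}
	if limitMiB <= 0 {
		return f, f.Close, nil
	}
	limit := int64(limitMiB) * 1024 * 1024
	// The bucket refills at limit bytes per second and allows bursts up to limit bytes.
	bucket := ratelimit.NewBucketWithRate(float64(limit), limit)
	return ratelimit.Reader(f, bucket), f.Close, nil
}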


@ -1,8 +1,6 @@
package backup
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
@ -13,13 +11,12 @@ import (
"emperror.dev/errors"
"github.com/cenkalti/backoff/v4"
"github.com/pterodactyl/wings/server/filesystem"
"github.com/juju/ratelimit"
"github.com/mholt/archiver/v4"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server/filesystem"
)
type S3Backup struct {
@ -96,31 +93,16 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
	if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
		reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
	}
	gr, err := gzip.NewReader(reader)
	if err != nil {
		return err
	}
	defer gr.Close()
	tr := tar.NewReader(gr)
	for {
		select {
		case <-ctx.Done():
			return nil
		default:
			// Do nothing, fall through to the next block of code in this loop.
		}
		header, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if header.Typeflag == tar.TypeReg {
			if err := callback(header.Name, tr, header.FileInfo().Mode(), header.AccessTime, header.ModTime); err != nil {
				return err
			}
		}
	}
	if err := format.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error {
		r, err := f.Open()
		if err != nil {
			return err
		}
		defer r.Close()
		return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
	}); err != nil {
		return err
	}
	return nil
}
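Both restore implementations now go through the package-level format declared in backup.go, a gzip-compressed tar. With archiver/v4, Extract takes the context, the raw stream, an optional list of paths to pull out (nil appears to mean every entry, which is how it is used here), and a per-entry handler. A hedged standalone sketch of that call shape:

package example

import (
	"context"
	"fmt"
	"io"

	"github.com/mholt/archiver/v4"
)

// listTarGz walks a .tar.gz stream using the same CompressedArchive format the
// backup package defines; every entry is visited because the path filter is nil.
func listTarGz(ctx context.Context, r io.Reader) error {
	format := archiver.CompressedArchive{
		Compression: archiver.Gz{},
		Archival:    archiver.Tar{},
	}
	return format.Extract(ctx, r, nil, func(ctx context.Context, f archiver.File) error {
		if f.IsDir() {
			return nil
		}
		fmt.Printf("%s (%d bytes)\n", f.NameInArchive, f.Size())
		return nil
	})
}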


@ -30,6 +30,30 @@ var pool = sync.Pool{
},
}
// TarProgress .
type TarProgress struct {
*tar.Writer
p *Progress
}
// NewTarProgress .
func NewTarProgress(w *tar.Writer, p *Progress) *TarProgress {
if p != nil {
p.w = w
}
return &TarProgress{
Writer: w,
p: p,
}
}
func (p *TarProgress) Write(v []byte) (int, error) {
if p.p == nil {
return p.Writer.Write(v)
}
return p.p.Write(v)
}
// Progress is used to track the progress of any I/O operation that are being
// performed.
type Progress struct {
@ -46,6 +70,12 @@ func NewProgress(total int64) *Progress {
return &Progress{total: total}
}
// SetWriter sets the writer progress will forward writes to.
// NOTE: This function is not thread safe.
func (p *Progress) SetWriter(w io.Writer) {
p.w = w
}
// Written returns the total number of bytes written.
// This function should be used when the progress is tracking data being written.
func (p *Progress) Written() int64 {
@ -157,23 +187,17 @@ func (a *Archive) Create(dst string) error {
_ = gw.SetConcurrency(1<<20, 1)
defer gw.Close()
var pw io.Writer
if a.Progress != nil {
a.Progress.w = gw
pw = a.Progress
} else {
pw = gw
}
// Create a new tar writer around the gzip writer.
tw := tar.NewWriter(pw)
tw := tar.NewWriter(gw)
defer tw.Close()
pw := NewTarProgress(tw, a.Progress)
// Configure godirwalk.
options := &godirwalk.Options{
FollowSymbolicLinks: false,
Unsorted: true,
Callback: a.callback(tw),
Callback: a.callback(pw),
}
// If we're specifically looking for only certain files, or have requested
@ -182,7 +206,7 @@ func (a *Archive) Create(dst string) error {
if len(a.Files) == 0 && len(a.Ignore) > 0 {
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
options.Callback = a.callback(tw, func(_ string, rp string) error {
options.Callback = a.callback(pw, func(_ string, rp string) error {
if i.MatchesPath(rp) {
return godirwalk.SkipThis
}
@ -190,7 +214,7 @@ func (a *Archive) Create(dst string) error {
return nil
})
} else if len(a.Files) > 0 {
options.Callback = a.withFilesCallback(tw)
options.Callback = a.withFilesCallback(pw)
}
// Recursively walk the path we are archiving.
@ -199,7 +223,7 @@ func (a *Archive) Create(dst string) error {
// Callback function used to determine if a given file should be included in the archive
// being generated.
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
func (a *Archive) callback(tw *TarProgress, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
return func(path string, de *godirwalk.Dirent) error {
// Skip directories because we are walking them recursively.
if de.IsDir() {
@ -223,7 +247,7 @@ func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative st
}
// Pushes only files defined in the Files key to the final archive.
func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirwalk.Dirent) error {
func (a *Archive) withFilesCallback(tw *TarProgress) func(path string, de *godirwalk.Dirent) error {
return a.callback(tw, func(p string, rp string) error {
for _, f := range a.Files {
// If the given doesn't match, or doesn't have the same prefix continue
@ -244,7 +268,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
}
// Adds a given file path to the final archive being created.
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
// Lstat the file, this will give us the same information as Stat except that it will not
// follow a symlink to its target automatically. This is important to avoid including
// files that exist outside the server root unintentionally in the backup.
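With the counter attached inside Create via NewTarProgress, a caller only needs to hand the Archive a Progress and poll it while the archive is being written, which is what the transfer endpoint above does over its websocket. A sketch of that usage; the Archive value is assumed to be configured elsewhere, and the bar width is arbitrary:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/pterodactyl/wings/server/filesystem"
)

// archiveWithProgress attaches a Progress to an already-configured Archive and
// logs a progress bar every five seconds until Create returns.
func archiveWithProgress(ctx context.Context, a *filesystem.Archive, dst string, total int64) error {
	p := filesystem.NewProgress(total)
	a.Progress = p

	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // stops the reporter once Create has returned

	go func() {
		t := time.NewTicker(5 * time.Second)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				fmt.Println("Archiving " + p.Progress(25))
			}
		}
	}()
	return a.Create(dst)
}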


@ -4,7 +4,9 @@ import (
"archive/tar"
"archive/zip"
"compress/gzip"
"context"
"fmt"
iofs "io/fs"
"os"
"path"
"path/filepath"
@ -13,11 +15,10 @@ import (
"sync/atomic"
"time"
"emperror.dev/errors"
gzip2 "github.com/klauspost/compress/gzip"
zip2 "github.com/klauspost/compress/zip"
"emperror.dev/errors"
"github.com/mholt/archiver/v3"
"github.com/mholt/archiver/v4"
)
// CompressFiles compresses all of the files matching the given paths in the
@ -73,7 +74,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
// SpaceAvailableForDecompression looks through a given archive and determines
// if decompressing it would put the server over its allocated disk space limit.
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir string, file string) error {
// Don't waste time trying to determine this if we know the server will have the space for
// it since there is no limit.
if fs.MaxDisk() <= 0 {
@ -89,69 +90,104 @@ func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) er
// waiting an unnecessary amount of time on this call.
dirSize, err := fs.DiskUsage(false)
var size int64
// Walk over the archive and figure out just how large the final output would be from unarchiving it.
err = archiver.Walk(source, func(f archiver.File) error {
if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
return newFilesystemError(ErrCodeDiskSpace, nil)
}
return nil
})
fsys, err := archiver.FileSystem(source)
if err != nil {
if IsUnknownArchiveFormatError(err) {
if errors.Is(err, archiver.ErrNoMatch) {
return newFilesystemError(ErrCodeUnknownArchive, err)
}
return err
}
return err
var size int64
return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
if err != nil {
return err
}
select {
case <-ctx.Done():
// Stop walking if the context is canceled.
return ctx.Err()
default:
info, err := d.Info()
if err != nil {
return err
}
if atomic.AddInt64(&size, info.Size())+dirSize > fs.MaxDisk() {
return newFilesystemError(ErrCodeDiskSpace, nil)
}
return nil
}
})
}
// DecompressFile will decompress a file in a given directory by using the
// archiver tool to infer the file type and go from there. This will walk over
// all of the files within the given archive and ensure that there is not a
// all the files within the given archive and ensure that there is not a
// zip-slip attack being attempted by validating that the final path is within
// the server data directory.
func (fs *Filesystem) DecompressFile(dir string, file string) error {
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
source, err := fs.SafePath(filepath.Join(dir, file))
if err != nil {
return err
}
	// Ensure that the source archive actually exists on the system.
	if _, err := os.Stat(source); err != nil {
		return errors.WithStack(err)
	}
	// Walk all of the files in the archiver file and write them to the disk. If any
	// directory is encountered it will be skipped since we handle creating any missing
	// directories automatically when writing files.
	err = archiver.Walk(source, func(f archiver.File) error {
		if f.IsDir() {
			return nil
		}
		p := filepath.Join(dir, ExtractNameFromArchive(f))
		// If it is ignored, just don't do anything with the file and skip over it.
		if err := fs.IsIgnored(p); err != nil {
			return nil
		}
		if err := fs.Writefile(p, f); err != nil {
			return wrapError(err, source)
		}
		// Update the file permissions to the one set in the archive.
		if err := fs.Chmod(p, f.Mode()); err != nil {
			return wrapError(err, source)
		}
		// Update the file modification time to the one set in the archive.
		if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
			return wrapError(err, source)
		}
		return nil
	})
	if err != nil {
		if IsUnknownArchiveFormatError(err) {
			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}
	return fs.DecompressFileUnsafe(ctx, dir, source)
}

// DecompressFileUnsafe will decompress any file on the local disk without checking
// if it is owned by the server. The file will be SAFELY decompressed and extracted
// into the server's directory.
func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file string) error {
	// Ensure that the archive actually exists on the system.
	if _, err := os.Stat(file); err != nil {
		return errors.WithStack(err)
	}
	f, err := os.Open(file)
	if err != nil {
		return err
	}
	// Identify the type of archive we are dealing with.
	format, input, err := archiver.Identify(filepath.Base(file), f)
	if err != nil {
		if errors.Is(err, archiver.ErrNoMatch) {
			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}
// Decompress and extract archive
if ex, ok := format.(archiver.Extractor); ok {
return ex.Extract(ctx, input, nil, func(ctx context.Context, f archiver.File) error {
if f.IsDir() {
return nil
}
p := filepath.Join(dir, ExtractNameFromArchive(f))
// If it is ignored, just don't do anything with the file and skip over it.
if err := fs.IsIgnored(p); err != nil {
return nil
}
r, err := f.Open()
if err != nil {
return err
}
defer r.Close()
if err := fs.Writefile(p, r); err != nil {
return wrapError(err, file)
}
// Update the file permissions to the one set in the archive.
if err := fs.Chmod(p, f.Mode()); err != nil {
return wrapError(err, file)
}
// Update the file modification time to the one set in the archive.
if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
return wrapError(err, file)
}
return nil
})
}
return nil
}
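DecompressFileUnsafe leans on archiver/v4 to sniff the format from the file name plus a peek at the stream, then type-asserts to an Extractor before walking entries. The same sequence, stripped of the server-specific path and ignore handling, looks roughly like this:

package example

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/mholt/archiver/v4"
)

// extractNames prints every regular file in an archive of any format that
// archiver/v4 recognizes (tar, tar.gz, zip, ...); it is a trimmed-down
// illustration of the flow above, not part of the commit.
func extractNames(ctx context.Context, archivePath string) error {
	f, err := os.Open(archivePath)
	if err != nil {
		return err
	}
	defer f.Close()

	// Identify returns the detected format and a reader that still contains
	// the bytes it peeked at.
	format, input, err := archiver.Identify(filepath.Base(archivePath), f)
	if err != nil {
		return err
	}
	ex, ok := format.(archiver.Extractor)
	if !ok {
		return fmt.Errorf("%s: identified format cannot be extracted", archivePath)
	}
	return ex.Extract(ctx, input, nil, func(ctx context.Context, af archiver.File) error {
		if af.IsDir() {
			return nil
		}
		fmt.Println(af.NameInArchive)
		return nil
	})
}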


@ -1,6 +1,7 @@
package filesystem
import (
"context"
"os"
"sync/atomic"
"testing"
@ -28,7 +29,7 @@ func TestFilesystem_DecompressFile(t *testing.T) {
g.Assert(err).IsNil()
// decompress
err = fs.DecompressFile("/", "test."+ext)
err = fs.DecompressFile(context.Background(), "/", "test."+ext)
g.Assert(err).IsNil()
// make sure everything is where it is supposed to be


@ -4,7 +4,6 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"emperror.dev/errors"
"github.com/apex/log"
@ -122,15 +121,6 @@ func IsErrorCode(err error, code ErrorCode) bool {
return false
}
// IsUnknownArchiveFormatError checks if the error is due to the archive being
// in an unexpected file format.
func IsUnknownArchiveFormatError(err error) bool {
if err != nil && strings.HasPrefix(err.Error(), "format ") {
return true
}
return false
}
// NewBadPathResolution returns a new BadPathResolution error.
func NewBadPathResolution(path string, resolved string) error {
return errors.WithStackDepth(&Error{code: ErrCodePathResolution, path: path, resolved: resolved}, 1)