Add configurable disk write speed limit for backups (#74)

* Add configurable disk write speed limit for backups
Matthew Penner 2020-12-08 09:13:48 -07:00 committed by GitHub
parent 2d4dd05ec9
commit 8e29ffed50
4 changed files with 39 additions and 2 deletions


@@ -75,6 +75,8 @@ type SystemConfiguration struct {
Sftp SftpConfiguration `yaml:"sftp"`
CrashDetection CrashDetection `yaml:"crash_detection"`
Backups Backups `yaml:"backups"`
}
type CrashDetection struct {
@@ -89,6 +91,18 @@ type CrashDetection struct {
Timeout int `default:"60" json:"timeout"`
}
type Backups struct {
// WriteLimit imposes a disk I/O write limit on backups. This affects all backup
// drivers, since the archiver must first write the archive to the local disk before
// it can be uploaded to any external storage provider.
//
// If the value is less than 1, the write speed is unlimited; if the value is
// greater than 0, writes are limited to that many MiB/s.
//
// Defaults to 0 (unlimited).
WriteLimit int `default:"0" yaml:"write_limit"`
}
// Ensures that all of the system directories exist on the system. These directories are
// created so that only the owner can read the data; no other users have access.
func (sc *SystemConfiguration) ConfigureDirectories() error {
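
As a quick illustration of the new option, here is a minimal, self-contained sketch of how a write_limit key decodes into the Backups struct above. The YAML snippet, the use of gopkg.in/yaml.v2, and the standalone main function are illustrative assumptions and are not part of this diff; in Wings the key presumably sits under the system configuration's backups section.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Mirrors the struct added above; only the yaml tag matters for this sketch.
type Backups struct {
	WriteLimit int `yaml:"write_limit"`
}

func main() {
	// Hypothetical configuration limiting backup writes to 100 MiB/s.
	raw := []byte("write_limit: 100\n")

	var b Backups
	if err := yaml.Unmarshal(raw, &b); err != nil {
		panic(err)
	}
	fmt.Println(b.WriteLimit) // prints 100
}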

go.mod (1 change)

@@ -39,6 +39,7 @@ require (
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334
github.com/icza/dyno v0.0.0-20200205103839-49cb13720835
github.com/imdario/mergo v0.3.8
github.com/juju/ratelimit v1.0.1
github.com/karrick/godirwalk v1.16.1
github.com/klauspost/compress v1.10.10 // indirect
github.com/klauspost/pgzip v1.2.4
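
The only new direct dependency is github.com/juju/ratelimit, a token-bucket rate limiter. Below is a rough, self-contained sketch of the two calls the backup code relies on; the 4 MiB/s figure, the io.Discard destination, and the timing comment are illustrative assumptions rather than values from the diff.

package main

import (
	"fmt"
	"io"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	limit := 4 // illustrative write limit in MiB/s
	bytesPerSecond := float64(limit) * 1024 * 1024

	// Bucket holding up to "limit" MiB of tokens, refilled at "limit" MiB per second.
	bucket := ratelimit.NewBucketWithRate(bytesPerSecond, int64(limit)*1024*1024)

	// Wrap any io.Writer; each Write blocks until enough tokens are available.
	w := ratelimit.Writer(io.Discard, bucket)

	start := time.Now()
	_, _ = w.Write(make([]byte, 8*1024*1024))
	// 8 MiB against a 4 MiB bucket refilled at 4 MiB/s: elapsed time should be roughly one second.
	fmt.Println(time.Since(start))
}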

go.sum (2 changes)

@@ -275,6 +275,8 @@ github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=


@@ -4,8 +4,10 @@ import (
"archive/tar"
"context"
"github.com/apex/log"
"github.com/juju/ratelimit"
gzip "github.com/klauspost/pgzip"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"github.com/remeh/sizedwaitgroup"
"golang.org/x/sync/errgroup"
"io"
@@ -30,13 +32,31 @@ func (a *Archive) Create(dst string, ctx context.Context) error {
}
defer f.Close()
// Select a writer based on the WriteLimit configuration option.
var writer io.Writer
if writeLimit := config.Get().System.Backups.WriteLimit; writeLimit < 1 {
// If there is no write limit, use the file as the writer.
writer = f
} else {
// Token bucket with a capacity of "writeLimit" MiB, refilled at "writeLimit" MiB per second.
bucket := ratelimit.NewBucketWithRate(float64(writeLimit)*1024*1024, int64(writeLimit)*1024*1024)
// Wrap the file writer with the token bucket limiter.
writer = ratelimit.Writer(f, bucket)
}
maxCpu := runtime.NumCPU() / 2
if maxCpu > 4 {
maxCpu = 4
}
-gzw, _ := gzip.NewWriterLevel(f, gzip.BestSpeed)
-_ = gzw.SetConcurrency(1<<20, maxCpu)
+gzw, err := gzip.NewWriterLevel(writer, gzip.BestSpeed)
+if err != nil {
+return errors.WithMessage(err, "failed to create gzip writer")
+}
+if err := gzw.SetConcurrency(1<<20, maxCpu); err != nil {
+return errors.WithMessage(err, "failed to set gzip concurrency")
+}
defer gzw.Flush()
defer gzw.Close()
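
Putting the pieces together, the following is a condensed, self-contained sketch of the write pipeline the patched Create method now builds: destination file, optional token-bucket throttle, parallel gzip, then the tar stream. The output path, the 100 MiB/s limit, the single-core guard, and the sample payload are illustrative assumptions and are not taken from the diff.

package main

import (
	"archive/tar"
	"io"
	"os"
	"runtime"

	"github.com/juju/ratelimit"
	gzip "github.com/klauspost/pgzip"
)

func main() {
	writeLimit := 100 // illustrative limit in MiB/s; 0 would mean unlimited

	f, err := os.Create("/tmp/example-backup.tar.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Throttle writes to the destination only when a limit is configured.
	var dst io.Writer = f
	if writeLimit > 0 {
		bucket := ratelimit.NewBucketWithRate(float64(writeLimit)*1024*1024, int64(writeLimit)*1024*1024)
		dst = ratelimit.Writer(f, bucket)
	}

	// Parallel gzip capped at half the CPUs (at most 4), using 1 MiB blocks, as in the diff.
	maxCpu := runtime.NumCPU() / 2
	if maxCpu > 4 {
		maxCpu = 4
	}
	if maxCpu < 1 {
		maxCpu = 1 // guard for single-core machines; not present in the diff
	}
	gzw, err := gzip.NewWriterLevel(dst, gzip.BestSpeed)
	if err != nil {
		panic(err)
	}
	if err := gzw.SetConcurrency(1<<20, maxCpu); err != nil {
		panic(err)
	}
	defer gzw.Close()

	// Tar stream layered on top of the (optionally throttled) gzip writer.
	tw := tar.NewWriter(gzw)
	defer tw.Close()

	payload := []byte("hello from a throttled backup")
	hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(payload))}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if _, err := tw.Write(payload); err != nil {
		panic(err)
	}
}

As in the commit, the throttle wraps the file underneath the gzip writer, so the limit applies to the compressed bytes actually hitting the disk rather than to the raw data being archived.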