Merge branch 'pterodactyl:develop' into develop
This commit is contained in:
commit 163498d48e
.github/FUNDING.yml (vendored) | 3
@@ -1,2 +1 @@
-github: [ DaneEveritt ]
-custom: [ "https://paypal.me/PterodactylSoftware" ]
+github: [ matthewpi ]
.github/workflows/build-test.yml (vendored) | 2
@@ -12,7 +12,7 @@ jobs:
 fail-fast: false
 matrix:
 os: [ ubuntu-20.04 ]
-go: [ '^1.17' ]
+go: [ '1.18.7' ]
 goos: [ linux ]
 goarch: [ amd64, arm64 ]
 runs-on: ${{ matrix.os }}
.github/workflows/release.yml (vendored) | 2
@@ -11,7 +11,7 @@ jobs:
 uses: actions/checkout@v2
 - uses: actions/setup-go@v2
 with:
-go-version: '^1.17'
+go-version: '1.18.7'
 - name: Build
 env:
 REF: ${{ github.ref }}
CHANGELOG.md | 20
@@ -1,5 +1,25 @@
 # Changelog
 
+## v1.7.2
+### Fixed
+* The S3 backup driver now supports Cloudflare R2
+
+### Added
+* During a server transfer, there is a new "Archiving" status that outputs the progress of creating the server transfer archive.
+* Adds a configuration option to control the list of trusted proxies that can be used to determine the client IP address.
+* Adds a configuration option to control the Docker username space setting when Wings creates containers.
+
+### Changed
+* Releases are now built using `Go 1.18` — the minimum version required to build Wings is now `Go 1.18`.
+
+## v1.7.1
+### Fixed
+* YAML parser has been updated to fix some strange issues
+
+### Added
+* Added `Force Outgoing IP` option for servers to ensure outgoing traffic uses the server's IP address
+* Adds an option to control the level of gzip compression for backups
+
 ## v1.7.0
 ### Fixed
 * Fixes multi-platform support for Wings' Docker image.
@@ -1,5 +1,5 @@
 # Stage 1 (Build)
-FROM golang:1.17-alpine AS builder
+FROM golang:1.18-alpine AS builder
 
 ARG VERSION
 RUN apk add --update --no-cache git make
@@ -14,7 +14,7 @@ dependencies, and allowing users to authenticate with the same credentials they
 
 ## Sponsors
 I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's developement.
-[Interested in becoming a sponsor?](https://github.com/sponsors/DaneEveritt)
+[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
 
 | Company | About |
 | ------- | ----- |
@@ -58,7 +58,7 @@ func newDiagnosticsCommand() *cobra.Command {
 return command
 }
 
-// diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
+// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.
 // We collect:
 // - wings and docker versions
 // - relevant parts of daemon configuration
@@ -81,7 +81,7 @@ func init() {
 rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
 rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
 rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
-rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
+rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
 rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
 rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
@@ -162,7 +162,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 ticker := time.NewTicker(time.Minute)
 // Every minute, write the current server states to the disk to allow for a more
 // seamless hard-reboot process in which wings will re-sync server states based
-// on it's last tracked state.
+// on its last tracked state.
 go func() {
 for {
 select {
@@ -91,6 +91,9 @@ type ApiConfiguration struct {
 
 // The maximum size for files uploaded through the Panel in MB.
 UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
+
+// A list of IP address of proxies that may send a X-Forwarded-For header to set the true clients IP
+TrustedProxies []string `json:"trusted_proxies" yaml:"trusted_proxies"`
 }
 
 // RemoteQueryConfiguration defines the configuration settings for remote requests
@@ -219,6 +222,15 @@ type Backups struct {
 //
 // Defaults to 0 (unlimited)
 WriteLimit int `default:"0" yaml:"write_limit"`
+
+// CompressionLevel determines how much backups created by wings should be compressed.
+//
+// "none" -> no compression will be applied
+// "best_speed" -> uses gzip level 1 for fast speed
+// "best_compression" -> uses gzip level 9 for minimal disk space useage
+//
+// Defaults to "best_speed" (level 1)
+CompressionLevel string `default:"best_speed" yaml:"compression_level"`
 }
 
 type Transfers struct {
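The strings accepted by `compression_level` are translated into gzip levels by the archive writer further down in this diff. A condensed, standalone sketch of that mapping, assuming only the `github.com/klauspost/pgzip` package (its level constants mirror compress/gzip):

```go
package main

import (
	"fmt"

	"github.com/klauspost/pgzip"
)

// compressionLevel maps the backups.compression_level setting onto a pgzip
// level, defaulting to BestSpeed like the switch in Archive.Create below.
func compressionLevel(setting string) int {
	switch setting {
	case "none":
		return pgzip.NoCompression
	case "best_compression":
		return pgzip.BestCompression
	default: // includes "best_speed"
		return pgzip.BestSpeed
	}
}

func main() {
	fmt.Println(compressionLevel("best_compression")) // prints 9
}
```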
@@ -78,6 +78,14 @@ type DockerConfiguration struct {
 Overhead Overhead `json:"overhead" yaml:"overhead"`
 
 UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
+
+// Sets the user namespace mode for the container when user namespace remapping option is
+// enabled.
+//
+// If the value is blank, the daemon's user namespace remapping configuration is used,
+// if the value is "host", then the pterodactyl containers are started with user namespace
+// remapping disabled
+UsernsMode string `default:"" json:"userns_mode" yaml:"userns_mode"`
 }
 
 // RegistryConfiguration defines the authentication credentials for a given
@@ -12,6 +12,11 @@ import (
 // Defines the allocations available for a given server. When using the Docker environment
 // driver these correspond to mappings for the container that allow external connections.
 type Allocations struct {
+// ForceOutgoingIP causes a dedicated bridge network to be created for the
+// server with a special option, causing Docker to SNAT outgoing traffic to
+// the DefaultMapping's IP. This is important to servers which rely on external
+// services that check the IP of the server (Source Engine servers, for example).
+ForceOutgoingIP bool `json:"force_outgoing_ip"`
 // Defines the default allocation that should be used for this server. This is
 // what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
 // files or the startup arguments for a server.
@@ -41,12 +41,12 @@ func ConfigureDocker(ctx context.Context) error {
 nw := config.Get().Docker.Network
 resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
 if err != nil {
-if client.IsErrNotFound(err) {
-log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
-if err := createDockerNetwork(ctx, cli); err != nil {
-return err
-}
-} else {
+if !client.IsErrNotFound(err) {
 return err
 }
+
+log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
+if err := createDockerNetwork(ctx, cli); err != nil {
+return err
+}
 }
@@ -147,10 +147,12 @@ func (e *Environment) InSituUpdate() error {
 // currently available for it. If the container already exists it will be
 // returned.
 func (e *Environment) Create() error {
+ctx := context.Background()
+
 // If the container already exists don't hit the user with an error, just return
 // the current information about it which is what we would do when creating the
 // container anyways.
-if _, err := e.ContainerInspect(context.Background()); err == nil {
+if _, err := e.ContainerInspect(ctx); err == nil {
 return nil
 } else if !client.IsErrNotFound(err) {
 return errors.Wrap(err, "environment/docker: failed to inspect container")
@@ -190,7 +192,34 @@ func (e *Environment) Create() error {
 },
 }
 
-tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))
+networkMode := container.NetworkMode(config.Get().Docker.Network.Mode)
+if a.ForceOutgoingIP {
+e.log().Debug("environment/docker: forcing outgoing IP address")
+networkName := strings.ReplaceAll(e.Id, "-", "")
+networkMode = container.NetworkMode(networkName)
+
+if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
+if !client.IsErrNotFound(err) {
+return err
+}
+
+if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
+Driver: "bridge",
+EnableIPv6: false,
+Internal: false,
+Attachable: false,
+Ingress: false,
+ConfigOnly: false,
+Options: map[string]string{
+"encryption": "false",
+"com.docker.network.bridge.default_bridge": "false",
+"com.docker.network.host_ipv4": a.DefaultMapping.Ip,
+},
+}); err != nil {
+return err
+}
+}
+}
 
 hostConf := &container.HostConfig{
 PortBindings: a.DockerBindings(),
@@ -202,7 +231,7 @@ func (e *Environment) Create() error {
 // Configure the /tmp folder mapping in containers. This is necessary for some
 // games that need to make use of it for downloads and other installation processes.
 Tmpfs: map[string]string{
-"/tmp": "rw,exec,nosuid,size=" + tmpfsSize + "M",
+"/tmp": "rw,exec,nosuid,size=" + strconv.Itoa(int(config.Get().Docker.TmpfsSize)) + "M",
 },
 
 // Define resource limits for the container based on the data passed through
@@ -231,10 +260,11 @@ func (e *Environment) Create() error {
 "setpcap", "mknod", "audit_write", "net_raw", "dac_override",
 "fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
 },
-NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
+NetworkMode: networkMode,
+UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
 }
 
-if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
+if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {
 return errors.Wrap(err, "environment/docker: failed to create container")
 }
go.mod | 119
@@ -1,129 +1,124 @@
 module github.com/pterodactyl/wings
 
-go 1.17
+go 1.18
 
 require (
 emperror.dev/errors v0.8.1
-github.com/AlecAivazis/survey/v2 v2.3.4
+github.com/AlecAivazis/survey/v2 v2.3.6
 github.com/Jeffail/gabs/v2 v2.6.1
 github.com/NYTimes/logrotate v1.0.0
 github.com/apex/log v1.9.0
 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
 github.com/beevik/etree v1.1.0
 github.com/buger/jsonparser v1.1.1
-github.com/cenkalti/backoff/v4 v4.1.2
+github.com/cenkalti/backoff/v4 v4.1.3
 github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
-github.com/creasty/defaults v1.5.2
-github.com/docker/docker v20.10.14+incompatible
+github.com/creasty/defaults v1.6.0
+github.com/docker/docker v20.10.18+incompatible
 github.com/docker/go-connections v0.4.0
 github.com/fatih/color v1.13.0
 github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
-github.com/gabriel-vasile/mimetype v1.4.0
-github.com/gammazero/workerpool v1.1.2
+github.com/gabriel-vasile/mimetype v1.4.1
+github.com/gammazero/workerpool v1.1.3
 github.com/gbrlsnchs/jwt/v3 v3.0.1
-github.com/gin-gonic/gin v1.7.7
+github.com/gin-gonic/gin v1.8.1
+github.com/glebarez/sqlite v1.4.8
+github.com/go-co-op/gocron v1.17.0
+github.com/goccy/go-json v0.9.11
 github.com/google/uuid v1.3.0
 github.com/gorilla/websocket v1.5.0
 github.com/iancoleman/strcase v0.2.0
-github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
-github.com/juju/ratelimit v1.0.1
-github.com/karrick/godirwalk v1.16.1
+github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
+github.com/juju/ratelimit v1.0.2
+github.com/karrick/godirwalk v1.17.0
+github.com/klauspost/compress v1.15.11
 github.com/klauspost/pgzip v1.2.5
 github.com/magiconair/properties v1.8.6
-github.com/mattn/go-colorable v0.1.12
+github.com/mattn/go-colorable v0.1.13
 github.com/mholt/archiver/v3 v3.5.1
 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
 github.com/patrickmn/go-cache v2.1.0+incompatible
-github.com/pkg/sftp v1.13.4
+github.com/pkg/sftp v1.13.5
 github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
-github.com/spf13/cobra v1.4.0
-github.com/stretchr/testify v1.7.5
-golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-gopkg.in/ini.v1 v1.66.4
+github.com/spf13/cobra v1.5.0
+github.com/stretchr/testify v1.8.0
+golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
+gopkg.in/ini.v1 v1.67.0
 gopkg.in/yaml.v2 v2.4.0
+gopkg.in/yaml.v3 v3.0.1
+gorm.io/gorm v1.23.10
 )
 
-require (
-github.com/glebarez/sqlite v1.4.6
-github.com/go-co-op/gocron v1.15.0
-github.com/goccy/go-json v0.9.6
-github.com/klauspost/compress v1.15.1
-gorm.io/gorm v1.23.8
-)
-
-require golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
-
 require (
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-github.com/Microsoft/go-winio v0.5.2 // indirect
-github.com/Microsoft/hcsshim v0.9.2 // indirect
+github.com/Microsoft/go-winio v0.6.0 // indirect
+github.com/Microsoft/hcsshim v0.9.4 // indirect
 github.com/andybalholm/brotli v1.0.4 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
 github.com/cespare/xxhash/v2 v2.1.2 // indirect
 github.com/containerd/containerd v1.6.2 // indirect
 github.com/containerd/fifo v1.0.0 // indirect
 github.com/davecgh/go-spew v1.1.1 // indirect
 github.com/docker/distribution v2.8.1+incompatible // indirect
 github.com/docker/go-metrics v0.0.1 // indirect
-github.com/docker/go-units v0.4.0 // indirect
+github.com/docker/go-units v0.5.0 // indirect
 github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
-github.com/fsnotify/fsnotify v1.5.1 // indirect
-github.com/gammazero/deque v0.1.1 // indirect
+github.com/fsnotify/fsnotify v1.5.4 // indirect
+github.com/gammazero/deque v0.2.0 // indirect
 github.com/gin-contrib/sse v0.1.0 // indirect
-github.com/glebarez/go-sqlite v1.17.3 // indirect
+github.com/glebarez/go-sqlite v1.19.1 // indirect
 github.com/go-playground/locales v0.14.0 // indirect
 github.com/go-playground/universal-translator v0.18.0 // indirect
-github.com/go-playground/validator/v10 v10.10.1 // indirect
+github.com/go-playground/validator/v10 v10.11.1 // indirect
 github.com/gogo/protobuf v1.3.2 // indirect
 github.com/golang/protobuf v1.5.2 // indirect
 github.com/golang/snappy v0.0.4 // indirect
 github.com/gorilla/mux v1.7.4 // indirect
-github.com/inconshreveable/mousetrap v1.0.0 // indirect
+github.com/inconshreveable/mousetrap v1.0.1 // indirect
 github.com/jinzhu/inflection v1.0.0 // indirect
 github.com/jinzhu/now v1.1.5 // indirect
 github.com/json-iterator/go v1.1.12 // indirect
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 github.com/kr/fs v0.1.0 // indirect
 github.com/leodido/go-urn v1.2.1 // indirect
-github.com/magefile/mage v1.13.0 // indirect
-github.com/mattn/go-isatty v0.0.14 // indirect
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+github.com/magefile/mage v1.14.0 // indirect
+github.com/mattn/go-isatty v0.0.16 // indirect
+github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
-github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 github.com/modern-go/reflect2 v1.0.2 // indirect
 github.com/morikuni/aec v1.0.0 // indirect
 github.com/nwaples/rardecode v1.1.3 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
-github.com/opencontainers/image-spec v1.0.2 // indirect
-github.com/pierrec/lz4/v4 v4.1.14 // indirect
+github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
+github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+github.com/pierrec/lz4/v4 v4.1.17 // indirect
 github.com/pkg/errors v0.9.1 // indirect
 github.com/pmezard/go-difflib v1.0.0 // indirect
-github.com/prometheus/client_golang v1.12.1 // indirect
+github.com/prometheus/client_golang v1.13.0 // indirect
 github.com/prometheus/client_model v0.2.0 // indirect
-github.com/prometheus/common v0.32.1 // indirect
-github.com/prometheus/procfs v0.7.3 // indirect
-github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
+github.com/prometheus/common v0.37.0 // indirect
+github.com/prometheus/procfs v0.8.0 // indirect
+github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
 github.com/robfig/cron/v3 v3.0.1 // indirect
-github.com/sirupsen/logrus v1.8.1 // indirect
+github.com/sirupsen/logrus v1.9.0 // indirect
 github.com/spf13/pflag v1.0.5 // indirect
 github.com/ugorji/go/codec v1.2.7 // indirect
 github.com/ulikunitz/xz v0.5.10 // indirect
 github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
-go.uber.org/atomic v1.9.0 // indirect
+go.uber.org/atomic v1.10.0 // indirect
 go.uber.org/multierr v1.8.0 // indirect
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
+golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
+golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
+golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
 golang.org/x/text v0.3.7 // indirect
-golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
-google.golang.org/grpc v1.45.0 // indirect
-google.golang.org/protobuf v1.28.0 // indirect
-gopkg.in/yaml.v3 v3.0.1 // indirect
-modernc.org/libc v1.16.17 // indirect
-modernc.org/mathutil v1.4.1 // indirect
-modernc.org/memory v1.1.1 // indirect
-modernc.org/sqlite v1.17.3 // indirect
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
+golang.org/x/tools v0.1.12 // indirect
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+google.golang.org/protobuf v1.28.1 // indirect
+modernc.org/libc v1.20.0 // indirect
+modernc.org/mathutil v1.5.0 // indirect
+modernc.org/memory v1.4.0 // indirect
+modernc.org/sqlite v1.19.1 // indirect
 )
@@ -15,7 +15,7 @@ import (
 "github.com/icza/dyno"
 "github.com/magiconair/properties"
 "gopkg.in/ini.v1"
-"gopkg.in/yaml.v2"
+"gopkg.in/yaml.v3"
 
 "github.com/pterodactyl/wings/config"
 )
@@ -3,10 +3,11 @@ package remote
 import (
 "context"
 "fmt"
-"github.com/pterodactyl/wings/internal/models"
 "strconv"
 "sync"
 
+"github.com/pterodactyl/wings/internal/models"
+
 "emperror.dev/errors"
 "github.com/apex/log"
 "golang.org/x/sync/errgroup"
@@ -2,11 +2,12 @@ package remote
 
 import (
 "bytes"
-"github.com/apex/log"
-"github.com/goccy/go-json"
 "regexp"
 "strings"
 
+"github.com/apex/log"
+"github.com/goccy/go-json"
+
 "github.com/pterodactyl/wings/parser"
 )
@@ -156,9 +157,15 @@ type BackupRemoteUploadResponse struct {
 PartSize int64 `json:"part_size"`
 }
 
-type BackupRequest struct {
-Checksum string `json:"checksum"`
-ChecksumType string `json:"checksum_type"`
-Size int64 `json:"size"`
-Successful bool `json:"successful"`
+type BackupPart struct {
+ETag string `json:"etag"`
+PartNumber int `json:"part_number"`
+}
+
+type BackupRequest struct {
+Checksum string `json:"checksum"`
+ChecksumType string `json:"checksum_type"`
+Size int64 `json:"size"`
+Successful bool `json:"successful"`
+Parts []BackupPart `json:"parts"`
 }
@@ -4,6 +4,7 @@ import (
 "github.com/apex/log"
 "github.com/gin-gonic/gin"
 
+"github.com/pterodactyl/wings/config"
 "github.com/pterodactyl/wings/remote"
 "github.com/pterodactyl/wings/router/middleware"
 wserver "github.com/pterodactyl/wings/server"
@@ -15,6 +16,7 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
 
 router := gin.New()
 router.Use(gin.Recovery())
+router.SetTrustedProxies(config.Get().Api.TrustedProxies)
 router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
 router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
 // @todo log this into a different file so you can setup IP blocking for abusive requests and such.
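The `trusted_proxies` list is handed directly to gin, which then decides whether to honor X-Forwarded-For when computing `c.ClientIP()` (used elsewhere in this diff for upload and websocket activity logging). A minimal standalone sketch of that behavior, independent of Wings; the proxy address used here is an example only:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	router := gin.New()
	// Only X-Forwarded-For headers sent by 10.0.0.1 are trusted; requests from
	// any other peer fall back to the raw TCP remote address.
	if err := router.SetTrustedProxies([]string{"10.0.0.1"}); err != nil {
		panic(err)
	}
	router.GET("/ip", func(c *gin.Context) {
		c.String(http.StatusOK, c.ClientIP())
	})
	_ = router.Run(":8080")
}
```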
@@ -180,7 +180,7 @@ func postServerReinstall(c *gin.Context) {
 c.Status(http.StatusAccepted)
 }
 
-// Deletes a server from the wings daemon and dissociate it's objects.
+// Deletes a server from the wings daemon and dissociate its objects.
 func deleteServer(c *gin.Context) {
 s := middleware.ExtractServer(c)
 
@@ -602,7 +602,7 @@ func postServerUploadFiles(c *gin.Context) {
 NewServerError(err, s).Abort(c)
 return
 } else {
-s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.Request.RemoteAddr), server.ActivityFileUploaded, models.ActivityMeta{
+s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.ClientIP()), server.ActivityFileUploaded, models.ActivityMeta{
 "file": header.Filename,
 "directory": filepath.Clean(directory),
 })
@@ -32,7 +32,7 @@ func getServerWebsocket(c *gin.Context) {
 ctx, cancel := context.WithCancel(c.Request.Context())
 defer cancel()
 
-handler, err := websocket.GetHandler(s, c.Writer, c.Request)
+handler, err := websocket.GetHandler(s, c.Writer, c.Request, c)
 if err != nil {
 NewServerError(err, s).Abort(c)
 return
@@ -12,7 +12,6 @@ import (
 "path/filepath"
 "strconv"
 "strings"
-"sync/atomic"
 "time"
 
 "emperror.dev/errors"
@@ -30,19 +29,9 @@ import (
 "github.com/pterodactyl/wings/router/tokens"
 "github.com/pterodactyl/wings/server"
 "github.com/pterodactyl/wings/server/filesystem"
-"github.com/pterodactyl/wings/system"
 )
 
-// Number of ticks in the progress bar
-const ticks = 25
-
-// 100% / number of ticks = percentage represented by each tick
-const tickPercentage = 100 / ticks
-
-type downloadProgress struct {
-size int64
-progress int64
-}
+const progressWidth = 25
 
 // Data passed over to initiate a server transfer.
 type serverTransferRequest struct {
@@ -95,7 +84,7 @@ func getServerArchive(c *gin.Context) {
 return
 }
 
-// Compute sha1 checksum.
+// Compute sha256 checksum.
 h := sha256.New()
 f, err := os.Open(archivePath)
 if err != nil {
@@ -184,11 +173,35 @@ func postServerArchive(c *gin.Context) {
 return
 }
 
+// Get the disk usage of the server (used to calculate the progress of the archive process)
+rawSize, err := s.Filesystem().DiskUsage(true)
+if err != nil {
+sendTransferLog("Failed to get disk usage for server, aborting transfer..")
+l.WithField("error", err).Error("failed to get disk usage for server")
+return
+}
+
 // Create an archive of the entire server's data directory.
 a := &filesystem.Archive{
 BasePath: s.Filesystem().Path(),
+Progress: filesystem.NewProgress(rawSize),
 }
 
+// Send the archive progress to the websocket every 3 seconds.
+ctx, cancel := context.WithCancel(s.Context())
+defer cancel()
+go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
+defer t.Stop()
+for {
+select {
+case <-ctx.Done():
+return
+case <-t.C:
+sendTransferLog("Archiving " + p.Progress(progressWidth))
+}
+}
+}(ctx, a.Progress, time.NewTicker(5*time.Second))
+
 // Attempt to get an archive of the server.
 if err := a.Create(getArchivePath(s.ID())); err != nil {
 sendTransferLog("An error occurred while archiving the server: " + err.Error())
@@ -196,6 +209,12 @@ func postServerArchive(c *gin.Context) {
 return
 }
 
+// Cancel the progress ticker.
+cancel()
+
+// Show 100% completion.
+sendTransferLog("Archiving " + a.Progress.Progress(progressWidth))
+
 sendTransferLog("Successfully created archive, attempting to notify panel..")
 l.Info("successfully created server transfer archive, notifying panel..")
 
@@ -223,12 +242,6 @@ func postServerArchive(c *gin.Context) {
 c.Status(http.StatusAccepted)
 }
 
-func (w *downloadProgress) Write(v []byte) (int, error) {
-n := len(v)
-atomic.AddInt64(&w.progress, int64(n))
-return n, nil
-}
-
 // Log helper function to attach all errors and info output to a consistently formatted
 // log string for easier querying.
 func (str serverTransferRequest) log() *log.Entry {
@@ -321,7 +334,7 @@ func postTransfer(c *gin.Context) {
 manager := middleware.ExtractManager(c)
 u, err := uuid.Parse(data.ServerID)
 if err != nil {
-WithError(c, err)
+_ = WithError(c, err)
 return
 }
 // Force the server ID to be a valid UUID string at this point. If it is not an error
@@ -331,11 +344,12 @@ func postTransfer(c *gin.Context) {
 
 data.log().Info("handling incoming server transfer request")
 go func(data *serverTransferRequest) {
+ctx := context.Background()
 hasError := true
 
 // Create a new server installer. This will only configure the environment and not
 // run the installer scripts.
-i, err := installer.New(context.Background(), manager, data.Server)
+i, err := installer.New(ctx, manager, data.Server)
 if err != nil {
 _ = data.sendTransferStatus(manager.Client(), false)
 data.log().WithField("error", err).Error("failed to validate received server data")
@@ -407,25 +421,22 @@ func postTransfer(c *gin.Context) {
 sendTransferLog("Writing archive to disk...")
 data.log().Info("writing transfer archive to disk...")
 
-// Copy the file.
-progress := &downloadProgress{size: size}
-ticker := time.NewTicker(3 * time.Second)
-go func(progress *downloadProgress, t *time.Ticker) {
-for range ticker.C {
-// p = 100 (Downloaded)
-// size = 1000 (Content-Length)
-// p / size = 0.1
-// * 100 = 10% (Multiply by 100 to get a percentage of the download)
-// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
-// 2.5 (Number of ticks as a float64)
-// 2 (convert to an integer)
-p := atomic.LoadInt64(&progress.progress)
-// We have to cast these numbers to float in order to get a float result from the division.
-width := ((float64(p) / float64(size)) * 100) / tickPercentage
-bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
-sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
+progress := filesystem.NewProgress(size)
+
+// Send the archive progress to the websocket every 3 seconds.
+ctx, cancel := context.WithCancel(ctx)
+defer cancel()
+go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
+defer t.Stop()
+for {
+select {
+case <-ctx.Done():
+return
+case <-t.C:
+sendTransferLog("Downloading " + p.Progress(progressWidth))
+}
 }
-}(progress, ticker)
+}(ctx, progress, time.NewTicker(5*time.Second))
 
 var reader io.Reader
 downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
@@ -438,18 +449,16 @@ func postTransfer(c *gin.Context) {
 
 buf := make([]byte, 1024*4)
 if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
-ticker.Stop()
 _ = file.Close()
 
 sendTransferLog("Failed while writing archive file to disk: " + err.Error())
 data.log().WithField("error", err).Error("failed to copy archive file to disk")
 return
 }
-ticker.Stop()
+cancel()
 
 // Show 100% completion.
-humanSize := system.FormatBytes(progress.size)
-sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)
+sendTransferLog("Downloading " + progress.Progress(progressWidth))
 
 if err := file.Close(); err != nil {
 data.log().WithField("error", err).Error("unable to close archive file on local filesystem")
@@ -12,6 +12,7 @@ import (
 "emperror.dev/errors"
 "github.com/apex/log"
 "github.com/gbrlsnchs/jwt/v3"
+"github.com/gin-gonic/gin"
 "github.com/goccy/go-json"
 "github.com/google/uuid"
 "github.com/gorilla/websocket"
@@ -79,7 +80,7 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
 }
 
 // GetHandler returns a new websocket handler using the context provided.
-func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Handler, error) {
+func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
 upgrader := websocket.Upgrader{
 // Ensure that the websocket request is originating from the Panel itself,
 // and not some other location.
@@ -111,7 +112,7 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
 Connection: conn,
 jwt: nil,
 server: s,
-ra: s.NewRequestActivity("", r.RemoteAddr),
+ra: s.NewRequestActivity("", c.ClientIP()),
 uuid: u,
 }, nil
 }
@@ -32,7 +32,7 @@ type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mti
 // noinspection GoNameStartsWithPackageName
 type BackupInterface interface {
 // SetClient sets the API request client on the backup interface.
-SetClient(c remote.Client)
+SetClient(remote.Client)
 // Identifier returns the UUID of this backup as tracked by the panel
 // instance.
 Identifier() string
@@ -41,7 +41,7 @@ type BackupInterface interface {
 WithLogContext(map[string]interface{})
 // Generate creates a backup in whatever the configured source for the
 // specific implementation is.
-Generate(ctx context.Context, basePath string, ignore string) (*ArchiveDetails, error)
+Generate(context.Context, string, string) (*ArchiveDetails, error)
 // Ignored returns the ignored files for this backup instance.
 Ignored() string
 // Checksum returns a SHA1 checksum for the generated backup.
@@ -53,13 +53,13 @@ type BackupInterface interface {
 // to store it until it is moved to the final spot.
 Path() string
 // Details returns details about the archive.
-Details(ctx context.Context) (*ArchiveDetails, error)
+Details(context.Context, []remote.BackupPart) (*ArchiveDetails, error)
 // Remove removes a backup file.
 Remove() error
 // Restore is called when a backup is ready to be restored to the disk from
 // the given source. Not every backup implementation will support this nor
 // will every implementation require a reader be provided.
-Restore(ctx context.Context, reader io.Reader, callback RestoreCallback) error
+Restore(context.Context, io.Reader, RestoreCallback) error
 }
 
 type Backup struct {
@@ -119,8 +119,8 @@ func (b *Backup) Checksum() ([]byte, error) {
 
 // Details returns both the checksum and size of the archive currently stored on
 // the disk to the caller.
-func (b *Backup) Details(ctx context.Context) (*ArchiveDetails, error) {
-ad := ArchiveDetails{ChecksumType: "sha1"}
+func (b *Backup) Details(ctx context.Context, parts []remote.BackupPart) (*ArchiveDetails, error) {
+ad := ArchiveDetails{ChecksumType: "sha1", Parts: parts}
 g, ctx := errgroup.WithContext(ctx)
 
 g.Go(func() error {
@@ -162,9 +162,10 @@ func (b *Backup) log() *log.Entry {
 }
 
 type ArchiveDetails struct {
-Checksum string `json:"checksum"`
-ChecksumType string `json:"checksum_type"`
-Size int64 `json:"size"`
+Checksum string `json:"checksum"`
+ChecksumType string `json:"checksum_type"`
+Size int64 `json:"size"`
+Parts []remote.BackupPart `json:"parts"`
 }
 
 // ToRequest returns a request object.
@@ -174,5 +175,6 @@ func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
 ChecksumType: ad.ChecksumType,
 Size: ad.Size,
 Successful: successful,
+Parts: ad.Parts,
 }
 }
@@ -69,7 +69,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
 }
 b.log().Info("created backup successfully")
 
-ad, err := b.Details(ctx)
+ad, err := b.Details(ctx, nil)
 if err != nil {
 return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
 }
@@ -71,10 +71,11 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*Arch
 }
 defer rc.Close()
 
-if err := s.generateRemoteRequest(ctx, rc); err != nil {
+parts, err := s.generateRemoteRequest(ctx, rc)
+if err != nil {
 return nil, err
 }
-ad, err := s.Details(ctx)
+ad, err := s.Details(ctx, parts)
 if err != nil {
 return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
 }
@@ -125,20 +126,20 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
 }
 
 // Generates the remote S3 request and begins the upload.
-func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) error {
+func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
 defer rc.Close()
 
 s.log().Debug("attempting to get size of backup...")
 size, err := s.Backup.Size()
 if err != nil {
-return err
+return nil, err
 }
 s.log().WithField("size", size).Debug("got size of backup")
 
 s.log().Debug("attempting to get S3 upload urls from Panel...")
 urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
 if err != nil {
-return err
+return nil, err
 }
 s.log().Debug("got S3 upload urls from the Panel")
 s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
@@ -156,22 +157,26 @@ func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser)
 }
 
 // Attempt to upload the part.
-if _, err := uploader.uploadPart(ctx, part, partSize); err != nil {
+etag, err := uploader.uploadPart(ctx, part, partSize)
+if err != nil {
 s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
-return err
+return nil, err
 }
+
+uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
+ETag: etag,
+PartNumber: i + 1,
+})
 s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
 }
 
 s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
 
-return nil
+return uploader.uploadedParts, nil
 }
 
 type s3FileUploader struct {
 io.ReadCloser
-client *http.Client
+client *http.Client
+uploadedParts []remote.BackupPart
 }
 
 // newS3FileUploader returns a new file uploader instance.
@@ -8,6 +8,7 @@ import (
 "path/filepath"
 "strings"
 "sync"
+"sync/atomic"
 
 "emperror.dev/errors"
 "github.com/apex/log"
@@ -17,6 +18,7 @@ import (
 ignore "github.com/sabhiram/go-gitignore"
 
 "github.com/pterodactyl/wings/config"
+"github.com/pterodactyl/wings/system"
 )
 
 const memory = 4 * 1024
@@ -28,6 +30,62 @@ var pool = sync.Pool{
 },
 }
 
+// Progress is used to track the progress of any I/O operation that are being
+// performed.
+type Progress struct {
+// written is the total size of the files that have been written to the writer.
+written int64
+// Total is the total size of the archive in bytes.
+total int64
+// w .
+w io.Writer
+}
+
+// NewProgress .
+func NewProgress(total int64) *Progress {
+return &Progress{total: total}
+}
+
+// Written returns the total number of bytes written.
+// This function should be used when the progress is tracking data being written.
+func (p *Progress) Written() int64 {
+return atomic.LoadInt64(&p.written)
+}
+
+// Total returns the total size in bytes.
+func (p *Progress) Total() int64 {
+return atomic.LoadInt64(&p.total)
+}
+
+// Write totals the number of bytes that have been written to the writer.
+func (p *Progress) Write(v []byte) (int, error) {
+n := len(v)
+atomic.AddInt64(&p.written, int64(n))
+if p.w != nil {
+return p.w.Write(v)
+}
+return n, nil
+}
+
+// Progress returns a formatted progress string for the current progress.
+func (p *Progress) Progress(width int) string {
+current := p.Written()
+total := p.Total()
+
+// v = 100 (Progress)
+// size = 1000 (Content-Length)
+// p / size = 0.1
+// * 100 = 10% (Multiply by 100 to get a percentage of the download)
+// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
+// 2.5 (Number of ticks as a float64)
+// 2 (convert to an integer)
+
+// We have to cast these numbers to float in order to get a float result from the division.
+ticks := ((float64(current) / float64(total)) * 100) / (float64(100) / float64(width))
+bar := strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks))
+return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
+}
+
 type Archive struct {
 // BasePath is the absolute path to create the archive from where Files and Ignore are
 // relative to.
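Because Progress is a plain io.Writer wrapper, it can also be attached to any copy with io.TeeReader, which is how the transfer download path earlier in this diff uses it. A small self-contained sketch of that pattern (the byte counts and bar width here are arbitrary, and the exact formatted output depends on system.FormatBytes):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pterodactyl/wings/server/filesystem"
)

func main() {
	src := bytes.Repeat([]byte("x"), 1<<20) // stand-in for a remote archive body
	p := filesystem.NewProgress(int64(len(src)))

	// Copy through a TeeReader so every chunk read is counted by the tracker.
	if _, err := io.Copy(io.Discard, io.TeeReader(bytes.NewReader(src), p)); err != nil {
		panic(err)
	}

	// Prints a fully filled 25-character bar, e.g. "Downloading [====...====] 1 MiB / 1 MiB".
	fmt.Println("Downloading " + p.Progress(25))
}
```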
@@ -40,10 +98,13 @@ type Archive struct {
 // Files specifies the files to archive, this takes priority over the Ignore option, if
 // unspecified, all files in the BasePath will be archived unless Ignore is set.
 Files []string
+
+// Progress wraps the writer of the archive to pass through the progress tracker.
+Progress *Progress
 }
 
-// Create creates an archive at dst with all of the files defined in the
-// included files struct.
+// Create creates an archive at dst with all the files defined in the
+// included Files array.
 func (a *Archive) Create(dst string) error {
 f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
 if err != nil {
@@ -62,13 +123,34 @@ func (a *Archive) Create(dst string) error {
 writer = f
 }
 
+// Choose which compression level to use based on the compression_level configuration option
+var compressionLevel int
+switch config.Get().System.Backups.CompressionLevel {
+case "none":
+compressionLevel = pgzip.NoCompression
+case "best_compression":
+compressionLevel = pgzip.BestCompression
+case "best_speed":
+fallthrough
+default:
+compressionLevel = pgzip.BestSpeed
+}
+
 // Create a new gzip writer around the file.
-gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
+gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
 _ = gw.SetConcurrency(1<<20, 1)
 defer gw.Close()
 
+var pw io.Writer
+if a.Progress != nil {
+a.Progress.w = gw
+pw = a.Progress
+} else {
+pw = gw
+}
+
 // Create a new tar writer around the gzip writer.
-tw := tar.NewWriter(gw)
+tw := tar.NewWriter(pw)
 defer tw.Close()
 
 // Configure godirwalk.
@@ -103,7 +185,7 @@ func (a *Archive) Create(dst string) error {
 // being generated.
 func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
 return func(path string, de *godirwalk.Dirent) error {
-// Skip directories because we walking them recursively.
+// Skip directories because we are walking them recursively.
 if de.IsDir() {
 return nil
 }
@@ -148,7 +230,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
 // Adds a given file path to the final archive being created.
 func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
 // Lstat the file, this will give us the same information as Stat except that it will not
-// follow a symlink to it's target automatically. This is important to avoid including
+// follow a symlink to its target automatically. This is important to avoid including
 // files that exist outside the server root unintentionally in the backup.
 s, err := os.Lstat(p)
 if err != nil {
@@ -71,7 +71,7 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
 // If space is -1 or 0 just return true, means they're allowed unlimited.
 //
 // Technically we could skip disk space calculation because we don't need to check if the
-// server exceeds it's limit but because this method caches the disk usage it would be best
+// server exceeds its limit but because this method caches the disk usage it would be best
 // to calculate the disk usage and always return true.
 if fs.MaxDisk() == 0 {
 return true
@@ -449,6 +449,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
 },
 Privileged: true,
 NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
+UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
 }
 
 // Ensure the root directory for the server exists properly before attempting
@@ -23,7 +23,7 @@ type SinkPool struct {
 }
 
 // NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
-// server instance for it's full lifetime.
+// server instance for its full lifetime.
 func NewSinkPool() *SinkPool {
 return &SinkPool{}
 }