Compare commits
1 commit
release/v1 ... release/v1

| Author | SHA1 | Date |
|--------|------|------|
|  | ce5a470ae8 |  |

.github/FUNDING.yml (vendored, 3 changed lines)

@@ -1 +1,2 @@
-github: [ matthewpi ]
+github: [ DaneEveritt ]
+custom: [ "https://paypal.me/PterodactylSoftware" ]

.github/workflows/build-test.yml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
 fail-fast: false
 matrix:
 os: [ ubuntu-20.04 ]
-go: [ '1.18.7' ]
+go: [ '^1.17' ]
 goos: [ linux ]
 goarch: [ amd64, arm64 ]
 runs-on: ${{ matrix.os }}

.github/workflows/release.yml (vendored, 2 changed lines)

@@ -11,7 +11,7 @@ jobs:
 uses: actions/checkout@v2
 - uses: actions/setup-go@v2
 with:
-go-version: '1.18.7'
+go-version: '^1.17'
 - name: Build
 env:
 REF: ${{ github.ref }}

CHANGELOG.md (32 changed lines)

@@ -1,37 +1,5 @@
 # Changelog

-## v1.7.5
-### Fixed
-* CVE-2023-32080
-
-## v1.7.4
-### Fixed
-* CVE-2023-25168
-
-## v1.7.3
-### Fixed
-* CVE-2023-25152
-
-## v1.7.2
-### Fixed
-* The S3 backup driver now supports Cloudflare R2
-
-### Added
-* During a server transfer, there is a new "Archiving" status that outputs the progress of creating the server transfer archive.
-* Adds a configuration option to control the list of trusted proxies that can be used to determine the client IP address.
-* Adds a configuration option to control the Docker username space setting when Wings creates containers.
-
-### Changed
-* Releases are now built using `Go 1.18` — the minimum version required to build Wings is now `Go 1.18`.
-
-## v1.7.1
-### Fixed
-* YAML parser has been updated to fix some strange issues
-
-### Added
-* Added `Force Outgoing IP` option for servers to ensure outgoing traffic uses the server's IP address
-* Adds an option to control the level of gzip compression for backups
-
 ## v1.7.0
 ### Fixed
 * Fixes multi-platform support for Wings' Docker image.

@@ -1,5 +1,5 @@
 # Stage 1 (Build)
-FROM golang:1.18-alpine AS builder
+FROM golang:1.17-alpine AS builder

 ARG VERSION
 RUN apk add --update --no-cache git make

@@ -14,7 +14,7 @@ dependencies, and allowing users to authenticate with the same credentials they

 ## Sponsors
 I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's developement.
-[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
+[Interested in becoming a sponsor?](https://github.com/sponsors/DaneEveritt)

 | Company | About |
 | ------- | ----- |

@@ -58,7 +58,7 @@ func newDiagnosticsCommand() *cobra.Command {
 return command
 }

-// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.
+// diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
 // We collect:
 // - wings and docker versions
 // - relevant parts of daemon configuration

@@ -81,7 +81,7 @@ func init() {
 rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
 rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
 rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
-rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
+rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
 rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
 rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")

@@ -162,7 +162,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 ticker := time.NewTicker(time.Minute)
 // Every minute, write the current server states to the disk to allow for a more
 // seamless hard-reboot process in which wings will re-sync server states based
-// on its last tracked state.
+// on it's last tracked state.
 go func() {
 for {
 select {

@@ -91,9 +91,6 @@ type ApiConfiguration struct {

 // The maximum size for files uploaded through the Panel in MB.
 UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
-
-// A list of IP address of proxies that may send a X-Forwarded-For header to set the true clients IP
-TrustedProxies []string `json:"trusted_proxies" yaml:"trusted_proxies"`
 }

 // RemoteQueryConfiguration defines the configuration settings for remote requests

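Note: the `TrustedProxies` list removed above is what the router hands to gin's `SetTrustedProxies` (see the router hunk further down), which controls whether `ClientIP()` believes an `X-Forwarded-For` header. A minimal, self-contained sketch of that behaviour; the addresses and handler are illustrative, not part of Wings.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gin-gonic/gin"
)

func main() {
	gin.SetMode(gin.ReleaseMode)
	r := gin.New()
	// Only peers in this list may set the real client IP via X-Forwarded-For;
	// requests from any other peer fall back to the raw remote address.
	_ = r.SetTrustedProxies([]string{"10.0.0.0/8"})
	r.GET("/", func(c *gin.Context) {
		c.String(http.StatusOK, c.ClientIP())
	})

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.RemoteAddr = "10.0.0.5:4321" // inside the trusted range
	req.Header.Set("X-Forwarded-For", "203.0.113.10")
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)
	fmt.Println(w.Body.String()) // prints 203.0.113.10; with no trusted proxies it would print 10.0.0.5
}
```
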
@@ -222,15 +219,6 @@ type Backups struct {
 //
 // Defaults to 0 (unlimited)
 WriteLimit int `default:"0" yaml:"write_limit"`
-
-// CompressionLevel determines how much backups created by wings should be compressed.
-//
-// "none" -> no compression will be applied
-// "best_speed" -> uses gzip level 1 for fast speed
-// "best_compression" -> uses gzip level 9 for minimal disk space useage
-//
-// Defaults to "best_speed" (level 1)
-CompressionLevel string `default:"best_speed" yaml:"compression_level"`
 }

 type Transfers struct {

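Note: the removed `CompressionLevel` comment documents its own mapping onto gzip levels. A short sketch of that mapping, assuming the standard `compress/gzip` constants (the klauspost/pgzip writer listed in go.mod uses the same numeric levels); the helper function is illustrative, not Wings code.

```go
package main

import (
	"compress/gzip"
	"fmt"
)

// compressionLevel maps the documented option values onto gzip levels.
func compressionLevel(name string) int {
	switch name {
	case "none":
		return gzip.NoCompression // 0: no compression applied
	case "best_compression":
		return gzip.BestCompression // 9: minimal disk space usage
	default:
		return gzip.BestSpeed // 1: the documented default, "best_speed"
	}
}

func main() {
	fmt.Println(compressionLevel("best_speed"))       // 1
	fmt.Println(compressionLevel("best_compression")) // 9
	fmt.Println(compressionLevel("none"))             // 0
}
```
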
@@ -78,14 +78,6 @@ type DockerConfiguration struct {
 Overhead Overhead `json:"overhead" yaml:"overhead"`

 UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
-
-// Sets the user namespace mode for the container when user namespace remapping option is
-// enabled.
-//
-// If the value is blank, the daemon's user namespace remapping configuration is used,
-// if the value is "host", then the pterodactyl containers are started with user namespace
-// remapping disabled
-UsernsMode string `default:"" json:"userns_mode" yaml:"userns_mode"`
 }

 // RegistryConfiguration defines the authentication credentials for a given

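Note: the removed `UsernsMode` value is handed straight to the Docker `HostConfig` in the `Create()` hunk further down. A minimal sketch of what the two documented values mean, using the Docker API types Wings already imports; the example value is illustrative.

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// "host" starts the container with user namespace remapping disabled;
	// an empty string defers to the daemon's own remapping configuration.
	hostConf := &container.HostConfig{
		UsernsMode: container.UsernsMode("host"),
	}
	fmt.Println(hostConf.UsernsMode.IsHost()) // true
}
```
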
@@ -12,11 +12,6 @@ import (
 // Defines the allocations available for a given server. When using the Docker environment
 // driver these correspond to mappings for the container that allow external connections.
 type Allocations struct {
-// ForceOutgoingIP causes a dedicated bridge network to be created for the
-// server with a special option, causing Docker to SNAT outgoing traffic to
-// the DefaultMapping's IP. This is important to servers which rely on external
-// services that check the IP of the server (Source Engine servers, for example).
-ForceOutgoingIP bool `json:"force_outgoing_ip"`
 // Defines the default allocation that should be used for this server. This is
 // what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
 // files or the startup arguments for a server.

@@ -41,12 +41,12 @@ func ConfigureDocker(ctx context.Context) error {
 nw := config.Get().Docker.Network
 resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
 if err != nil {
-if !client.IsErrNotFound(err) {
-return err
-}
-log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
-if err := createDockerNetwork(ctx, cli); err != nil {
-return err
-}
+if client.IsErrNotFound(err) {
+log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
+if err := createDockerNetwork(ctx, cli); err != nil {
+return err
+}
+} else {
+return err
+}
 }

@@ -147,12 +147,10 @@ func (e *Environment) InSituUpdate() error {
 // currently available for it. If the container already exists it will be
 // returned.
 func (e *Environment) Create() error {
-ctx := context.Background()
-
 // If the container already exists don't hit the user with an error, just return
 // the current information about it which is what we would do when creating the
 // container anyways.
-if _, err := e.ContainerInspect(ctx); err == nil {
+if _, err := e.ContainerInspect(context.Background()); err == nil {
 return nil
 } else if !client.IsErrNotFound(err) {
 return errors.Wrap(err, "environment/docker: failed to inspect container")

@@ -192,34 +190,7 @@ func (e *Environment) Create() error {
 },
 }

-networkMode := container.NetworkMode(config.Get().Docker.Network.Mode)
-if a.ForceOutgoingIP {
-e.log().Debug("environment/docker: forcing outgoing IP address")
-networkName := strings.ReplaceAll(e.Id, "-", "")
-networkMode = container.NetworkMode(networkName)
-
-if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
-if !client.IsErrNotFound(err) {
-return err
-}
-
-if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
-Driver: "bridge",
-EnableIPv6: false,
-Internal: false,
-Attachable: false,
-Ingress: false,
-ConfigOnly: false,
-Options: map[string]string{
-"encryption": "false",
-"com.docker.network.bridge.default_bridge": "false",
-"com.docker.network.host_ipv4": a.DefaultMapping.Ip,
-},
-}); err != nil {
-return err
-}
-}
-}
+tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))

 hostConf := &container.HostConfig{
 PortBindings: a.DockerBindings(),

@@ -231,7 +202,7 @@ func (e *Environment) Create() error {
 // Configure the /tmp folder mapping in containers. This is necessary for some
 // games that need to make use of it for downloads and other installation processes.
 Tmpfs: map[string]string{
-"/tmp": "rw,exec,nosuid,size=" + strconv.Itoa(int(config.Get().Docker.TmpfsSize)) + "M",
+"/tmp": "rw,exec,nosuid,size=" + tmpfsSize + "M",
 },

 // Define resource limits for the container based on the data passed through

@@ -260,11 +231,10 @@ func (e *Environment) Create() error {
 "setpcap", "mknod", "audit_write", "net_raw", "dac_override",
 "fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
 },
-NetworkMode: networkMode,
-UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
+NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
 }

-if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {
+if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
 return errors.Wrap(err, "environment/docker: failed to create container")
 }

go.mod (119 changed lines)

@@ -1,124 +1,129 @@
 module github.com/pterodactyl/wings

-go 1.18
+go 1.17

 require (
 emperror.dev/errors v0.8.1
-github.com/AlecAivazis/survey/v2 v2.3.6
+github.com/AlecAivazis/survey/v2 v2.3.4
 github.com/Jeffail/gabs/v2 v2.6.1
 github.com/NYTimes/logrotate v1.0.0
 github.com/apex/log v1.9.0
 github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
 github.com/beevik/etree v1.1.0
 github.com/buger/jsonparser v1.1.1
-github.com/cenkalti/backoff/v4 v4.1.3
+github.com/cenkalti/backoff/v4 v4.1.2
 github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
-github.com/creasty/defaults v1.6.0
-github.com/docker/docker v20.10.18+incompatible
+github.com/creasty/defaults v1.5.2
+github.com/docker/docker v20.10.14+incompatible
 github.com/docker/go-connections v0.4.0
 github.com/fatih/color v1.13.0
 github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
-github.com/gabriel-vasile/mimetype v1.4.1
-github.com/gammazero/workerpool v1.1.3
+github.com/gabriel-vasile/mimetype v1.4.0
+github.com/gammazero/workerpool v1.1.2
 github.com/gbrlsnchs/jwt/v3 v3.0.1
-github.com/gin-gonic/gin v1.8.1
-github.com/glebarez/sqlite v1.4.8
-github.com/go-co-op/gocron v1.17.0
-github.com/goccy/go-json v0.9.11
+github.com/gin-gonic/gin v1.7.7
 github.com/google/uuid v1.3.0
 github.com/gorilla/websocket v1.5.0
 github.com/iancoleman/strcase v0.2.0
-github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
-github.com/juju/ratelimit v1.0.2
-github.com/karrick/godirwalk v1.17.0
-github.com/klauspost/compress v1.15.11
+github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
+github.com/juju/ratelimit v1.0.1
+github.com/karrick/godirwalk v1.16.1
 github.com/klauspost/pgzip v1.2.5
 github.com/magiconair/properties v1.8.6
-github.com/mattn/go-colorable v0.1.13
+github.com/mattn/go-colorable v0.1.12
 github.com/mholt/archiver/v3 v3.5.1
 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
 github.com/patrickmn/go-cache v2.1.0+incompatible
-github.com/pkg/sftp v1.13.5
+github.com/pkg/sftp v1.13.4
 github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
-github.com/spf13/cobra v1.5.0
-github.com/stretchr/testify v1.8.0
-golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
-golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
-gopkg.in/ini.v1 v1.67.0
+github.com/spf13/cobra v1.4.0
+github.com/stretchr/testify v1.7.5
+golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+gopkg.in/ini.v1 v1.66.4
 gopkg.in/yaml.v2 v2.4.0
-gopkg.in/yaml.v3 v3.0.1
-gorm.io/gorm v1.23.10
 )

+require (
+github.com/glebarez/sqlite v1.4.6
+github.com/go-co-op/gocron v1.15.0
+github.com/goccy/go-json v0.9.6
+github.com/klauspost/compress v1.15.1
+gorm.io/gorm v1.23.8
+)
+
+require golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
+
 require (
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-github.com/Microsoft/go-winio v0.6.0 // indirect
-github.com/Microsoft/hcsshim v0.9.4 // indirect
+github.com/Microsoft/go-winio v0.5.2 // indirect
+github.com/Microsoft/hcsshim v0.9.2 // indirect
 github.com/andybalholm/brotli v1.0.4 // indirect
 github.com/beorn7/perks v1.0.1 // indirect
 github.com/cespare/xxhash/v2 v2.1.2 // indirect
+github.com/containerd/containerd v1.6.2 // indirect
 github.com/containerd/fifo v1.0.0 // indirect
 github.com/davecgh/go-spew v1.1.1 // indirect
 github.com/docker/distribution v2.8.1+incompatible // indirect
 github.com/docker/go-metrics v0.0.1 // indirect
-github.com/docker/go-units v0.5.0 // indirect
+github.com/docker/go-units v0.4.0 // indirect
 github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
-github.com/fsnotify/fsnotify v1.5.4 // indirect
-github.com/gammazero/deque v0.2.0 // indirect
+github.com/fsnotify/fsnotify v1.5.1 // indirect
+github.com/gammazero/deque v0.1.1 // indirect
 github.com/gin-contrib/sse v0.1.0 // indirect
-github.com/glebarez/go-sqlite v1.19.1 // indirect
+github.com/glebarez/go-sqlite v1.17.3 // indirect
 github.com/go-playground/locales v0.14.0 // indirect
 github.com/go-playground/universal-translator v0.18.0 // indirect
-github.com/go-playground/validator/v10 v10.11.1 // indirect
+github.com/go-playground/validator/v10 v10.10.1 // indirect
 github.com/gogo/protobuf v1.3.2 // indirect
 github.com/golang/protobuf v1.5.2 // indirect
 github.com/golang/snappy v0.0.4 // indirect
-github.com/inconshreveable/mousetrap v1.0.1 // indirect
+github.com/gorilla/mux v1.7.4 // indirect
+github.com/inconshreveable/mousetrap v1.0.0 // indirect
 github.com/jinzhu/inflection v1.0.0 // indirect
 github.com/jinzhu/now v1.1.5 // indirect
 github.com/json-iterator/go v1.1.12 // indirect
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 github.com/kr/fs v0.1.0 // indirect
 github.com/leodido/go-urn v1.2.1 // indirect
-github.com/magefile/mage v1.14.0 // indirect
-github.com/mattn/go-isatty v0.0.16 // indirect
-github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
+github.com/magefile/mage v1.13.0 // indirect
+github.com/mattn/go-isatty v0.0.14 // indirect
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
-github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 github.com/modern-go/reflect2 v1.0.2 // indirect
 github.com/morikuni/aec v1.0.0 // indirect
 github.com/nwaples/rardecode v1.1.3 // indirect
 github.com/opencontainers/go-digest v1.0.0 // indirect
-github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
-github.com/pelletier/go-toml/v2 v2.0.5 // indirect
-github.com/pierrec/lz4/v4 v4.1.17 // indirect
+github.com/opencontainers/image-spec v1.0.2 // indirect
+github.com/pierrec/lz4/v4 v4.1.14 // indirect
 github.com/pkg/errors v0.9.1 // indirect
 github.com/pmezard/go-difflib v1.0.0 // indirect
-github.com/prometheus/client_golang v1.13.0 // indirect
+github.com/prometheus/client_golang v1.12.1 // indirect
 github.com/prometheus/client_model v0.2.0 // indirect
-github.com/prometheus/common v0.37.0 // indirect
-github.com/prometheus/procfs v0.8.0 // indirect
-github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
+github.com/prometheus/common v0.32.1 // indirect
+github.com/prometheus/procfs v0.7.3 // indirect
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
 github.com/robfig/cron/v3 v3.0.1 // indirect
-github.com/sirupsen/logrus v1.9.0 // indirect
+github.com/sirupsen/logrus v1.8.1 // indirect
 github.com/spf13/pflag v1.0.5 // indirect
 github.com/ugorji/go/codec v1.2.7 // indirect
 github.com/ulikunitz/xz v0.5.10 // indirect
 github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
-go.uber.org/atomic v1.10.0 // indirect
+go.uber.org/atomic v1.9.0 // indirect
 go.uber.org/multierr v1.8.0 // indirect
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
-golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
-golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
-golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 golang.org/x/text v0.3.7 // indirect
-golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
-golang.org/x/tools v0.1.12 // indirect
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
-google.golang.org/protobuf v1.28.1 // indirect
-modernc.org/libc v1.20.0 // indirect
-modernc.org/mathutil v1.5.0 // indirect
-modernc.org/memory v1.4.0 // indirect
-modernc.org/sqlite v1.19.1 // indirect
+golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
+google.golang.org/grpc v1.45.0 // indirect
+google.golang.org/protobuf v1.28.0 // indirect
+gopkg.in/yaml.v3 v3.0.1 // indirect
+modernc.org/libc v1.16.17 // indirect
+modernc.org/mathutil v1.4.1 // indirect
+modernc.org/memory v1.1.1 // indirect
+modernc.org/sqlite v1.17.3 // indirect
 )

@@ -15,7 +15,7 @@ import (
 "github.com/icza/dyno"
 "github.com/magiconair/properties"
 "gopkg.in/ini.v1"
-"gopkg.in/yaml.v3"
+"gopkg.in/yaml.v2"

 "github.com/pterodactyl/wings/config"
 )

@@ -3,11 +3,10 @@ package remote
 import (
 "context"
 "fmt"
+"github.com/pterodactyl/wings/internal/models"
 "strconv"
 "sync"

-"github.com/pterodactyl/wings/internal/models"
-
 "emperror.dev/errors"
 "github.com/apex/log"
 "golang.org/x/sync/errgroup"

@@ -2,11 +2,10 @@ package remote

 import (
 "bytes"
-"regexp"
-"strings"
-
 "github.com/apex/log"
 "github.com/goccy/go-json"
+"regexp"
+"strings"

 "github.com/pterodactyl/wings/parser"
 )

@@ -157,15 +156,9 @@ type BackupRemoteUploadResponse struct {
 PartSize int64 `json:"part_size"`
 }

-type BackupPart struct {
-ETag string `json:"etag"`
-PartNumber int `json:"part_number"`
-}
-
 type BackupRequest struct {
 Checksum string `json:"checksum"`
 ChecksumType string `json:"checksum_type"`
 Size int64 `json:"size"`
 Successful bool `json:"successful"`
-Parts []BackupPart `json:"parts"`
 }

@@ -4,7 +4,6 @@ import (
 "github.com/apex/log"
 "github.com/gin-gonic/gin"

-"github.com/pterodactyl/wings/config"
 "github.com/pterodactyl/wings/remote"
 "github.com/pterodactyl/wings/router/middleware"
 wserver "github.com/pterodactyl/wings/server"

@@ -16,7 +15,6 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {

 router := gin.New()
 router.Use(gin.Recovery())
-router.SetTrustedProxies(config.Get().Api.TrustedProxies)
 router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
 router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
 // @todo log this into a different file so you can setup IP blocking for abusive requests and such.

@@ -180,7 +180,7 @@ func postServerReinstall(c *gin.Context) {
 c.Status(http.StatusAccepted)
 }

-// Deletes a server from the wings daemon and dissociate its objects.
+// Deletes a server from the wings daemon and dissociate it's objects.
 func deleteServer(c *gin.Context) {
 s := middleware.ExtractServer(c)

@@ -602,7 +602,7 @@ func postServerUploadFiles(c *gin.Context) {
 NewServerError(err, s).Abort(c)
 return
 } else {
-s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.ClientIP()), server.ActivityFileUploaded, models.ActivityMeta{
+s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.Request.RemoteAddr), server.ActivityFileUploaded, models.ActivityMeta{
 "file": header.Filename,
 "directory": filepath.Clean(directory),
 })

@@ -32,7 +32,7 @@ func getServerWebsocket(c *gin.Context) {
 ctx, cancel := context.WithCancel(c.Request.Context())
 defer cancel()

-handler, err := websocket.GetHandler(s, c.Writer, c.Request, c)
+handler, err := websocket.GetHandler(s, c.Writer, c.Request)
 if err != nil {
 NewServerError(err, s).Abort(c)
 return

@@ -12,6 +12,7 @@ import (
 "path/filepath"
 "strconv"
 "strings"
+"sync/atomic"
 "time"

 "emperror.dev/errors"

@@ -29,9 +30,19 @@ import (
 "github.com/pterodactyl/wings/router/tokens"
 "github.com/pterodactyl/wings/server"
 "github.com/pterodactyl/wings/server/filesystem"
+"github.com/pterodactyl/wings/system"
 )

-const progressWidth = 25
+// Number of ticks in the progress bar
+const ticks = 25
+
+// 100% / number of ticks = percentage represented by each tick
+const tickPercentage = 100 / ticks
+
+type downloadProgress struct {
+size int64
+progress int64
+}

 // Data passed over to initiate a server transfer.
 type serverTransferRequest struct {

@@ -84,7 +95,7 @@ func getServerArchive(c *gin.Context) {
 return
 }

-// Compute sha256 checksum.
+// Compute sha1 checksum.
 h := sha256.New()
 f, err := os.Open(archivePath)
 if err != nil {

@@ -173,35 +184,11 @@ func postServerArchive(c *gin.Context) {
 return
 }

-// Get the disk usage of the server (used to calculate the progress of the archive process)
-rawSize, err := s.Filesystem().DiskUsage(true)
-if err != nil {
-sendTransferLog("Failed to get disk usage for server, aborting transfer..")
-l.WithField("error", err).Error("failed to get disk usage for server")
-return
-}
-
 // Create an archive of the entire server's data directory.
 a := &filesystem.Archive{
 BasePath: s.Filesystem().Path(),
-Progress: filesystem.NewProgress(rawSize),
 }

-// Send the archive progress to the websocket every 3 seconds.
-ctx, cancel := context.WithCancel(s.Context())
-defer cancel()
-go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
-defer t.Stop()
-for {
-select {
-case <-ctx.Done():
-return
-case <-t.C:
-sendTransferLog("Archiving " + p.Progress(progressWidth))
-}
-}
-}(ctx, a.Progress, time.NewTicker(5*time.Second))
-
 // Attempt to get an archive of the server.
 if err := a.Create(getArchivePath(s.ID())); err != nil {
 sendTransferLog("An error occurred while archiving the server: " + err.Error())

@@ -209,12 +196,6 @@ func postServerArchive(c *gin.Context) {
 return
 }

-// Cancel the progress ticker.
-cancel()
-
-// Show 100% completion.
-sendTransferLog("Archiving " + a.Progress.Progress(progressWidth))
-
 sendTransferLog("Successfully created archive, attempting to notify panel..")
 l.Info("successfully created server transfer archive, notifying panel..")

@@ -242,6 +223,12 @@ func postServerArchive(c *gin.Context) {
 c.Status(http.StatusAccepted)
 }

+func (w *downloadProgress) Write(v []byte) (int, error) {
+n := len(v)
+atomic.AddInt64(&w.progress, int64(n))
+return n, nil
+}
+
 // Log helper function to attach all errors and info output to a consistently formatted
 // log string for easier querying.
 func (str serverTransferRequest) log() *log.Entry {

@@ -334,7 +321,7 @@ func postTransfer(c *gin.Context) {
 manager := middleware.ExtractManager(c)
 u, err := uuid.Parse(data.ServerID)
 if err != nil {
-_ = WithError(c, err)
+WithError(c, err)
 return
 }
 // Force the server ID to be a valid UUID string at this point. If it is not an error

@@ -344,12 +331,11 @@ func postTransfer(c *gin.Context) {

 data.log().Info("handling incoming server transfer request")
 go func(data *serverTransferRequest) {
-ctx := context.Background()
 hasError := true

 // Create a new server installer. This will only configure the environment and not
 // run the installer scripts.
-i, err := installer.New(ctx, manager, data.Server)
+i, err := installer.New(context.Background(), manager, data.Server)
 if err != nil {
 _ = data.sendTransferStatus(manager.Client(), false)
 data.log().WithField("error", err).Error("failed to validate received server data")

@@ -421,22 +407,25 @@ func postTransfer(c *gin.Context) {
 sendTransferLog("Writing archive to disk...")
 data.log().Info("writing transfer archive to disk...")

-progress := filesystem.NewProgress(size)
-// Send the archive progress to the websocket every 3 seconds.
-ctx, cancel := context.WithCancel(ctx)
-defer cancel()
-go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
-defer t.Stop()
-for {
-select {
-case <-ctx.Done():
-return
-case <-t.C:
-sendTransferLog("Downloading " + p.Progress(progressWidth))
-}
+// Copy the file.
+progress := &downloadProgress{size: size}
+ticker := time.NewTicker(3 * time.Second)
+go func(progress *downloadProgress, t *time.Ticker) {
+for range ticker.C {
+// p = 100 (Downloaded)
+// size = 1000 (Content-Length)
+// p / size = 0.1
+// * 100 = 10% (Multiply by 100 to get a percentage of the download)
+// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
+// 2.5 (Number of ticks as a float64)
+// 2 (convert to an integer)
+p := atomic.LoadInt64(&progress.progress)
+// We have to cast these numbers to float in order to get a float result from the division.
+width := ((float64(p) / float64(size)) * 100) / tickPercentage
+bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
+sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
 }
-}(ctx, progress, time.NewTicker(5*time.Second))
+}(progress, ticker)

 var reader io.Reader
 downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024

@@ -449,16 +438,18 @@ func postTransfer(c *gin.Context) {

 buf := make([]byte, 1024*4)
 if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
+ticker.Stop()
 _ = file.Close()

 sendTransferLog("Failed while writing archive file to disk: " + err.Error())
 data.log().WithField("error", err).Error("failed to copy archive file to disk")
 return
 }
-cancel()
+ticker.Stop()

 // Show 100% completion.
-sendTransferLog("Downloading " + progress.Progress(progressWidth))
+humanSize := system.FormatBytes(progress.size)
+sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)

 if err := file.Close(); err != nil {
 data.log().WithField("error", err).Error("unable to close archive file on local filesystem")

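Note: the tick arithmetic spelled out in the comments above reduces to a single formula. A standalone sketch with illustrative byte counts (the helper function is not part of the codebase):

```go
package main

import (
	"fmt"
	"strings"
)

const ticks = 25
const tickPercentage = 100 / ticks // each tick stands for 4% of the download

// renderBar reproduces the bar arithmetic from the transfer hunk above.
func renderBar(downloaded, total int64) string {
	// Percentage complete, divided by the percentage each tick represents,
	// gives the number of '=' characters to draw.
	width := ((float64(downloaded) / float64(total)) * 100) / tickPercentage
	return "[" + strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width)) + "]"
}

func main() {
	fmt.Println(renderBar(100, 1000)) // 10% -> 2 ticks, matching the worked example in the comments
	fmt.Println(renderBar(500, 1000)) // 50% -> 12 ticks
}
```
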
@@ -12,7 +12,6 @@ import (
 "emperror.dev/errors"
 "github.com/apex/log"
 "github.com/gbrlsnchs/jwt/v3"
-"github.com/gin-gonic/gin"
 "github.com/goccy/go-json"
 "github.com/google/uuid"
 "github.com/gorilla/websocket"

@@ -80,7 +79,7 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
 }

 // GetHandler returns a new websocket handler using the context provided.
-func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
+func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Handler, error) {
 upgrader := websocket.Upgrader{
 // Ensure that the websocket request is originating from the Panel itself,
 // and not some other location.

@@ -112,7 +111,7 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin
 Connection: conn,
 jwt: nil,
 server: s,
-ra: s.NewRequestActivity("", c.ClientIP()),
+ra: s.NewRequestActivity("", r.RemoteAddr),
 uuid: u,
 }, nil
 }

@@ -1,5 +1,5 @@
 Name: ptero-wings
-Version: 1.7.0
+Version: 1.5.3
 Release: 1%{?dist}
 Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
 BuildArch: x86_64

@@ -91,9 +91,6 @@ rm -rf /var/log/pterodactyl
 wings --version

 %changelog
-* Wed Sep 14 2022 Chance Callahan <ccallaha@redhat.com> - 1.7.0-1
-- Updating specfile to match stable release.
-
 * Wed Oct 27 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.3-1
 - specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
 - Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.3

@@ -32,7 +32,7 @@ type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mti
 // noinspection GoNameStartsWithPackageName
 type BackupInterface interface {
 // SetClient sets the API request client on the backup interface.
-SetClient(remote.Client)
+SetClient(c remote.Client)
 // Identifier returns the UUID of this backup as tracked by the panel
 // instance.
 Identifier() string

@@ -41,7 +41,7 @@ type BackupInterface interface {
 WithLogContext(map[string]interface{})
 // Generate creates a backup in whatever the configured source for the
 // specific implementation is.
-Generate(context.Context, string, string) (*ArchiveDetails, error)
+Generate(ctx context.Context, basePath string, ignore string) (*ArchiveDetails, error)
 // Ignored returns the ignored files for this backup instance.
 Ignored() string
 // Checksum returns a SHA1 checksum for the generated backup.

@@ -53,13 +53,13 @@ type BackupInterface interface {
 // to store it until it is moved to the final spot.
 Path() string
 // Details returns details about the archive.
-Details(context.Context, []remote.BackupPart) (*ArchiveDetails, error)
+Details(ctx context.Context) (*ArchiveDetails, error)
 // Remove removes a backup file.
 Remove() error
 // Restore is called when a backup is ready to be restored to the disk from
 // the given source. Not every backup implementation will support this nor
 // will every implementation require a reader be provided.
-Restore(context.Context, io.Reader, RestoreCallback) error
+Restore(ctx context.Context, reader io.Reader, callback RestoreCallback) error
 }

 type Backup struct {

@@ -119,8 +119,8 @@ func (b *Backup) Checksum() ([]byte, error) {

 // Details returns both the checksum and size of the archive currently stored on
 // the disk to the caller.
-func (b *Backup) Details(ctx context.Context, parts []remote.BackupPart) (*ArchiveDetails, error) {
-ad := ArchiveDetails{ChecksumType: "sha1", Parts: parts}
+func (b *Backup) Details(ctx context.Context) (*ArchiveDetails, error) {
+ad := ArchiveDetails{ChecksumType: "sha1"}
 g, ctx := errgroup.WithContext(ctx)

 g.Go(func() error {

@@ -162,10 +162,9 @@ func (b *Backup) log() *log.Entry {
 }

 type ArchiveDetails struct {
 Checksum string `json:"checksum"`
 ChecksumType string `json:"checksum_type"`
 Size int64 `json:"size"`
-Parts []remote.BackupPart `json:"parts"`
 }

 // ToRequest returns a request object.

@@ -175,6 +174,5 @@ func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
 ChecksumType: ad.ChecksumType,
 Size: ad.Size,
 Successful: successful,
-Parts: ad.Parts,
 }
 }

@@ -69,7 +69,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
 }
 b.log().Info("created backup successfully")

-ad, err := b.Details(ctx, nil)
+ad, err := b.Details(ctx)
 if err != nil {
 return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
 }

@@ -71,11 +71,10 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*Arch
 }
 defer rc.Close()

-parts, err := s.generateRemoteRequest(ctx, rc)
-if err != nil {
+if err := s.generateRemoteRequest(ctx, rc); err != nil {
 return nil, err
 }
-ad, err := s.Details(ctx, parts)
+ad, err := s.Details(ctx)
 if err != nil {
 return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
 }

@@ -126,20 +125,20 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
 }

 // Generates the remote S3 request and begins the upload.
-func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
+func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) error {
 defer rc.Close()

 s.log().Debug("attempting to get size of backup...")
 size, err := s.Backup.Size()
 if err != nil {
-return nil, err
+return err
 }
 s.log().WithField("size", size).Debug("got size of backup")

 s.log().Debug("attempting to get S3 upload urls from Panel...")
 urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
 if err != nil {
-return nil, err
+return err
 }
 s.log().Debug("got S3 upload urls from the Panel")
 s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")

@@ -157,26 +156,22 @@ func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser)
 }

 // Attempt to upload the part.
-etag, err := uploader.uploadPart(ctx, part, partSize)
-if err != nil {
+if _, err := uploader.uploadPart(ctx, part, partSize); err != nil {
 s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
-return nil, err
+return err
 }
-uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
-ETag: etag,
-PartNumber: i + 1,
-})
 s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
 }

 s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")

-return uploader.uploadedParts, nil
+return nil
 }

 type s3FileUploader struct {
 io.ReadCloser
 client *http.Client
-uploadedParts []remote.BackupPart
 }

 // newS3FileUploader returns a new file uploader instance.

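Note: the `parts` return value removed above is how the S3 backup reported each uploaded part's ETag and number back to the Panel; the slice flowed into `ArchiveDetails.Parts` and then into `BackupRequest.Parts` (see the backup.go and remote types hunks earlier). A simplified sketch of that payload, using local stand-in structs and made-up values rather than the project's own types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified local copies of the BackupPart / BackupRequest shapes from the diff.
type BackupPart struct {
	ETag       string `json:"etag"`
	PartNumber int    `json:"part_number"`
}

type BackupRequest struct {
	Checksum     string       `json:"checksum"`
	ChecksumType string       `json:"checksum_type"`
	Size         int64        `json:"size"`
	Successful   bool         `json:"successful"`
	Parts        []BackupPart `json:"parts"`
}

func main() {
	// Each multipart upload response carries an ETag; reporting the ETags with their
	// part numbers lets whichever side finalises the upload pass them to S3's
	// CompleteMultipartUpload call.
	req := BackupRequest{
		Checksum:     "deadbeef",
		ChecksumType: "sha1",
		Size:         1 << 20,
		Successful:   true,
		Parts: []BackupPart{
			{ETag: "etag-part-1", PartNumber: 1},
			{ETag: "etag-part-2", PartNumber: 2},
		},
	}
	out, _ := json.Marshal(req)
	fmt.Println(string(out))
}
```
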
@@ -8,7 +8,6 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
@@ -18,7 +17,6 @@ import (
|
|||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/system"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const memory = 4 * 1024
|
const memory = 4 * 1024
|
||||||
@@ -30,62 +28,6 @@ var pool = sync.Pool{
 	},
 }

-// Progress is used to track the progress of any I/O operation that are being
-// performed.
-type Progress struct {
-	// written is the total size of the files that have been written to the writer.
-	written int64
-	// Total is the total size of the archive in bytes.
-	total int64
-	// w .
-	w io.Writer
-}
-
-// NewProgress .
-func NewProgress(total int64) *Progress {
-	return &Progress{total: total}
-}
-
-// Written returns the total number of bytes written.
-// This function should be used when the progress is tracking data being written.
-func (p *Progress) Written() int64 {
-	return atomic.LoadInt64(&p.written)
-}
-
-// Total returns the total size in bytes.
-func (p *Progress) Total() int64 {
-	return atomic.LoadInt64(&p.total)
-}
-
-// Write totals the number of bytes that have been written to the writer.
-func (p *Progress) Write(v []byte) (int, error) {
-	n := len(v)
-	atomic.AddInt64(&p.written, int64(n))
-	if p.w != nil {
-		return p.w.Write(v)
-	}
-	return n, nil
-}
-
-// Progress returns a formatted progress string for the current progress.
-func (p *Progress) Progress(width int) string {
-	current := p.Written()
-	total := p.Total()
-
-	// v = 100 (Progress)
-	// size = 1000 (Content-Length)
-	// p / size = 0.1
-	// * 100 = 10% (Multiply by 100 to get a percentage of the download)
-	// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
-	// 2.5 (Number of ticks as a float64)
-	// 2 (convert to an integer)
-
-	// We have to cast these numbers to float in order to get a float result from the division.
-	ticks := ((float64(current) / float64(total)) * 100) / (float64(100) / float64(width))
-	bar := strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks))
-	return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
-}
-
 type Archive struct {
 	// BasePath is the absolute path to create the archive from where Files and Ignore are
 	// relative to.
@@ -98,13 +40,10 @@ type Archive struct {
 	// Files specifies the files to archive, this takes priority over the Ignore option, if
 	// unspecified, all files in the BasePath will be archived unless Ignore is set.
 	Files []string
-
-	// Progress wraps the writer of the archive to pass through the progress tracker.
-	Progress *Progress
 }

-// Create creates an archive at dst with all the files defined in the
-// included Files array.
+// Create creates an archive at dst with all of the files defined in the
+// included files struct.
 func (a *Archive) Create(dst string) error {
 	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
@@ -123,34 +62,13 @@ func (a *Archive) Create(dst string) error {
 		writer = f
 	}

-	// Choose which compression level to use based on the compression_level configuration option
-	var compressionLevel int
-	switch config.Get().System.Backups.CompressionLevel {
-	case "none":
-		compressionLevel = pgzip.NoCompression
-	case "best_compression":
-		compressionLevel = pgzip.BestCompression
-	case "best_speed":
-		fallthrough
-	default:
-		compressionLevel = pgzip.BestSpeed
-	}
-
 	// Create a new gzip writer around the file.
-	gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
+	gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
 	_ = gw.SetConcurrency(1<<20, 1)
 	defer gw.Close()

-	var pw io.Writer
-	if a.Progress != nil {
-		a.Progress.w = gw
-		pw = a.Progress
-	} else {
-		pw = gw
-	}
-
 	// Create a new tar writer around the gzip writer.
-	tw := tar.NewWriter(pw)
+	tw := tar.NewWriter(gw)
 	defer tw.Close()

 	// Configure godirwalk.
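The Progress type removed in the hunks above is essentially an io.Writer wrapper that atomically counts the bytes written through it, and its width-based tick calculation (worked through in the removed comments) turns written/total into a fixed-width bar. A small stand-alone sketch of the same idea follows; the names and output format are illustrative, not the Wings API.

package main

import (
    "fmt"
    "io"
    "strings"
    "sync/atomic"
)

// progress counts bytes flowing through it, in the spirit of the removed
// Progress type: it wraps the real writer and keeps a running total.
type progress struct {
    written int64
    total   int64
    w       io.Writer
}

func (p *progress) Write(v []byte) (int, error) {
    atomic.AddInt64(&p.written, int64(len(v)))
    return p.w.Write(v)
}

// bar renders a fixed-width progress bar; written/total scaled by the width is
// the same arithmetic as the tick calculation commented in the removed code.
func (p *progress) bar(width int) string {
    written := atomic.LoadInt64(&p.written)
    ticks := int(float64(written) / float64(p.total) * float64(width))
    return "[" + strings.Repeat("=", ticks) + strings.Repeat(" ", width-ticks) + "] " +
        fmt.Sprintf("%d / %d bytes", written, p.total)
}

func main() {
    data := []byte("12345678")
    p := &progress{total: int64(len(data)), w: io.Discard}

    _, _ = p.Write(data[:4]) // write half of the payload
    fmt.Println(p.bar(8))    // [====    ] 4 / 8 bytes

    _, _ = p.Write(data[4:])
    fmt.Println(p.bar(8)) // [========] 8 / 8 bytes
}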
@@ -185,7 +103,7 @@ func (a *Archive) Create(dst string) error {
 // being generated.
 func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
 	return func(path string, de *godirwalk.Dirent) error {
-		// Skip directories because we are walking them recursively.
+		// Skip directories because we walking them recursively.
 		if de.IsDir() {
 			return nil
 		}
@@ -230,7 +148,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
 // Adds a given file path to the final archive being created.
 func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
 	// Lstat the file, this will give us the same information as Stat except that it will not
-	// follow a symlink to its target automatically. This is important to avoid including
+	// follow a symlink to it's target automatically. This is important to avoid including
 	// files that exist outside the server root unintentionally in the backup.
 	s, err := os.Lstat(p)
 	if err != nil {

@@ -71,7 +71,7 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
 	// If space is -1 or 0 just return true, means they're allowed unlimited.
 	//
 	// Technically we could skip disk space calculation because we don't need to check if the
-	// server exceeds its limit but because this method caches the disk usage it would be best
+	// server exceeds it's limit but because this method caches the disk usage it would be best
 	// to calculate the disk usage and always return true.
 	if fs.MaxDisk() == 0 {
 		return true
@@ -159,7 +159,7 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
 	// Adjust the disk usage to account for the old size and the new size of the file.
 	fs.addDisk(sz - currentSize)

-	return fs.unsafeChown(cleaned)
+	return fs.Chown(cleaned)
 }

 // Creates a new directory (name) at a specified path (p) for the server.
@@ -217,12 +217,7 @@ func (fs *Filesystem) Chown(path string) error {
 	if err != nil {
 		return err
 	}
-	return fs.unsafeChown(cleaned)
-}
-
-// unsafeChown chowns the given path, without checking if the path is safe. This should only be used
-// when the path has already been checked.
-func (fs *Filesystem) unsafeChown(path string) error {
 	if fs.isTest {
 		return nil
 	}
@@ -231,19 +226,19 @@ func (fs *Filesystem) unsafeChown(path string) error {
 	gid := config.Get().System.User.Gid

 	// Start by just chowning the initial path that we received.
-	if err := os.Chown(path, uid, gid); err != nil {
+	if err := os.Chown(cleaned, uid, gid); err != nil {
 		return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
 	}

 	// If this is not a directory we can now return from the function, there is nothing
 	// left that we need to do.
-	if st, err := os.Stat(path); err != nil || !st.IsDir() {
+	if st, err := os.Stat(cleaned); err != nil || !st.IsDir() {
 		return nil
 	}

 	// If this was a directory, begin walking over its contents recursively and ensure that all
 	// of the subfiles and directories get their permissions updated as well.
-	err := godirwalk.Walk(path, &godirwalk.Options{
+	err = godirwalk.Walk(cleaned, &godirwalk.Options{
 		Unsorted: true,
 		Callback: func(p string, e *godirwalk.Dirent) error {
 			// Do not attempt to chown a symlink. Go's os.Chown function will affect the symlink
@@ -260,6 +255,7 @@ func (fs *Filesystem) unsafeChown(path string) error {
 			return os.Chown(p, uid, gid)
 		},
 	})

 	return errors.Wrap(err, "server/filesystem: chown: failed to chown during walk function")
 }

@@ -381,9 +377,10 @@ func (fs *Filesystem) TruncateRootDirectory() error {
 // Delete removes a file or folder from the system. Prevents the user from
 // accidentally (or maliciously) removing their root server data directory.
 func (fs *Filesystem) Delete(p string) error {
+	wg := sync.WaitGroup{}
 	// This is one of the few (only?) places in the codebase where we're explicitly not using
 	// the SafePath functionality when working with user provided input. If we did, you would
-	// not be able to delete a file that is a symlink pointing to a location outside the data
+	// not be able to delete a file that is a symlink pointing to a location outside of the data
 	// directory.
 	//
 	// We also want to avoid resolving a symlink that points _within_ the data directory and thus
@@ -400,66 +397,26 @@ func (fs *Filesystem) Delete(p string) error {
 		return errors.New("cannot delete root server directory")
 	}

-	st, err := os.Lstat(resolved)
-	if err != nil {
+	if st, err := os.Lstat(resolved); err != nil {
 		if !os.IsNotExist(err) {
 			fs.error(err).Warn("error while attempting to stat file before deletion")
-			return err
-		}
-
-		// The following logic is used to handle a case where a user attempts to
-		// delete a file that does not exist through a directory symlink.
-		// We don't want to reveal that the file does not exist, so we validate
-		// the path of the symlink and return a bad path error if it is invalid.
-
-		// The requested file or directory doesn't exist, so at this point we
-		// need to iterate up the path chain until we hit a directory that
-		// _does_ exist and can be validated.
-		parts := strings.Split(filepath.Dir(resolved), "/")
-
-		// Range over all the path parts and form directory paths from the end
-		// moving up until we have a valid resolution, or we run out of paths to
-		// try.
-		for k := range parts {
-			try := strings.Join(parts[:(len(parts)-k)], "/")
-			if !fs.unsafeIsInDataDirectory(try) {
-				break
-			}
-
-			t, err := filepath.EvalSymlinks(try)
-			if err == nil {
-				if !fs.unsafeIsInDataDirectory(t) {
-					return NewBadPathResolution(p, t)
-				}
-				break
-			}
-		}
-
-		// Always return early if the file does not exist.
-		return nil
-	}
-
-	// If the file is not a symlink, we need to check that it is not within a
-	// symlinked directory that points outside the data directory.
-	if st.Mode()&os.ModeSymlink == 0 {
-		ep, err := filepath.EvalSymlinks(resolved)
-		if err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
-		} else if !fs.unsafeIsInDataDirectory(ep) {
-			return NewBadPathResolution(p, ep)
-		}
-	}
-
-	if st.IsDir() {
-		if s, err := fs.DirectorySize(resolved); err == nil {
-			fs.addDisk(-s)
 		}
 	} else {
-		fs.addDisk(-st.Size())
+		if !st.IsDir() {
+			fs.addDisk(-st.Size())
+		} else {
+			wg.Add(1)
+			go func(wg *sync.WaitGroup, st os.FileInfo, resolved string) {
+				defer wg.Done()
+				if s, err := fs.DirectorySize(resolved); err == nil {
+					fs.addDisk(-s)
+				}
+			}(&wg, st, resolved)
+		}
 	}

+	wg.Wait()
+
 	return os.RemoveAll(resolved)
 }

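On the added (+) side of the hunk above, the directory-size accounting for a deleted directory is pushed into a goroutine and the function waits on a sync.WaitGroup before removing the files. A minimal sketch of that pattern is below; directorySize, the example path, and the cached usage value are placeholders, not the Wings code.

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// directorySize stands in for the potentially slow recursive size walk.
func directorySize(path string) (int64, error) {
    return 4096, nil
}

func main() {
    var usage int64 = 10000 // cached disk usage in bytes

    wg := sync.WaitGroup{}
    wg.Add(1)
    // Measure the directory being deleted in the background, then subtract its
    // size from the cached usage once the walk finishes.
    go func(path string) {
        defer wg.Done()
        if s, err := directorySize(path); err == nil {
            atomic.AddInt64(&usage, -s)
        }
    }("/srv/server/old-data")

    // Wait for the accounting to finish before actually removing the files.
    wg.Wait()

    fmt.Println("usage after delete:", atomic.LoadInt64(&usage))
}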
@@ -508,80 +508,6 @@ func TestFilesystem_Delete(t *testing.T) {
 			}
 		})

-		g.It("deletes a symlink but not it's target within the root directory", func() {
-			// Symlink to a file inside the root directory.
-			err := os.Symlink(filepath.Join(rfs.root, "server/source.txt"), filepath.Join(rfs.root, "server/symlink.txt"))
-			g.Assert(err).IsNil()
-
-			// Delete the symlink itself.
-			err = fs.Delete("symlink.txt")
-			g.Assert(err).IsNil()
-
-			// Ensure the symlink was deleted.
-			_, err = os.Lstat(filepath.Join(rfs.root, "server/symlink.txt"))
-			g.Assert(err).IsNotNil()
-
-			// Ensure the symlink target still exists.
-			_, err = os.Lstat(filepath.Join(rfs.root, "server/source.txt"))
-			g.Assert(err).IsNil()
-		})
-
-		g.It("does not delete files symlinked outside of the root directory", func() {
-			// Create a file outside the root directory.
-			err := rfs.CreateServerFileFromString("/../source.txt", "test content")
-			g.Assert(err).IsNil()
-
-			// Create a symlink to the file outside the root directory.
-			err = os.Symlink(filepath.Join(rfs.root, "source.txt"), filepath.Join(rfs.root, "/server/symlink.txt"))
-			g.Assert(err).IsNil()
-
-			// Delete the symlink. (This should pass as we will delete the symlink itself, not it's target)
-			err = fs.Delete("symlink.txt")
-			g.Assert(err).IsNil()
-
-			// Ensure the file outside the root directory still exists.
-			_, err = os.Lstat(filepath.Join(rfs.root, "source.txt"))
-			g.Assert(err).IsNil()
-		})
-
-		g.It("does not delete files symlinked through a directory outside of the root directory", func() {
-			// Create a directory outside the root directory.
-			err := os.Mkdir(filepath.Join(rfs.root, "foo"), 0o755)
-			g.Assert(err).IsNil()
-
-			// Create a file inside the directory that is outside the root.
-			err = rfs.CreateServerFileFromString("/../foo/source.txt", "test content")
-			g.Assert(err).IsNil()
-
-			// Symlink the directory that is outside the root to a file inside the root.
-			err = os.Symlink(filepath.Join(rfs.root, "foo"), filepath.Join(rfs.root, "server/symlink"))
-			g.Assert(err).IsNil()
-
-			// Delete a file inside the symlinked directory.
-			err = fs.Delete("symlink/source.txt")
-			g.Assert(err).IsNotNil()
-			g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
-
-			// Ensure the file outside the root directory still exists.
-			_, err = os.Lstat(filepath.Join(rfs.root, "foo/source.txt"))
-			g.Assert(err).IsNil()
-		})
-
-		g.It("returns an error when trying to delete a non-existent file symlinked through a directory outside of the root directory", func() {
-			// Create a directory outside the root directory.
-			err := os.Mkdir(filepath.Join(rfs.root, "foo2"), 0o755)
-			g.Assert(err).IsNil()
-
-			// Symlink the directory that is outside the root to a file inside the root.
-			err = os.Symlink(filepath.Join(rfs.root, "foo2"), filepath.Join(rfs.root, "server/symlink"))
-			g.Assert(err).IsNil()
-
-			// Delete a file inside the symlinked directory.
-			err = fs.Delete("symlink/source.txt")
-			g.Assert(err).IsNotNil()
-			g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
-		})
-
 		g.AfterEach(func() {
 			rfs.reset()

@@ -2,7 +2,6 @@ package filesystem

 import (
 	"context"
-	iofs "io/fs"
 	"os"
 	"path/filepath"
 	"strings"
@@ -34,6 +33,8 @@ func (fs *Filesystem) IsIgnored(paths ...string) error {
 // This logic is actually copied over from the SFTP server code. Ideally that eventually
 // either gets ported into this application, or is able to make use of this package.
 func (fs *Filesystem) SafePath(p string) (string, error) {
+	var nonExistentPathResolution string

 	// Start with a cleaned up path before checking the more complex bits.
 	r := fs.unsafeFilePath(p)

@@ -43,24 +44,47 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
 	if err != nil && !os.IsNotExist(err) {
 		return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
 	} else if os.IsNotExist(err) {
-		// The target of one of the symlinks (EvalSymlinks is recursive) does not exist.
-		// So we get what target path does not exist and check if it's within the data
-		// directory. If it is, we return the original path, otherwise we return an error.
-		pErr, ok := err.(*iofs.PathError)
-		if !ok {
-			return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
+		// The requested directory doesn't exist, so at this point we need to iterate up the
+		// path chain until we hit a directory that _does_ exist and can be validated.
+		parts := strings.Split(filepath.Dir(r), "/")
+
+		var try string
+		// Range over all of the path parts and form directory pathings from the end
+		// moving up until we have a valid resolution or we run out of paths to try.
+		for k := range parts {
+			try = strings.Join(parts[:(len(parts)-k)], "/")
+
+			if !fs.unsafeIsInDataDirectory(try) {
+				break
+			}
+
+			t, err := filepath.EvalSymlinks(try)
+			if err == nil {
+				nonExistentPathResolution = t
+				break
+			}
 		}
-		ep = pErr.Path
 	}

+	// If the new path doesn't start with their root directory there is clearly an escape
+	// attempt going on, and we should NOT resolve this path for them.
+	if nonExistentPathResolution != "" {
+		if !fs.unsafeIsInDataDirectory(nonExistentPathResolution) {
+			return "", NewBadPathResolution(p, nonExistentPathResolution)
+		}
+
+		// If the nonExistentPathResolution variable is not empty then the initial path requested
+		// did not exist and we looped through the pathway until we found a match. At this point
+		// we've confirmed the first matched pathway exists in the root server directory, so we
+		// can go ahead and just return the path that was requested initially.
+		return r, nil
+	}
+
 	// If the requested directory from EvalSymlinks begins with the server root directory go
 	// ahead and return it. If not we'll return an error which will block any further action
 	// on the file.
 	if fs.unsafeIsInDataDirectory(ep) {
-		// Returning the original path here instead of the resolved path ensures that
-		// whatever the user is trying to do will work as expected. If we returned the
-		// resolved path, the user would be unable to know that it is in fact a symlink.
-		return r, nil
+		return ep, nil
 	}

 	return "", NewBadPathResolution(p, r)

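Both the removed and the added version of this branch solve the same problem: if the requested path does not exist yet, its symlinks cannot be resolved directly, so the code walks up the parent chain until it finds a component that does resolve and then validates that result against the server's data directory. A stand-alone sketch of the walk-up step follows; it is a simplification with illustrative names, not the Wings code.

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

// resolveExistingParent walks up the parent directories of a path that does not
// exist and returns the first ancestor that EvalSymlinks can resolve, mirroring
// the "iterate up the path chain" loop above.
func resolveExistingParent(p string) (string, bool) {
    parts := strings.Split(filepath.Dir(p), "/")
    for k := range parts {
        try := strings.Join(parts[:len(parts)-k], "/")
        if try == "" {
            break
        }
        if t, err := filepath.EvalSymlinks(try); err == nil {
            return t, true
        }
    }
    return "", false
}

func main() {
    // "/tmp/does/not/exist.txt" cannot be resolved directly because it does not
    // exist, but "/tmp" does, so the walk stops there. A real implementation would
    // then check the resolved ancestor against the data directory before trusting
    // the requested path.
    if t, ok := resolveExistingParent("/tmp/does/not/exist.txt"); ok {
        fmt.Println("first existing ancestor resolves to:", t)
    }
}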
@@ -115,14 +115,6 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
 		panic(err)
 	}

-	if err := os.Symlink(filepath.Join(rfs.root, "malicious_does_not_exist.txt"), filepath.Join(rfs.root, "/server/symlinked_does_not_exist.txt")); err != nil {
-		panic(err)
-	}
-
-	if err := os.Symlink(filepath.Join(rfs.root, "/server/symlinked_does_not_exist.txt"), filepath.Join(rfs.root, "/server/symlinked_does_not_exist2.txt")); err != nil {
-		panic(err)
-	}
-
 	if err := os.Symlink(filepath.Join(rfs.root, "/malicious_dir"), filepath.Join(rfs.root, "/server/external_dir")); err != nil {
 		panic(err)
 	}
@@ -136,22 +128,6 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
 		g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
 	})

-	g.It("cannot write to a non-existent file symlinked outside the root", func() {
-		r := bytes.NewReader([]byte("testing what the fuck"))
-
-		err := fs.Writefile("symlinked_does_not_exist.txt", r)
-		g.Assert(err).IsNotNil()
-		g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
-	})
-
-	g.It("cannot write to chained symlinks with target that does not exist outside the root", func() {
-		r := bytes.NewReader([]byte("testing what the fuck"))
-
-		err := fs.Writefile("symlinked_does_not_exist2.txt", r)
-		g.Assert(err).IsNotNil()
-		g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
-	})
-
 	g.It("cannot write a file to a directory symlinked outside the root", func() {
 		r := bytes.NewReader([]byte("testing"))

@@ -447,8 +447,8 @@ func (ip *InstallationProcess) Execute() (string, error) {
 				"compress": "false",
 			},
 		},
+		Privileged:  true,
 		NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
-		UsernsMode:  container.UsernsMode(config.Get().Docker.UsernsMode),
 	}

 	// Ensure the root directory for the server exists properly before attempting

@@ -1,3 +1,3 @@
 package system

-var Version = "1.7.5"
+var Version = "1.7.0"

@@ -23,7 +23,7 @@ type SinkPool struct {
 }

 // NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
-// server instance for its full lifetime.
+// server instance for it's full lifetime.
 func NewSinkPool() *SinkPool {
 	return &SinkPool{}
 }