Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Pterodactyl CI | a76d84ea96 | bump version for release | 2022-09-26 01:24:05 +00:00 |
56 changed files with 627 additions and 1662 deletions

.github/FUNDING.yml (vendored, 3 lines changed)
View File

@@ -1 +1,2 @@
-github: [ matthewpi ]
+github: [ DaneEveritt ]
+custom: [ "https://paypal.me/PterodactylSoftware" ]

View File

@@ -12,7 +12,7 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-20.04 ]
-go: [ '1.18.7' ]
+go: [ '^1.17' ]
goos: [ linux ]
goarch: [ amd64, arm64 ]
runs-on: ${{ matrix.os }}
@@ -58,6 +58,7 @@ jobs:
run: |
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH} wings.go
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH}_debug wings.go
+upx build/wings_${GOOS}_${{ matrix.goarch }}
chmod +x build/*
- name: Tests
run: go test -race ./...

View File

@@ -11,7 +11,7 @@ jobs:
uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
-go-version: '1.18.7'
+go-version: '^1.17'
- name: Build
env:
REF: ${{ github.ref }}
@@ -22,8 +22,8 @@ jobs:
run: go test ./...
- name: Compress binary and make it executable
run: |
-chmod +x build/wings_linux_amd64
-chmod +x build/wings_linux_arm64
+upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
+upx build/wings_linux_arm64 && chmod +x build/wings_linux_arm64
- name: Extract changelog
env:
REF: ${{ github.ref }}

View File

@@ -1,17 +1,5 @@
# Changelog
-## v1.7.2
-### Fixed
-* The S3 backup driver now supports Cloudflare R2
-### Added
-* During a server transfer, there is a new "Archiving" status that outputs the progress of creating the server transfer archive.
-* Adds a configuration option to control the list of trusted proxies that can be used to determine the client IP address.
-* Adds a configuration option to control the Docker user namespace setting when Wings creates containers.
-### Changed
-* Releases are now built using `Go 1.18` — the minimum version required to build Wings is now `Go 1.18`.
## v1.7.1
### Fixed
* YAML parser has been updated to fix some strange issues
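The trusted-proxies option called out above is consumed later in this diff by the router change (`router.SetTrustedProxies(config.Get().Api.TrustedProxies)`). A rough sketch of what such a list does, assuming the gin v1.8 API pinned in go.mod; the CIDR values are hypothetical:

```go
package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.New()
	// X-Forwarded-For is only honoured when the request arrives from one of
	// these proxy ranges; otherwise ClientIP falls back to the socket address.
	_ = r.SetTrustedProxies([]string{"10.0.0.0/8", "172.16.0.0/12"})
	r.GET("/ip", func(c *gin.Context) {
		c.String(200, "%s", c.ClientIP())
	})
	_ = r.Run(":8080")
}
```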

View File

@@ -1,5 +1,5 @@
# Stage 1 (Build)
-FROM golang:1.18-alpine AS builder
+FROM golang:1.17-alpine AS builder
ARG VERSION
RUN apk add --update --no-cache git make

View File

@@ -4,9 +4,6 @@ build:
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_amd64 -v wings.go
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go
race:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
debug:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
@@ -17,6 +14,9 @@ rmdebug:
go build -gcflags "all=-N -l" -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml
compress:
upx --brute build/wings_*
cross-build: clean build compress
clean:

View File

@@ -14,7 +14,7 @@ dependencies, and allowing users to authenticate with the same credentials they
## Sponsors
I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
-[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
+[Interested in becoming a sponsor?](https://github.com/sponsors/DaneEveritt)
| Company | About |
| ------- | ----- |

View File

@@ -1,127 +0,0 @@
package cmd
import (
"context"
"os"
"os/exec"
"strings"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/vhd"
"github.com/pterodactyl/wings/loggers/cli"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server"
"github.com/spf13/cobra"
)
type MigrateVHDCommand struct {
manager *server.Manager
}
func newMigrateVHDCommand() *cobra.Command {
return &cobra.Command{
Use: "migrate-vhd",
Short: "migrates existing data from a directory tree into virtual hard-disks",
PreRun: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.DebugLevel)
log.SetHandler(cli.Default)
},
Run: func(cmd *cobra.Command, args []string) {
client := remote.NewFromConfig(config.Get())
manager, err := server.NewManager(cmd.Context(), client, true)
if err != nil {
log.WithField("error", err).Fatal("failed to create new server manager")
}
c := &MigrateVHDCommand{
manager: manager,
}
if err := c.Run(cmd.Context()); err != nil {
log.WithField("error", err).Fatal("failed to execute command")
}
},
}
}
// Run executes the migration command.
func (m *MigrateVHDCommand) Run(ctx context.Context) error {
if !vhd.Enabled() {
return errors.New("cannot migrate to vhd: the underlying driver must be set to \"vhd\"")
}
for _, s := range m.manager.All() {
s.Log().Debug("starting migration of server contents to virtual disk...")
v := vhd.New(s.DiskSpace(), vhd.DiskPath(s.ID()), s.Filesystem().Path())
s.Log().WithField("disk_image", v.Path()).Info("creating virtual disk for server")
if err := v.Allocate(ctx); err != nil {
return errors.WithStackIf(err)
}
s.Log().Info("creating virtual filesystem for server")
if err := v.MakeFilesystem(ctx); err != nil {
// If the filesystem already exists no worries, just move on with our
// day here.
if !errors.Is(err, vhd.ErrFilesystemExists) {
return errors.WithStack(err)
}
}
bak := strings.TrimSuffix(s.Filesystem().Path(), "/") + "_bak"
mounted, err := v.IsMounted(ctx)
if err != nil {
return err
} else if !mounted {
s.Log().WithField("backup_dir", bak).Debug("virtual disk is not yet mounted, creating backup directory")
// Create a backup directory of the server files if one does not already exist
// at that location. If one does exists we'll just assume it is good to go and
// rely on it to provide the files we'll need.
if _, err := os.Lstat(bak); os.IsNotExist(err) {
if err := os.Rename(s.Filesystem().Path(), bak); err != nil {
return errors.Wrap(err, "failed to rename existing data directory for backup")
}
} else if err != nil {
return errors.WithStack(err)
}
if err := os.RemoveAll(s.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "failed to remove base server files path")
}
} else {
s.Log().Warn("server appears to already have existing mount, not creating data backup")
}
// Attempt to mount the disk at the expected path now that we've created
// a backup of the server files.
if err := v.Mount(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemMounted) {
return errors.WithStackIf(err)
}
// Copy over the files from the backup for this server but only
// if we have a backup directory currently.
_, err = os.Lstat(bak)
if err != nil {
if !os.IsNotExist(err) {
s.Log().WithField("error", err).Warn("failed to stat backup directory")
} else {
s.Log().Info("no backup data directory exists, not restoring files")
}
} else {
cmd := exec.CommandContext(ctx, "cp", "-r", bak+"/.", s.Filesystem().Path())
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "migrate: failed to move old server files into new direcotry")
} else {
if err := os.RemoveAll(bak); err != nil {
s.Log().WithField("directory", bak).WithField("error", err).Warn("failed to remove backup directory")
}
}
}
s.Log().Info("updating server file ownership...")
if err := s.Filesystem().Chown("/"); err != nil {
s.Log().WithField("error", err).Warn("failed to update ownership of new server files")
}
s.Log().Info("finished migration to virtual disk...")
}
return nil
}

View File

@@ -5,6 +5,8 @@ import (
"crypto/tls"
"errors"
"fmt"
"github.com/pterodactyl/wings/internal/cron"
"github.com/pterodactyl/wings/internal/database"
log2 "log"
"net/http"
_ "net/http/pprof"
@@ -16,9 +18,6 @@ import (
"strings"
"time"
"github.com/pterodactyl/wings/internal/cron"
"github.com/pterodactyl/wings/internal/database"
"github.com/NYTimes/logrotate"
"github.com/apex/log"
"github.com/apex/log/handlers/multi"
@@ -47,16 +46,8 @@ var (
var rootCommand = &cobra.Command{
Use: "wings",
Short: "Runs the API server allowing programmatic control of game servers for Pterodactyl Panel.",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
initConfig()
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
},
PreRun: func(cmd *cobra.Command, args []string) {
initConfig()
initLogging()
if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
@@ -85,7 +76,6 @@ func Execute() {
func init() {
rootCommand.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
rootCommand.PersistentFlags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
// Flags specifically used when running the API.
rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
@@ -93,11 +83,11 @@ func init() {
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
rootCommand.AddCommand(versionCommand)
rootCommand.AddCommand(configureCmd)
rootCommand.AddCommand(newDiagnosticsCommand())
rootCommand.AddCommand(newMigrateVHDCommand())
}
func rootCmdRun(cmd *cobra.Command, _ []string) {
@@ -105,6 +95,13 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
log.Debug("running in debug mode")
log.WithField("config_file", configPath).Info("loading configuration from file")
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
if err := config.ConfigureTimezone(); err != nil {
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
}
@@ -139,7 +136,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
log.WithField("error", err).Fatal("failed to initialize database")
}
-manager, err := server.NewManager(cmd.Context(), pclient, false)
+manager, err := server.NewManager(cmd.Context(), pclient)
if err != nil {
log.WithField("error", err).Fatal("failed to load server configurations")
}
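For context on the hook shuffle above: PersistentPreRun is inherited by subcommands, while PreRun and Run fire only for the invoked command, which changes where flag handling like --ignore-certificate-errors takes effect. A tiny cobra sketch (not Wings code) of the ordering:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "demo",
		// Fires for this command and for every subcommand.
		PersistentPreRun: func(*cobra.Command, []string) { fmt.Println("persistent pre-run") },
		// These two fire only when "demo" itself is executed.
		PreRun: func(*cobra.Command, []string) { fmt.Println("pre-run") },
		Run:    func(*cobra.Command, []string) { fmt.Println("run") },
	}
	_ = root.Execute()
}
```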

View File

@@ -91,9 +91,6 @@ type ApiConfiguration struct {
// The maximum size for files uploaded through the Panel in MB.
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
-// A list of IP addresses of proxies that may send an X-Forwarded-For header to set the true client's IP
-TrustedProxies []string `json:"trusted_proxies" yaml:"trusted_proxies"`
}
// RemoteQueryConfiguration defines the configuration settings for remote requests
@@ -305,11 +302,6 @@ type Configuration struct {
// is only required by users running Wings without SSL certificates and using internal IP
// addresses in order to connect. Most users should NOT enable this setting.
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
-// Servers contains all of the settings that are used when configuring individual servers
-// on the system. This is a global configuration for all server instances, not to be confused
-// with the per-server configurations provided by the Panel API.
-Servers Servers `json:"servers" yaml:"servers"`
}
// NewAtPath creates a new struct and set the path where it should be stored.

View File

@@ -78,14 +78,6 @@ type DockerConfiguration struct {
Overhead Overhead `json:"overhead" yaml:"overhead"`
UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
-// Sets the user namespace mode for the container when user namespace remapping option is
-// enabled.
-//
-// If the value is blank, the daemon's user namespace remapping configuration is used,
-// if the value is "host", then the pterodactyl containers are started with user namespace
-// remapping disabled
-UsernsMode string `default:"" json:"userns_mode" yaml:"userns_mode"`
}
// RegistryConfiguration defines the authentication credentials for a given
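The userns_mode value removed above is passed straight through to Docker when Wings creates a container, as the environment/docker hunk further down shows. A self-contained sketch of that cast; buildHostConfig is a hypothetical helper, not Wings code:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

// buildHostConfig mirrors how the setting reaches Docker: the raw string from
// the config is cast into Docker's UsernsMode type on the HostConfig.
func buildHostConfig(usernsMode string) *container.HostConfig {
	return &container.HostConfig{
		// "host" disables user namespace remapping for the container; an
		// empty value defers to the daemon's own remapping configuration.
		UsernsMode: container.UsernsMode(usernsMode),
	}
}

func main() {
	fmt.Println(buildHostConfig("host").UsernsMode.IsHost()) // true
}
```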

View File

@@ -1,28 +0,0 @@
package config
type FSDriver string
const (
FSDriverLocal FSDriver = "local"
FSDriverVHD FSDriver = "vhd"
)
type Servers struct {
// Filesystem defines all of the filesystem specific settings used for servers.
Filesystem Filesystem `json:"filesystem" yaml:"filesystem"`
}
type Filesystem struct {
// Driver defines the underlying filesystem driver that is used when a server is
// created on the system. This currently supports either of the following drivers:
//
// local: the local driver is the default one used by Wings. This offloads all of the
// disk limit enforcement to Wings itself. This has a performance impact but is
// the most compatible with all systems.
// vhd: the vhd driver uses "virtual" disks on the host system to enforce disk limits
// on the server. This is more performant since calculations do not need to be made
// by Wings itself when enforcing limits. It also avoids vulnerabilities that exist
// in the local driver which allow malicious processes to quickly create massive files
// before Wings is able to detect and stop them from being written.
Driver FSDriver `default:"local" json:"driver" yaml:"driver"`
}

View File

@@ -14,7 +14,6 @@ import (
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
)

View File

@@ -261,7 +261,6 @@ func (e *Environment) Create() error {
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
},
NetworkMode: networkMode,
-UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
}
if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {

View File

@@ -10,7 +10,6 @@ import (
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote"

View File

@@ -5,7 +5,6 @@ import (
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
)

go.mod (122 lines changed)
View File

@@ -1,125 +1,129 @@
module github.com/pterodactyl/wings
go 1.18
go 1.17
require (
emperror.dev/errors v0.8.1
github.com/AlecAivazis/survey/v2 v2.3.6
github.com/AlecAivazis/survey/v2 v2.3.4
github.com/Jeffail/gabs/v2 v2.6.1
github.com/NYTimes/logrotate v1.0.0
github.com/apex/log v1.9.0
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/beevik/etree v1.1.0
github.com/buger/jsonparser v1.1.1
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cenkalti/backoff/v4 v4.1.2
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
github.com/creasty/defaults v1.6.0
github.com/docker/docker v20.10.18+incompatible
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.14+incompatible
github.com/docker/go-connections v0.4.0
github.com/fatih/color v1.13.0
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
github.com/gabriel-vasile/mimetype v1.4.1
github.com/gammazero/workerpool v1.1.3
github.com/gabriel-vasile/mimetype v1.4.0
github.com/gammazero/workerpool v1.1.2
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gin-gonic/gin v1.8.1
github.com/glebarez/sqlite v1.4.8
github.com/go-co-op/gocron v1.17.0
github.com/goccy/go-json v0.9.11
github.com/gin-gonic/gin v1.7.7
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.5.0
github.com/iancoleman/strcase v0.2.0
github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
github.com/juju/ratelimit v1.0.2
github.com/karrick/godirwalk v1.17.0
github.com/klauspost/compress v1.15.11
github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
github.com/juju/ratelimit v1.0.1
github.com/karrick/godirwalk v1.16.1
github.com/klauspost/pgzip v1.2.5
github.com/magiconair/properties v1.8.6
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-colorable v0.1.12
github.com/mholt/archiver/v3 v3.5.1
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/sftp v1.13.5
github.com/pkg/sftp v1.13.4
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/spf13/afero v1.9.2
github.com/spf13/cobra v1.5.0
github.com/stretchr/testify v1.8.0
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
gopkg.in/ini.v1 v1.67.0
github.com/spf13/cobra v1.4.0
github.com/stretchr/testify v1.7.5
golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
gopkg.in/ini.v1 v1.66.4
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/gorm v1.23.10
)
require (
github.com/glebarez/sqlite v1.4.6
github.com/go-co-op/gocron v1.15.0
github.com/goccy/go-json v0.9.6
github.com/klauspost/compress v1.15.1
gopkg.in/yaml.v3 v3.0.1
gorm.io/gorm v1.23.8
)
require golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/containerd v1.6.2 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gammazero/deque v0.2.0 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/gammazero/deque v0.1.1 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/glebarez/go-sqlite v1.19.1 // indirect
github.com/glebarez/go-sqlite v1.17.3 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.11.1 // indirect
github.com/go-playground/validator/v10 v10.10.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/gorilla/mux v1.7.4 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/magefile/mage v1.14.0 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/magefile/mage v1.13.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nwaples/rardecode v1.1.3 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pierrec/lz4/v4 v4.1.14 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
golang.org/x/text v0.3.8 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.28.1 // indirect
modernc.org/libc v1.20.0 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect
modernc.org/sqlite v1.19.1 // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
modernc.org/libc v1.16.17 // indirect
modernc.org/mathutil v1.4.1 // indirect
modernc.org/memory v1.1.1 // indirect
modernc.org/sqlite v1.17.3 // indirect
)

go.sum (530 lines changed)

File diff suppressed because it is too large.

View File

@@ -5,7 +5,6 @@ import (
"emperror.dev/errors"
"github.com/asaskevich/govalidator"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server"
)
@@ -38,7 +37,7 @@ func New(ctx context.Context, manager *server.Manager, details ServerDetails) (*
// Create a new server instance using the configuration we wrote to the disk
// so that everything gets instantiated correctly on the struct.
-s, err := manager.InitServer(ctx, c)
+s, err := manager.InitServer(c)
if err != nil {
return nil, errors.WrapIf(err, "installer: could not init server instance")
}

View File

@@ -2,9 +2,7 @@ package cron
import (
"context"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/server"

View File

@@ -2,15 +2,13 @@ package cron
import (
"context"
"time"
"emperror.dev/errors"
log2 "github.com/apex/log"
"github.com/go-co-op/gocron"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
"time"
)
const ErrCronRunning = errors.Sentinel("cron: job already running")

View File

@@ -2,14 +2,12 @@ package cron
import (
"context"
"reflect"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
"reflect"
)
type sftpCron struct {

View File

@@ -1,23 +1,19 @@
package database
import (
"path/filepath"
"time"
"emperror.dev/errors"
"github.com/glebarez/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/system"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"path/filepath"
"time"
)
var (
o system.AtomicBool
db *gorm.DB
)
var o system.AtomicBool
var db *gorm.DB
// Initialize configures the local SQLite database for Wings and ensures that the models have
// been fully migrated.
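A sketch of the kind of setup the Initialize comment describes, assuming the pure-Go glebarez/sqlite driver and gorm versions from go.mod; ActivityLog here is a stand-in model rather than the real Wings schema:

```go
package main

import (
	"log"

	"github.com/glebarez/sqlite"
	"gorm.io/gorm"
)

type ActivityLog struct{ ID uint }

func open(path string) (*gorm.DB, error) {
	db, err := gorm.Open(sqlite.Open(path), &gorm.Config{})
	if err != nil {
		return nil, err
	}
	// Ensure the schema matches the registered models.
	return db, db.AutoMigrate(&ActivityLog{})
}

func main() {
	if _, err := open("wings.db"); err != nil {
		log.Fatal(err)
	}
}
```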

View File

@@ -1,11 +1,9 @@
package models
import (
"time"
"gorm.io/gorm"
"github.com/pterodactyl/wings/system"
"gorm.io/gorm"
"time"
)
type Event string

View File

@@ -2,7 +2,6 @@ package models
import (
"database/sql"
"emperror.dev/errors"
"github.com/goccy/go-json"
)

View File

@@ -1,330 +0,0 @@
package vhd
import (
"context"
"emperror.dev/errors"
"fmt"
"github.com/pterodactyl/wings/config"
"github.com/spf13/afero"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
)
var (
ErrInvalidDiskPathTarget = errors.Sentinel("vhd: disk path is a directory or symlink")
ErrMountPathNotDirectory = errors.Sentinel("vhd: mount point is not a directory")
ErrFilesystemMounted = errors.Sentinel("vhd: filesystem is already mounted")
ErrFilesystemNotMounted = errors.Sentinel("vhd: filesystem is not mounted")
ErrFilesystemExists = errors.Sentinel("vhd: filesystem already exists on disk")
)
var useDdAllocation bool
var setDdAllocator sync.Once
// hasExitCode allows this code to test the response error to see if there is
// an exit code available from the command call that can be used to determine if
// something went wrong.
type hasExitCode interface {
ExitCode() int
}
// Commander defines an interface that must be met for executing commands on the
// underlying OS. By default the vhd package will use Go's exec.Cmd type for
// execution. This interface allows stubbing out on tests, or potentially custom
// setups down the line.
type Commander interface {
Run() error
Output() ([]byte, error)
String() string
}
// CommanderProvider is a function that provides a struct meeting the Commander
// interface requirements.
type CommanderProvider func(ctx context.Context, name string, args ...string) Commander
// CfgOption is a configuration option callback for the Disk.
type CfgOption func(d *Disk) *Disk
// Disk represents the underlying virtual disk for the instance.
type Disk struct {
mu sync.RWMutex
// The total size of the disk allowed in bytes.
size int64
// The path where the disk image should be created.
diskPath string
// The point at which this disk should be made available on the system. This
// is where files can be read/written to.
mountAt string
fs afero.Fs
commander CommanderProvider
}
// DiskPath returns the underlying path that contains the virtual disk for the server
// identified by its UUID.
func DiskPath(uuid string) string {
return filepath.Join(config.Get().System.Data, ".vhd/", uuid+".img")
}
// Enabled returns true when VHD support is enabled on the instance.
func Enabled() bool {
return config.Get().Servers.Filesystem.Driver == config.FSDriverVHD
}
// New returns a new Disk instance. The "size" parameter should be provided in
// bytes of space allowed for the disk. An additional slice of option callbacks
// can be provided to programmatically swap out the underlying filesystem
// implementation or the underlying command execution engine.
func New(size int64, diskPath string, mountAt string, opts ...func(*Disk)) *Disk {
if diskPath == "" || mountAt == "" {
panic("vhd: cannot specify an empty disk or mount path")
}
d := Disk{
size: size,
diskPath: diskPath,
mountAt: mountAt,
fs: afero.NewOsFs(),
commander: func(ctx context.Context, name string, args ...string) Commander {
return exec.CommandContext(ctx, name, args...)
},
}
for _, opt := range opts {
opt(&d)
}
return &d
}
// WithFs allows for a different underlying filesystem to be provided to the
// virtual disk manager.
func WithFs(fs afero.Fs) func(*Disk) {
return func(d *Disk) {
d.fs = fs
}
}
// WithCommander allows a different Commander provider to be provided.
func WithCommander(c CommanderProvider) func(*Disk) {
return func(d *Disk) {
d.commander = c
}
}
func (d *Disk) Path() string {
return d.diskPath
}
func (d *Disk) MountPath() string {
return d.mountAt
}
// Exists reports if the disk exists on the system yet or not. This only verifies
// the presence of the disk image, not the validity of it. An error is returned
// if the path exists but the destination is not a file or is a symlink.
func (d *Disk) Exists() (bool, error) {
d.mu.RLock()
defer d.mu.RUnlock()
st, err := d.fs.Stat(d.diskPath)
if err != nil && os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, errors.WithStack(err)
}
if !st.IsDir() && st.Mode()&os.ModeSymlink == 0 {
return true, nil
}
return false, errors.WithStack(ErrInvalidDiskPathTarget)
}
// IsMounted checks to see if the given disk is currently mounted.
func (d *Disk) IsMounted(ctx context.Context) (bool, error) {
find := d.mountAt + " ext4"
cmd := d.commander(ctx, "grep", "-qs", find, "/proc/mounts")
if err := cmd.Run(); err != nil {
if v, ok := err.(hasExitCode); ok {
if v.ExitCode() == 1 {
return false, nil
}
}
return false, errors.Wrap(err, "vhd: failed to execute grep for mount existence")
}
return true, nil
}
// Mount attempts to mount the disk as configured. If it does not exist or the
// mount command fails an error will be returned to the caller. This does not
// attempt to create the disk if it is missing from the filesystem.
//
// Attempting to mount a disk which does not exist will result in an error being
// returned to the caller. If the disk is already mounted an ErrFilesystemMounted
// error is returned to the caller.
func (d *Disk) Mount(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.mount(ctx)
}
// Unmount attempts to unmount the disk from the system. If the disk is not
// currently mounted this function is a no-op and ErrFilesystemNotMounted is
// returned to the caller.
func (d *Disk) Unmount(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.unmount(ctx)
}
// Allocate executes the "fallocate" command on the disk. This will first unmount
// the disk from the system before attempting to actually allocate the space. If
// this disk already exists on the machine it will be resized accordingly.
//
// DANGER! This will unmount the disk from the machine while performing this
// action, use caution when calling it during normal processes.
func (d *Disk) Allocate(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
if exists, err := d.Exists(); exists {
// If the disk currently exists attempt to unmount the mount point before
// allocating space.
if err := d.Unmount(ctx); err != nil {
return errors.WithStackIf(err)
}
} else if err != nil {
return errors.Wrap(err, "vhd: failed to check for existence of root disk")
}
trim := path.Base(d.diskPath)
if err := d.fs.MkdirAll(strings.TrimSuffix(d.diskPath, trim), 0700); err != nil {
return errors.Wrap(err, "vhd: failed to create base vhd disk directory")
}
cmd := d.allocationCmd(ctx)
if _, err := cmd.Output(); err != nil {
msg := "vhd: failed to execute space allocation command"
if v, ok := err.(*exec.ExitError); ok {
stderr := strings.Trim(string(v.Stderr), ".\n")
if !useDdAllocation && strings.HasSuffix(stderr, "not supported") {
// Try again: fallocate is not supported on some filesystems so we'll fall
// back to making use of dd for subsequent operations.
setDdAllocator.Do(func() {
useDdAllocation = true
})
return d.Allocate(ctx)
}
msg = msg + ": " + stderr
}
return errors.Wrap(err, msg)
}
return errors.WithStack(d.fs.Chmod(d.diskPath, 0600))
}
// Resize will change the internal disk size limit and then allocate the new
// space to the disk automatically.
func (d *Disk) Resize(ctx context.Context, size int64) error {
atomic.StoreInt64(&d.size, size)
return d.Allocate(ctx)
}
// Destroy removes the underlying allocated disk image and unmounts the disk.
func (d *Disk) Destroy(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
if err := d.unmount(ctx); err != nil {
return errors.WithStackIf(err)
}
return errors.WithStackIf(d.fs.RemoveAll(d.mountAt))
}
// MakeFilesystem will attempt to execute the "mkfs" command against the disk on
// the machine. If the disk has already been created this command will return an
// ErrFilesystemExists error to the caller. You should manually unmount the disk
// if it shouldn't be mounted at this point.
func (d *Disk) MakeFilesystem(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
// If no error is returned when mounting DO NOT execute this command as it will
// completely destroy the data stored at that location.
err := d.Mount(ctx)
if err == nil || errors.Is(err, ErrFilesystemMounted) {
// If it wasn't already mounted try to clean up at this point and unmount
// the disk. If this fails just ignore it for now.
if err != nil {
_ = d.Unmount(ctx)
}
return ErrFilesystemExists
}
if !strings.Contains(err.Error(), "can't find in /etc/fstab") && !strings.Contains(err.Error(), "exit status 32") {
return errors.WrapIf(err, "vhd: unexpected error from mount command")
}
// As long as we got an error back that was because we couldn't find the disk
// in the /etc/fstab file we're good. Otherwise it means the disk probably exists
// or something else went wrong.
//
// Because this is a destructive command and non-tty based execution of it implies
// "-F" (force), we need to only run it when we can guarantee it doesn't already
// exist. No vague "maybe that error is expected" allowed here.
cmd := d.commander(ctx, "mkfs", "-t", "ext4", d.diskPath)
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "vhd: failed to make filesystem for disk")
}
return nil
}
func (d *Disk) mount(ctx context.Context) error {
if isMounted, err := d.IsMounted(ctx); err != nil {
return errors.WithStackIf(err)
} else if isMounted {
return ErrFilesystemMounted
}
if st, err := d.fs.Stat(d.mountAt); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "vhd: failed to stat mount path")
} else if os.IsNotExist(err) {
if err := d.fs.MkdirAll(d.mountAt, 0700); err != nil {
return errors.Wrap(err, "vhd: failed to create mount path")
}
} else if !st.IsDir() {
return errors.WithStack(ErrMountPathNotDirectory)
}
u := config.Get().System.User
if err := d.fs.Chown(d.mountAt, u.Uid, u.Gid); err != nil {
return errors.Wrap(err, "vhd: failed to chown mount point")
}
cmd := d.commander(ctx, "mount", "-t", "auto", "-o", "loop", d.diskPath, d.mountAt)
if _, err := cmd.Output(); err != nil {
msg := "vhd: failed to mount disk"
if v, ok := err.(*exec.ExitError); ok {
msg = msg + ": " + strings.Trim(string(v.Stderr), ".\n")
}
return errors.Wrap(err, msg)
}
return nil
}
func (d *Disk) unmount(ctx context.Context) error {
cmd := d.commander(ctx, "umount", d.mountAt)
if err := cmd.Run(); err != nil {
v, ok := err.(hasExitCode)
if ok && v.ExitCode() == 32 {
return ErrFilesystemNotMounted
}
return errors.Wrap(err, "vhd: failed to execute unmount command for disk")
}
return nil
}
// allocationCmd returns the command to allocate the disk image. This will attempt to
// use the fallocate command if available, otherwise it will fall back to dd if the
// fallocate command has previously failed.
//
// We use 1024 as the multiplier for all of the disk space logic within the application.
// Passing "K" (/1024) is the same as "KiB" for fallocate, but is different than "KB" (/1000).
func (d *Disk) allocationCmd(ctx context.Context) Commander {
s := atomic.LoadInt64(&d.size) / 1024
if useDdAllocation {
return d.commander(ctx, "dd", "if=/dev/zero", fmt.Sprintf("of=%s", d.diskPath), fmt.Sprintf("bs=%dk", s), "count=1")
}
return d.commander(ctx, "fallocate", "-l", fmt.Sprintf("%dK", s), d.diskPath)
}
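A usage sketch for the Disk API above, assuming the import path used by the migrate-vhd command (github.com/pterodactyl/wings/internal/vhd); the UUID and size are hypothetical. Since allocationCmd divides the byte size by 1024 and passes a "K" suffix, a 10 GiB disk becomes `fallocate -l 10485760K`.

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/pterodactyl/wings/internal/vhd"
)

func main() {
	ctx := context.Background()
	// 10 GiB disk image for a hypothetical server UUID, mounted at its data path.
	d := vhd.New(10*1024*1024*1024, vhd.DiskPath("0001-uuid"), "/var/lib/pterodactyl/volumes/0001-uuid")
	if err := d.Allocate(ctx); err != nil {
		log.Fatal(err) // fallocate failed (or the dd fallback after "not supported")
	}
	// MakeFilesystem refuses to run if the disk already mounts cleanly.
	if err := d.MakeFilesystem(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemExists) {
		log.Fatal(err)
	}
	if err := d.Mount(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemMounted) {
		log.Fatal(err)
	}
}
```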

View File

@@ -1,476 +0,0 @@
package vhd
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"testing"
"github.com/pterodactyl/wings/config"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func init() {
config.Set(&config.Configuration{
AuthenticationToken: "token123",
System: config.SystemConfiguration{
User: struct {
Uid int
Gid int
}{Uid: 10, Gid: 10},
},
})
}
type mockCmd struct {
run func() error
output func() ([]byte, error)
string func() string
}
func (m *mockCmd) Run() error {
if m.run != nil {
return m.run()
}
return nil
}
func (m *mockCmd) Output() ([]byte, error) {
if m.output != nil {
return m.output()
}
return nil, nil
}
func (m *mockCmd) String() string {
if m.string != nil {
return m.string()
}
return ""
}
var _ Commander = (*mockCmd)(nil)
type mockedExitCode struct {
code int
}
func (m *mockedExitCode) ExitCode() int {
return m.code
}
func (m *mockedExitCode) Error() string {
return fmt.Sprintf("mocked exit code: code %d", m.code)
}
func newMockDisk(c CommanderProvider) *Disk {
commander := func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{}
}
w := commander
if c != nil {
w = c
}
return New(100 * 1024 * 1024, "/disk.img", "/mnt", WithFs(afero.NewMemMapFs()), WithCommander(w))
}
func Test_New(t *testing.T) {
t.Run("creates expected struct", func(t *testing.T) {
d := New(100 * 1024 * 1024, "/disk.img", "/mnt")
assert.NotNil(t, d)
assert.Equal(t, int64(100 * 1024 * 1024), d.size)
assert.Equal(t, "/disk.img", d.diskPath)
assert.Equal(t, "/mnt", d.mountAt)
// Ensure by default we get a commander interface returned and that it
// returns an *exec.Cmd.
o := d.commander(context.TODO(), "foo", "-bar")
assert.NotNil(t, o)
_, ok := o.(Commander)
assert.True(t, ok)
_, ok = o.(*exec.Cmd)
assert.True(t, ok)
})
t.Run("creates an instance with custom options", func(t *testing.T) {
fs := afero.NewMemMapFs()
cprov := struct {
Commander
}{}
c := func(ctx context.Context, name string, args ...string) Commander {
return &cprov
}
d := New(100, "/disk.img", "/mnt", WithFs(fs), WithCommander(c))
assert.NotNil(t, d)
assert.Same(t, fs, d.fs)
assert.Same(t, &cprov, d.commander(context.TODO(), ""))
})
t.Run("panics if either path is empty", func(t *testing.T) {
assert.Panics(t, func() {
_ = New(100, "", "/bar")
})
assert.Panics(t, func() {
_ = New(100, "/foo", "")
})
})
}
func TestDisk_Exists(t *testing.T) {
t.Run("it exists", func(t *testing.T) {
d := newMockDisk(nil)
f, err := d.fs.Create("/disk.img")
require.NoError(t, err)
_ = f.Close()
exists, err := d.Exists()
assert.NoError(t, err)
assert.True(t, exists)
})
t.Run("it does not exist", func(t *testing.T) {
d := newMockDisk(nil)
exists, err := d.Exists()
assert.NoError(t, err)
assert.False(t, exists)
})
t.Run("it reports errors", func(t *testing.T) {
d := newMockDisk(nil)
err := d.fs.Mkdir("/disk.img", 0600)
require.NoError(t, err)
exists, err := d.Exists()
assert.Error(t, err)
assert.False(t, exists)
assert.EqualError(t, err, ErrInvalidDiskPathTarget.Error())
})
}
func TestDisk_IsMounted(t *testing.T) {
t.Run("executes command and finds mounted disk", func(t *testing.T) {
is := assert.New(t)
var called bool
pctx := context.TODO()
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
is.Same(pctx, ctx)
is.Equal("grep", name)
is.Len(args, 3)
is.Equal([]string{"-qs", "/mnt ext4", "/proc/mounts"}, args)
return &mockCmd{}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(pctx)
is.NoError(err)
is.True(mnt)
is.True(called)
})
t.Run("handles exit code 1 gracefully", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(context.TODO())
assert.NoError(t, err)
assert.False(t, mnt)
assert.True(t, called)
})
t.Run("handles unexpected errors successfully", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 3}
},
}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(context.TODO())
assert.Error(t, err)
assert.False(t, mnt)
})
}
func TestDisk_Mount(t *testing.T) {
failedCmd := func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{run: func() error {
return &mockedExitCode{code: 1}
}}
}
t.Run("error is returned if mount point is not a directory", func(t *testing.T) {
disk := newMockDisk(failedCmd)
_, err := disk.fs.Create("/mnt")
require.NoError(t, err)
err = disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrMountPathNotDirectory.Error())
})
t.Run("error is returned if mount point cannot be created", func(t *testing.T) {
disk := newMockDisk(failedCmd)
disk.fs = afero.NewReadOnlyFs(disk.fs)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to create mount path: operation not permitted")
})
t.Run("error is returned if already mounted", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrFilesystemMounted.Error())
})
t.Run("error is returned if mount command fails", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
output: func() ([]byte, error) {
called = true
assert.Equal(t, "mount", name)
assert.Equal(t, []string{"-t", "auto", "-o", "loop", "/disk.img", "/mnt"}, args)
return nil, &exec.ExitError{
ProcessState: &os.ProcessState{},
Stderr: []byte("foo bar.\n"),
}
},
}
}
disk := newMockDisk(cmd)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to mount disk: foo bar: exit status 0")
assert.True(t, called)
})
t.Run("disk can be mounted at existing path", func(t *testing.T) {
disk := newMockDisk(failedCmd)
require.NoError(t, disk.fs.Mkdir("/mnt", 0600))
err := disk.Mount(context.TODO())
assert.NoError(t, err)
})
t.Run("disk can be mounted at non-existing path", func(t *testing.T) {
disk := newMockDisk(failedCmd)
err := disk.Mount(context.TODO())
assert.NoError(t, err)
st, err := disk.fs.Stat("/mnt")
assert.NoError(t, err)
assert.True(t, st.IsDir())
})
}
func TestDisk_Unmount(t *testing.T) {
t.Run("can unmount a disk", func(t *testing.T) {
is := assert.New(t)
pctx := context.TODO()
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
is.Same(pctx, ctx)
is.Equal("umount", name)
is.Equal([]string{"/mnt"}, args)
return &mockCmd{}
}
disk := newMockDisk(cmd)
err := disk.Unmount(pctx)
is.NoError(err)
is.True(called)
})
t.Run("handles exit code 32 correctly", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 32}
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.NoError(t, err)
})
t.Run("non code 32 errors are returned as error", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.Error(t, err)
})
t.Run("errors without ExitCode function are returned", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return errors.New("foo bar")
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.Error(t, err)
})
}
func TestDisk_Allocate(t *testing.T) {
t.Run("disk is unmounted before allocating space", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
output: func() ([]byte, error) {
called = true
assert.Equal(t, "fallocate", name)
assert.Equal(t, []string{"-l", "102400K", "/disk.img"}, args)
return nil, nil
},
}
}
disk := newMockDisk(cmd)
err := disk.fs.Mkdir("/mnt", 0600)
require.NoError(t, err)
err = disk.Allocate(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("disk space is allocated even when not exists", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.Allocate(context.TODO())
assert.NoError(t, err)
})
t.Run("error is returned if command fails", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
output: func() ([]byte, error) {
return nil, &exec.ExitError{
ProcessState: &os.ProcessState{},
Stderr: []byte("foo bar.\n"),
}
},
}
}
disk := newMockDisk(cmd)
_, err := disk.fs.Create("/disk.img")
require.NoError(t, err)
err = disk.Allocate(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to execute fallocate command: foo bar: exit status 0")
})
}
func TestDisk_MakeFilesystem(t *testing.T) {
t.Run("filesystem is created if not found in /etc/fstab", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
// Expect the call from IsMounted here and just return what we need
// to indicate that nothing is currently mounted.
if name == "grep" {
return &mockedExitCode{code: 1}
}
called = true
assert.Equal(t, "mkfs", name)
assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
return nil
},
output: func() ([]byte, error) {
return nil, errors.New("error: can't find in /etc/fstab foo bar testing")
},
}
}
disk := newMockDisk(cmd)
err := disk.MakeFilesystem(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("filesystem is created if error is returned from mount command", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
// Expect the call from IsMounted here and just return what we need
// to indicate that nothing is currently mounted.
if name == "grep" {
return &mockedExitCode{code: 1}
}
called = true
assert.Equal(t, "mkfs", name)
assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
return nil
},
output: func() ([]byte, error) {
if name == "mount" {
return nil, &exec.ExitError{
Stderr: []byte("foo bar: exit status 32\n"),
}
}
return nil, nil
},
}
}
disk := newMockDisk(cmd)
err := disk.MakeFilesystem(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("error is returned if currently mounted", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.MakeFilesystem(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrFilesystemExists.Error())
})
}

View File

@@ -4,20 +4,18 @@ import (
"bytes"
"context"
"fmt"
"github.com/pterodactyl/wings/internal/models"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/cenkalti/backoff/v4"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)
@@ -60,18 +58,6 @@ func New(base string, opts ...ClientOption) Client {
return &c
}
// NewFromConfig returns a new Client using the configuration passed through
// by the caller.
func NewFromConfig(cfg *config.Configuration, opts ...ClientOption) Client {
passOpts := []ClientOption{
WithCredentials(cfg.AuthenticationTokenId, cfg.AuthenticationToken),
WithHttpClient(&http.Client{
Timeout: time.Second * time.Duration(cfg.RemoteQuery.Timeout),
}),
}
return New(cfg.PanelLocation, append(passOpts, opts...)...)
}
// WithCredentials sets the credentials to use when making request to the remote
// API endpoint.
func WithCredentials(id, token string) ClientOption {
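The removed NewFromConfig helper is easy to recompose from the primitives that remain. A sketch assuming the config fields it referenced (PanelLocation, AuthenticationTokenId, AuthenticationToken, RemoteQuery.Timeout) keep those names:

```go
package main

import (
	"net/http"
	"time"

	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/remote"
)

// newClient reproduces the removed NewFromConfig helper using New plus the
// WithCredentials and WithHttpClient options shown above.
func newClient() remote.Client {
	cfg := config.Get()
	return remote.New(
		cfg.PanelLocation,
		remote.WithCredentials(cfg.AuthenticationTokenId, cfg.AuthenticationToken),
		remote.WithHttpClient(&http.Client{
			Timeout: time.Second * time.Duration(cfg.RemoteQuery.Timeout),
		}),
	)
}

func main() { _ = newClient() }
```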

View File

@@ -3,11 +3,10 @@ package remote
import (
"context"
"fmt"
"github.com/pterodactyl/wings/internal/models"
"strconv"
"sync"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"golang.org/x/sync/errgroup"

View File

@@ -2,11 +2,10 @@ package remote
import (
"bytes"
"regexp"
"strings"
"github.com/apex/log"
"github.com/goccy/go-json"
"regexp"
"strings"
"github.com/pterodactyl/wings/parser"
)
@@ -157,15 +156,9 @@ type BackupRemoteUploadResponse struct {
PartSize int64 `json:"part_size"`
}
-type BackupPart struct {
-ETag string `json:"etag"`
-PartNumber int `json:"part_number"`
-}
type BackupRequest struct {
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Successful bool `json:"successful"`
-Parts []BackupPart `json:"parts"`
}
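For reference, a sketch of the payload the removed types describe, such as reporting a completed multipart upload back to the Panel; every value below is hypothetical:

```go
package main

import "github.com/pterodactyl/wings/remote"

func main() {
	_ = remote.BackupRequest{
		Checksum:     "9c56cc51b374c3ba189210d5b6d4bf57790d351c96c47c02190ecf1e430635ab",
		ChecksumType: "sha1",
		Size:         1 << 20,
		Successful:   true,
		Parts: []remote.BackupPart{
			{ETag: `"d41d8cd98f00b204e9800998ecf8427e"`, PartNumber: 1},
		},
	}
}
```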

View File

@@ -4,7 +4,6 @@ import (
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/router/middleware"
wserver "github.com/pterodactyl/wings/server"
@@ -16,7 +15,6 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
router := gin.New()
router.Use(gin.Recovery())
-router.SetTrustedProxies(config.Get().Api.TrustedProxies)
router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
// @todo log this into a different file so you can setup IP blocking for abusive requests and such.

View File

@@ -9,7 +9,6 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/router/downloader"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/tokens"

View File

@@ -3,6 +3,7 @@ package router
import (
"bufio"
"context"
"github.com/pterodactyl/wings/internal/models"
"io"
"mime/multipart"
"net/http"
@@ -13,8 +14,6 @@ import (
"strconv"
"strings"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/config"
"emperror.dev/errors"
@@ -603,7 +602,7 @@ func postServerUploadFiles(c *gin.Context) {
NewServerError(err, s).Abort(c)
return
} else {
-s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.ClientIP()), server.ActivityFileUploaded, models.ActivityMeta{
+s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.Request.RemoteAddr), server.ActivityFileUploaded, models.ActivityMeta{
"file": header.Filename,
"directory": filepath.Clean(directory),
})

View File

@@ -32,7 +32,7 @@ func getServerWebsocket(c *gin.Context) {
ctx, cancel := context.WithCancel(c.Request.Context())
defer cancel()
-handler, err := websocket.GetHandler(s, c.Writer, c.Request, c)
+handler, err := websocket.GetHandler(s, c.Writer, c.Request)
if err != nil {
NewServerError(err, s).Abort(c)
return

View File

@@ -12,6 +12,7 @@ import (
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"emperror.dev/errors"
@@ -29,9 +30,19 @@ import (
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/server/filesystem"
"github.com/pterodactyl/wings/system"
)
-const progressWidth = 25
+// Number of ticks in the progress bar
+const ticks = 25
+// 100% / number of ticks = percentage represented by each tick
+const tickPercentage = 100 / ticks
+type downloadProgress struct {
+size int64
+progress int64
+}
// Data passed over to initiate a server transfer.
type serverTransferRequest struct {
@@ -84,7 +95,7 @@ func getServerArchive(c *gin.Context) {
return
}
-// Compute sha256 checksum.
+// Compute sha1 checksum.
h := sha256.New()
f, err := os.Open(archivePath)
if err != nil {
@@ -173,35 +184,11 @@ func postServerArchive(c *gin.Context) {
return
}
// Get the disk usage of the server (used to calculate the progress of the archive process)
rawSize, err := s.Filesystem().DiskUsage(true)
if err != nil {
sendTransferLog("Failed to get disk usage for server, aborting transfer..")
l.WithField("error", err).Error("failed to get disk usage for server")
return
}
// Create an archive of the entire server's data directory.
a := &filesystem.Archive{
BasePath: s.Filesystem().Path(),
Progress: filesystem.NewProgress(rawSize),
}
// Send the archive progress to the websocket every 3 seconds.
ctx, cancel := context.WithCancel(s.Context())
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
sendTransferLog("Archiving " + p.Progress(progressWidth))
}
}
}(ctx, a.Progress, time.NewTicker(5*time.Second))
// Attempt to get an archive of the server.
if err := a.Create(getArchivePath(s.ID())); err != nil {
sendTransferLog("An error occurred while archiving the server: " + err.Error())
@@ -209,12 +196,6 @@ func postServerArchive(c *gin.Context) {
return
}
-// Cancel the progress ticker.
-cancel()
-// Show 100% completion.
-sendTransferLog("Archiving " + a.Progress.Progress(progressWidth))
sendTransferLog("Successfully created archive, attempting to notify panel..")
l.Info("successfully created server transfer archive, notifying panel..")
@@ -242,6 +223,12 @@ func postServerArchive(c *gin.Context) {
c.Status(http.StatusAccepted)
}
+func (w *downloadProgress) Write(v []byte) (int, error) {
+n := len(v)
+atomic.AddInt64(&w.progress, int64(n))
+return n, nil
+}
// Log helper function to attach all errors and info output to a consistently formatted
// log string for easier querying.
func (str serverTransferRequest) log() *log.Entry {
@@ -334,7 +321,7 @@ func postTransfer(c *gin.Context) {
manager := middleware.ExtractManager(c)
u, err := uuid.Parse(data.ServerID)
if err != nil {
-_ = WithError(c, err)
+WithError(c, err)
return
}
// Force the server ID to be a valid UUID string at this point. If it is not an error
@@ -344,12 +331,11 @@ func postTransfer(c *gin.Context) {
data.log().Info("handling incoming server transfer request")
go func(data *serverTransferRequest) {
ctx := context.Background()
hasError := true
// Create a new server installer. This will only configure the environment and not
// run the installer scripts.
-i, err := installer.New(ctx, manager, data.Server)
+i, err := installer.New(context.Background(), manager, data.Server)
if err != nil {
_ = data.sendTransferStatus(manager.Client(), false)
data.log().WithField("error", err).Error("failed to validate received server data")
@@ -421,22 +407,25 @@ func postTransfer(c *gin.Context) {
sendTransferLog("Writing archive to disk...")
data.log().Info("writing transfer archive to disk...")
progress := filesystem.NewProgress(size)
// Send the archive progress to the websocket every 3 seconds.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
sendTransferLog("Downloading " + p.Progress(progressWidth))
// Copy the file.
progress := &downloadProgress{size: size}
ticker := time.NewTicker(3 * time.Second)
go func(progress *downloadProgress, t *time.Ticker) {
for range ticker.C {
// p = 100 (Downloaded)
// size = 1000 (Content-Length)
// p / size = 0.1
// * 100 = 10% (Multiply by 100 to get a percentage of the download)
// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
// 2.5 (Number of ticks as a float64)
// 2 (convert to an integer)
p := atomic.LoadInt64(&progress.progress)
// We have to cast these numbers to float in order to get a float result from the division.
width := ((float64(p) / float64(size)) * 100) / tickPercentage
bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
}
}
}(ctx, progress, time.NewTicker(5*time.Second))
}(progress, ticker)
var reader io.Reader
downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
@@ -449,16 +438,18 @@ func postTransfer(c *gin.Context) {
buf := make([]byte, 1024*4)
if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
ticker.Stop()
_ = file.Close()
sendTransferLog("Failed while writing archive file to disk: " + err.Error())
data.log().WithField("error", err).Error("failed to copy archive file to disk")
return
}
cancel()
ticker.Stop()
// Show 100% completion.
sendTransferLog("Downloading " + progress.Progress(progressWidth))
humanSize := system.FormatBytes(progress.size)
sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)
if err := file.Close(); err != nil {
data.log().WithField("error", err).Error("unable to close archive file on local filesystem")

View File

@@ -7,7 +7,6 @@ import (
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"

View File

@@ -3,21 +3,18 @@ package websocket
import (
"context"
"fmt"
"github.com/pterodactyl/wings/internal/models"
"net/http"
"strings"
"sync"
"time"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/gin-gonic/gin"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/config"
@@ -82,7 +79,7 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
}
// GetHandler returns a new websocket handler using the context provided.
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Handler, error) {
upgrader := websocket.Upgrader{
// Ensure that the websocket request is originating from the Panel itself,
// and not some other location.
@@ -114,7 +111,7 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin
Connection: conn,
jwt: nil,
server: s,
ra: s.NewRequestActivity("", c.ClientIP()),
ra: s.NewRequestActivity("", r.RemoteAddr),
uuid: u,
}, nil
}
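
For context on the c.ClientIP() versus r.RemoteAddr change above: behind a reverse proxy, RemoteAddr is the proxy's socket address, while gin's ClientIP() can recover the original client from forwarding headers when the proxy is trusted. A sketch using gin's API; the addresses are hypothetical:

package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/gin-gonic/gin"
)

func main() {
	gin.SetMode(gin.TestMode)
	r := gin.New()
	// Only forwarding headers sent from this network are honored.
	_ = r.SetTrustedProxies([]string{"192.168.0.0/16"})
	r.GET("/", func(c *gin.Context) {
		fmt.Println("RemoteAddr:", c.Request.RemoteAddr, "-> ClientIP:", c.ClientIP())
	})
	req := httptest.NewRequest("GET", "/", nil)
	req.RemoteAddr = "192.168.1.5:4321"              // the proxy's socket address
	req.Header.Set("X-Forwarded-For", "203.0.113.7") // the real client
	r.ServeHTTP(httptest.NewRecorder(), req)
}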

View File

@@ -2,12 +2,10 @@ package server
import (
"context"
"time"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
"time"
)
const ActivityPowerPrefix = "server:power."

View File

@@ -32,7 +32,7 @@ type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mti
// noinspection GoNameStartsWithPackageName
type BackupInterface interface {
// SetClient sets the API request client on the backup interface.
SetClient(remote.Client)
SetClient(c remote.Client)
// Identifier returns the UUID of this backup as tracked by the panel
// instance.
Identifier() string
@@ -41,7 +41,7 @@ type BackupInterface interface {
WithLogContext(map[string]interface{})
// Generate creates a backup in whatever the configured source for the
// specific implementation is.
Generate(context.Context, string, string) (*ArchiveDetails, error)
Generate(ctx context.Context, basePath string, ignore string) (*ArchiveDetails, error)
// Ignored returns the ignored files for this backup instance.
Ignored() string
// Checksum returns a SHA1 checksum for the generated backup.
@@ -53,13 +53,13 @@ type BackupInterface interface {
// to store it until it is moved to the final spot.
Path() string
// Details returns details about the archive.
Details(context.Context, []remote.BackupPart) (*ArchiveDetails, error)
Details(ctx context.Context) (*ArchiveDetails, error)
// Remove removes a backup file.
Remove() error
// Restore is called when a backup is ready to be restored to the disk from
// the given source. Not every backup implementation will support this nor
// will every implementation require a reader be provided.
Restore(context.Context, io.Reader, RestoreCallback) error
Restore(ctx context.Context, reader io.Reader, callback RestoreCallback) error
}
type Backup struct {
@@ -119,8 +119,8 @@ func (b *Backup) Checksum() ([]byte, error) {
// Details returns both the checksum and size of the archive currently stored on
// the disk to the caller.
func (b *Backup) Details(ctx context.Context, parts []remote.BackupPart) (*ArchiveDetails, error) {
ad := ArchiveDetails{ChecksumType: "sha1", Parts: parts}
func (b *Backup) Details(ctx context.Context) (*ArchiveDetails, error) {
ad := ArchiveDetails{ChecksumType: "sha1"}
g, ctx := errgroup.WithContext(ctx)
g.Go(func() error {
@@ -165,7 +165,6 @@ type ArchiveDetails struct {
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Parts []remote.BackupPart `json:"parts"`
}
// ToRequest returns a request object.
@@ -175,6 +174,5 @@ func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
ChecksumType: ad.ChecksumType,
Size: ad.Size,
Successful: successful,
Parts: ad.Parts,
}
}
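
The Details implementation above fans out with errgroup to gather the checksum and size of the archive concurrently. A reduced sketch of that fan-out using golang.org/x/sync/errgroup; the two work functions are placeholders for hashing and stat-ing the file:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var checksum string
	var size int64
	g, _ := errgroup.WithContext(context.Background())
	g.Go(func() error { checksum = "da39a3ee"; return nil }) // stand-in for hashing the archive
	g.Go(func() error { size = 1 << 20; return nil })        // stand-in for stat-ing the archive
	if err := g.Wait(); err != nil {
		fmt.Println("failed to collect details:", err)
		return
	}
	fmt.Printf("checksum=%s size=%d\n", checksum, size)
}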

View File

@@ -69,7 +69,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
}
b.log().Info("created backup successfully")
ad, err := b.Details(ctx, nil)
ad, err := b.Details(ctx)
if err != nil {
return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
}

View File

@@ -71,11 +71,10 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*Arch
}
defer rc.Close()
parts, err := s.generateRemoteRequest(ctx, rc)
if err != nil {
if err := s.generateRemoteRequest(ctx, rc); err != nil {
return nil, err
}
ad, err := s.Details(ctx, parts)
ad, err := s.Details(ctx)
if err != nil {
return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
}
@@ -126,20 +125,20 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
}
// Generates the remote S3 request and begins the upload.
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) error {
defer rc.Close()
s.log().Debug("attempting to get size of backup...")
size, err := s.Backup.Size()
if err != nil {
return nil, err
return err
}
s.log().WithField("size", size).Debug("got size of backup")
s.log().Debug("attempting to get S3 upload urls from Panel...")
urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
if err != nil {
return nil, err
return err
}
s.log().Debug("got S3 upload urls from the Panel")
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
@@ -157,26 +156,22 @@ func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser)
}
// Attempt to upload the part.
etag, err := uploader.uploadPart(ctx, part, partSize)
if err != nil {
if _, err := uploader.uploadPart(ctx, part, partSize); err != nil {
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
return nil, err
return err
}
uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
ETag: etag,
PartNumber: i + 1,
})
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
}
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
return uploader.uploadedParts, nil
return nil
}
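
In the newer code this function also recorded each uploaded part's ETag with its 1-indexed part number so the panel could complete the multipart upload. A trimmed sketch of that bookkeeping; uploadPart here is a stand-in for the real HTTP PUT against a presigned URL:

package main

import "fmt"

type backupPart struct {
	ETag       string
	PartNumber int
}

func uploadPart(url string) (string, error) { return "\"etag-for-" + url + "\"", nil }

func main() {
	urls := []string{"https://s3.example/part1", "https://s3.example/part2"}
	parts := make([]backupPart, 0, len(urls))
	for i, u := range urls {
		etag, err := uploadPart(u)
		if err != nil {
			fmt.Println("failed to upload part", i+1, err)
			return
		}
		parts = append(parts, backupPart{ETag: etag, PartNumber: i + 1})
	}
	fmt.Printf("%+v\n", parts)
}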
type s3FileUploader struct {
io.ReadCloser
client *http.Client
uploadedParts []remote.BackupPart
}
// newS3FileUploader returns a new file uploader instance.

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/mitchellh/colorstring"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)

View File

@@ -8,7 +8,6 @@ import (
"path/filepath"
"strings"
"sync"
"sync/atomic"
"emperror.dev/errors"
"github.com/apex/log"
@@ -18,7 +17,6 @@ import (
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)
const memory = 4 * 1024
@@ -30,62 +28,6 @@ var pool = sync.Pool{
},
}
// Progress is used to track the progress of any I/O operations that are
// being performed.
type Progress struct {
// written is the total size of the files that have been written to the writer.
written int64
// total is the total size of the archive in bytes.
total int64
// w is the optional writer that incoming bytes are forwarded to while being counted.
w io.Writer
}
// NewProgress returns a new progress tracker for an operation of the given total size.
func NewProgress(total int64) *Progress {
return &Progress{total: total}
}
// Written returns the total number of bytes written.
// This function should be used when the progress is tracking data being written.
func (p *Progress) Written() int64 {
return atomic.LoadInt64(&p.written)
}
// Total returns the total size in bytes.
func (p *Progress) Total() int64 {
return atomic.LoadInt64(&p.total)
}
// Write totals the number of bytes that have been written to the writer.
func (p *Progress) Write(v []byte) (int, error) {
n := len(v)
atomic.AddInt64(&p.written, int64(n))
if p.w != nil {
return p.w.Write(v)
}
return n, nil
}
// Progress returns a formatted progress string for the current progress.
func (p *Progress) Progress(width int) string {
current := p.Written()
total := p.Total()
// current = 100 (bytes written)
// total = 1000 (total size)
// current / total = 0.1
// * 100 = 10% (multiply by 100 to get a percentage of the progress)
// 10% / (100 / width) = 2.5 for width = 25 (divide by the percentage each tick of the bar represents)
// int(2.5) = 2 (truncate to an integer number of filled ticks)
// We have to cast these numbers to float in order to get a float result from the division.
ticks := ((float64(current) / float64(total)) * 100) / (float64(100) / float64(width))
bar := strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks))
return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
}
type Archive struct {
// BasePath is the absolute path to create the archive from where Files and Ignore are
// relative to.
@@ -98,13 +40,10 @@ type Archive struct {
// Files specifies the files to archive; this takes priority over the Ignore option. If
// unspecified, all files in the BasePath will be archived unless Ignore is set.
Files []string
// Progress wraps the writer of the archive to pass through the progress tracker.
Progress *Progress
}
// Create creates an archive at dst with all the files defined in the
// included Files array.
// Create creates an archive at dst with all of the files defined in the
// included files struct.
func (a *Archive) Create(dst string) error {
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
@@ -123,34 +62,26 @@ func (a *Archive) Create(dst string) error {
writer = f
}
// The default compression level is BestSpeed
var cl = pgzip.BestSpeed
// Choose which compression level to use based on the compression_level configuration option
var compressionLevel int
switch config.Get().System.Backups.CompressionLevel {
case "none":
compressionLevel = pgzip.NoCompression
case "best_compression":
compressionLevel = pgzip.BestCompression
cl = pgzip.NoCompression
case "best_speed":
fallthrough
default:
compressionLevel = pgzip.BestSpeed
cl = pgzip.BestSpeed
case "best_compression":
cl = pgzip.BestCompression
}
// Create a new gzip writer around the file.
gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
gw, _ := pgzip.NewWriterLevel(writer, cl)
_ = gw.SetConcurrency(1<<20, 1)
defer gw.Close()
var pw io.Writer
if a.Progress != nil {
a.Progress.w = gw
pw = a.Progress
} else {
pw = gw
}
// Create a new tar writer around the gzip writer.
tw := tar.NewWriter(pw)
tw := tar.NewWriter(gw)
defer tw.Close()
// Configure godirwalk.
@@ -185,7 +116,7 @@ func (a *Archive) Create(dst string) error {
// being generated.
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
return func(path string, de *godirwalk.Dirent) error {
// Skip directories because we are walking them recursively.
if de.IsDir() {
return nil
}
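
The switch above maps the backups compression_level configuration value onto gzip constants, with unknown values falling back to BestSpeed. A sketch of that mapping in isolation, assuming github.com/klauspost/pgzip, which the surrounding code appears to use:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/pgzip"
)

func levelFor(setting string) int {
	switch setting {
	case "none":
		return pgzip.NoCompression
	case "best_compression":
		return pgzip.BestCompression
	case "best_speed":
		fallthrough
	default:
		return pgzip.BestSpeed
	}
}

func main() {
	var buf bytes.Buffer
	gw, _ := pgzip.NewWriterLevel(&buf, levelFor("best_compression"))
	_, _ = gw.Write([]byte("hello hello hello"))
	_ = gw.Close()
	fmt.Println(buf.Len(), "compressed bytes")
}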

View File

@@ -5,6 +5,8 @@ import (
"archive/zip"
"compress/gzip"
"fmt"
gzip2 "github.com/klauspost/compress/gzip"
zip2 "github.com/klauspost/compress/zip"
"os"
"path"
"path/filepath"
@@ -13,9 +15,6 @@ import (
"sync/atomic"
"time"
gzip2 "github.com/klauspost/compress/gzip"
zip2 "github.com/klauspost/compress/zip"
"emperror.dev/errors"
"github.com/mholt/archiver/v3"
)

View File

@@ -1,8 +1,6 @@
package filesystem
import (
"context"
"github.com/pterodactyl/wings/internal/vhd"
"sync"
"sync/atomic"
"syscall"
@@ -37,46 +35,18 @@ func (ult *usageLookupTime) Get() time.Time {
return ult.value
}
// MaxDisk returns the maximum amount of disk space that this Filesystem
// instance is allowed to use.
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
func (fs *Filesystem) MaxDisk() int64 {
return atomic.LoadInt64(&fs.diskLimit)
}
// SetDiskLimit sets the disk space limit for this Filesystem instance. This
// logic will also handle mounting or unmounting a virtual disk if it is being
// used currently.
func (fs *Filesystem) SetDiskLimit(ctx context.Context, i int64) error {
// Do nothing if this method is called but the limit is not changing.
if atomic.LoadInt64(&fs.diskLimit) == i {
return nil
}
if vhd.Enabled() {
if i == 0 && fs.IsVirtual() {
fs.log().Debug("disk limit changed to 0, destroying virtual disk")
// Remove the VHD if it is mounted so that we're just storing files directly on the system
// since we cannot have a virtual disk with a space limit enforced like that.
if err := fs.vhd.Destroy(ctx); err != nil {
return errors.WithStackIf(err)
}
fs.vhd = nil
}
// If we're setting a disk size go ahead and mount the VHD if it isn't already mounted,
// and then allocate the new space to the disk.
if i > 0 {
fs.log().Debug("disk limit updated, allocating new space to virtual disk")
if err := fs.ConfigureDisk(ctx, i); err != nil {
return errors.WithStackIf(err)
}
}
}
fs.log().WithField("limit", i).Debug("disk limit updated")
atomic.StoreInt64(&fs.diskLimit, i)
return nil
// Sets the disk space limit for this Filesystem instance.
func (fs *Filesystem) SetDiskLimit(i int64) {
atomic.SwapInt64(&fs.diskLimit, i)
}
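
Both versions guard diskLimit with sync/atomic so concurrent readers never race with a limit update; only the newer code layers VHD handling on top. A sketch of the core pattern with the VHD logic omitted and stand-in names:

package main

import (
	"fmt"
	"sync/atomic"
)

type fsLimits struct{ diskLimit int64 }

func (f *fsLimits) MaxDisk() int64       { return atomic.LoadInt64(&f.diskLimit) }
func (f *fsLimits) SetDiskLimit(i int64) { atomic.SwapInt64(&f.diskLimit, i) }

func main() {
	f := &fsLimits{}
	f.SetDiskLimit(10 * 1024 * 1024)
	fmt.Println(f.MaxDisk() == 0) // false: a limit of 0 would mean unlimited
}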
// HasSpaceErr is the same concept as HasSpaceAvailable however this will return
// an error if there is no space, rather than a boolean value.
// The same concept as HasSpaceAvailable however this will return an error if there is
// no space, rather than a boolean value.
func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
if !fs.HasSpaceAvailable(allowStaleValue) {
return newFilesystemError(ErrCodeDiskSpace, nil)
@@ -84,77 +54,67 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
return nil
}
// HasSpaceAvailable determines if the directory a file is trying to be added to
// has enough space available for the file to be written to.
// Determines if the directory a file is trying to be added to has enough space available
// for the file to be written to.
//
// Because determining the amount of space being used by a server is a taxing
// operation we will load it all up into a cache and pull from that as long as
// the key is not expired. This operation will potentially block unless
// allowStaleValue is set to true. See the documentation on DiskUsage for how
// this affects the call.
// Because determining the amount of space being used by a server is a taxing operation we
// will load it all up into a cache and pull from that as long as the key is not expired.
//
// If the current size of the disk is larger than the maximum allowed size this
// function will return false, in all other cases it will return true. We do
// not check the existence of a virtual disk at this point since this logic is
// used to return friendly error messages to users, and also prevent us wasting
// time on more taxing operations when we know the result will end up failing due
// to space limits.
//
// If the servers disk limit is set to 0 it means there is no limit, however the
// DiskUsage method is still called to keep the cache warm. This function will
// always return true for a server with no limit set.
// This operation will potentially block unless allowStaleValue is set to true. See the
// documentation on DiskUsage for how this affects the call.
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
size, err := fs.DiskUsage(allowStaleValue)
if err != nil {
fs.log().WithField("error", err).Warn("failed to determine root fs directory size")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
}
return fs.MaxDisk() == 0 || size <= fs.MaxDisk()
// If space is -1 or 0 just return true, since the server is allowed unlimited space.
//
// Technically we could skip the disk space calculation here since we don't need to check
// whether the server exceeds its limit, but because this method also warms the disk usage
// cache it is best to run the calculation anyway and then always return true.
if fs.MaxDisk() == 0 {
return true
}
return size <= fs.MaxDisk()
}
// CachedUsage returns the cached value for the amount of disk space used by the
// filesystem. Do not rely on this function for critical logical checks. It
// should only be used in areas where the actual disk usage does not need to be
// perfect, e.g. API responses for server resource usage.
// Returns the cached value for the amount of disk space used by the filesystem. Do not rely on this
// function for critical logical checks. It should only be used in areas where the actual disk usage
// does not need to be perfect, e.g. API responses for server resource usage.
func (fs *Filesystem) CachedUsage() int64 {
return atomic.LoadInt64(&fs.diskUsed)
}
// DiskUsage is an internal helper function to allow other parts of the codebase
// to check the total used disk space as needed without overly taxing the system.
// This will prioritize the value from the cache to avoid excessive IO usage. We
// will only walk the filesystem and determine the size of the directory if there
// Internal helper function to allow other parts of the codebase to check the total used disk space
// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
// is no longer a cached value.
//
// If "allowStaleValue" is set to true, a stale value MAY be returned to the
// caller if there is an expired cache value AND there is currently another
// lookup in progress. If there is no cached value but no other lookup is in
// progress, a fresh disk space response will be returned to the caller.
// If "allowStaleValue" is set to true, a stale value MAY be returned to the caller if there is an
// expired cache value AND there is currently another lookup in progress. If there is no cached value but
// no other lookup is in progress, a fresh disk space response will be returned to the caller.
//
// This is primarily to avoid a bunch of I/O operations from piling up on the
// server, especially on servers with a large amount of files.
// This is primarily to avoid a bunch of I/O operations from piling up on the server, especially on servers
// with a large amount of files.
func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
// A disk check interval of 0 means this functionality is completely disabled.
if fs.diskCheckInterval == 0 {
return 0, nil
}
since := time.Now().Add(time.Second * fs.diskCheckInterval * -1)
// If the last lookup time was before our calculated limit we will re-execute this
// checking logic. If the lookup time was after the oldest possible timestamp we will
// continue returning the cached value.
if fs.lastLookupTime.Get().Before(since) {
if !fs.lastLookupTime.Get().After(time.Now().Add(time.Second * fs.diskCheckInterval * -1)) {
// If we are not allowing a stale response go ahead and perform the lookup and return the fresh
// value. This is a blocking operation to the calling process.
if !allowStaleValue {
return fs.updateCachedDiskUsage()
}
} else if !fs.lookupInProgress.Load() {
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
// currently performing a lookup, just do the disk usage calculation in the background.
if !fs.lookupInProgress.Load() {
go func(fs *Filesystem) {
if _, err := fs.updateCachedDiskUsage(); err != nil {
fs.log().WithField("error", err).Warn("failed to update fs disk usage from within routine")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
}
}(fs)
}
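
The allowStaleValue branch above is a stale-while-revalidate cache: callers that tolerate stale data get the cached number back immediately, and at most one background goroutine refreshes it. A compact stand-alone sketch of that idea:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type usageCache struct {
	value      int64
	refreshing int32
}

// get returns the cached value immediately when allowStale is true, kicking
// off at most one background refresh; otherwise it refreshes synchronously.
func (c *usageCache) get(allowStale bool, refresh func() int64) int64 {
	if !allowStale {
		v := refresh()
		atomic.StoreInt64(&c.value, v)
		return v
	}
	if atomic.CompareAndSwapInt32(&c.refreshing, 0, 1) {
		go func() {
			defer atomic.StoreInt32(&c.refreshing, 0)
			atomic.StoreInt64(&c.value, refresh())
		}()
	}
	return atomic.LoadInt64(&c.value)
}

func main() {
	c := &usageCache{}
	slowScan := func() int64 { time.Sleep(10 * time.Millisecond); return 4096 }
	fmt.Println(c.get(true, slowScan)) // 0: the stale value is served while the scan runs
	time.Sleep(20 * time.Millisecond)
	fmt.Println(c.get(true, slowScan)) // 4096 once the background refresh has landed
}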
@@ -234,14 +194,11 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
}
// HasSpaceFor is a function to determine if a server has space available for a
// file of a given size. If space is available, no error will be returned,
// otherwise an ErrNotEnoughSpace error will be raised. If this filesystem is
// configured as a virtual disk this function is a no-op as we will fall through
// to the native implementation to throw back an error if there is not disk
// space available.
// Helper function to determine if a server has space available for a file of a given size.
// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
// will be raised.
func (fs *Filesystem) HasSpaceFor(size int64) error {
if fs.IsVirtual() || fs.MaxDisk() == 0 {
if fs.MaxDisk() == 0 {
return nil
}
s, err := fs.DiskUsage(true)
@@ -277,7 +234,3 @@ func (fs *Filesystem) addDisk(i int64) int64 {
return atomic.AddInt64(&fs.diskUsed, i)
}
func (fs *Filesystem) log() *log.Entry {
return log.WithField("server", fs.uuid).WithField("root", fs.root)
}

View File

@@ -20,7 +20,6 @@ import (
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/vhd"
"github.com/pterodactyl/wings/system"
)
@@ -31,23 +30,19 @@ type Filesystem struct {
diskUsed int64
diskCheckInterval time.Duration
denylist *ignore.GitIgnore
vhd *vhd.Disk
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
diskLimit int64
// The root data directory path for this Filesystem instance.
root string
uuid string
isTest bool
}
// New creates a new Filesystem instance for a given server.
func New(uuid string, size int64, denylist []string) *Filesystem {
root := filepath.Join(config.Get().System.Data, uuid)
fs := Filesystem{
uuid: uuid,
func New(root string, size int64, denylist []string) *Filesystem {
return &Filesystem{
root: root,
diskLimit: size,
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
@@ -55,15 +50,6 @@ func New(uuid string, size int64, denylist []string) *Filesystem {
lookupInProgress: system.NewAtomicBool(false),
denylist: ignore.CompileIgnoreLines(denylist...),
}
// If VHD support is enabled but this server is configured with no disk size
// limit we cannot actually use a virtual disk. In that case fall back to using
// the default driver.
if vhd.Enabled() && size > 0 {
fs.vhd = vhd.New(size, vhd.DiskPath(uuid), fs.root)
}
return &fs
}
// Path returns the root path for the Filesystem instance.
@@ -91,9 +77,9 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
return f, st, nil
}
// Touch acts by creating the given file and path on the disk if it is not present
// already. If it is present, the file is opened using the defaults which will
// truncate the contents. The opened file is then returned to the caller.
// Acts by creating the given file and path on the disk if it is not present already. If
// it is present, the file is opened using the defaults which will truncate the contents.
// The opened file is then returned to the caller.
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
cleaned, err := fs.SafePath(p)
if err != nil {
@@ -169,12 +155,6 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
buf := make([]byte, 1024*4)
sz, err := io.CopyBuffer(file, r, buf)
if err != nil {
if strings.Contains(err.Error(), "no space left on device") {
return newFilesystemError(ErrCodeDiskSpace, err)
}
return errors.WrapIf(err, "filesystem: failed to copy buffer for file write")
}
// Adjust the disk usage to account for the old size and the new size of the file.
fs.addDisk(sz - currentSize)
@@ -332,9 +312,8 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
return name + suffix + extension, nil
}
// Copy takes a given input file path and creates a copy of the file at the same
// location, appending a unique number to the end. For example, a copy of "test.txt"
// would create "test 2.txt" as the copy, then "test 3.txt" and so on.
// Copies a given file to the same location and appends a suffix to the file to indicate that
// it has been copied.
func (fs *Filesystem) Copy(p string) error {
cleaned, err := fs.SafePath(p)
if err != nil {

View File

@@ -1,42 +0,0 @@
package filesystem
import (
"context"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/vhd"
)
// IsVirtual returns true if the filesystem is currently using a virtual disk.
func (fs *Filesystem) IsVirtual() bool {
return fs.vhd != nil
}
// ConfigureDisk will attempt to create a new VHD if there is not one already
// created for the filesystem. If there is, this method will attempt to resize
// the underlying data volume. Passing a size of 0 or less will panic.
func (fs *Filesystem) ConfigureDisk(ctx context.Context, size int64) error {
if size <= 0 {
panic("filesystem: attempt to configure disk with empty size")
}
if fs.vhd == nil {
fs.vhd = vhd.New(size, vhd.DiskPath(fs.uuid), fs.root)
if err := fs.MountDisk(ctx); err != nil {
return errors.WithStackIf(err)
}
}
// Resize the disk now that it is for sure mounted and exists on the system.
if err := fs.vhd.Resize(ctx, size); err != nil {
return errors.WithStackIf(err)
}
return nil
}
// MountDisk will attempt to mount the underlying virtual disk for the server.
// If the disk is already mounted this is a no-op function.
func (fs *Filesystem) MountDisk(ctx context.Context) error {
err := fs.vhd.Mount(ctx)
if errors.Is(err, vhd.ErrFilesystemMounted) {
return nil
}
return errors.WrapIf(err, "filesystem: failed to mount VHD")
}
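
MountDisk above treats "already mounted" as success so callers can invoke it unconditionally. This is a generic sketch of that idempotent-mount shape with stand-in names, not the removed vhd package's API:

package main

import (
	"errors"
	"fmt"
)

var errAlreadyMounted = errors.New("filesystem already mounted")

// mount is a stand-in for the real mount syscall wrapper.
func mount() error { return errAlreadyMounted }

func mountIfNeeded() error {
	// Only a genuine failure is propagated; re-mounting is a no-op.
	if err := mount(); err != nil && !errors.Is(err, errAlreadyMounted) {
		return fmt.Errorf("failed to mount VHD: %w", err)
	}
	return nil
}

func main() { fmt.Println(mountIfNeeded()) } // <nil>: already mounted is treated as success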

View File

@@ -18,7 +18,6 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/remote"
@@ -450,7 +449,6 @@ func (ip *InstallationProcess) Execute() (string, error) {
},
Privileged: true,
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
}
// Ensure the root directory for the server exists properly before attempting

View File

@@ -8,7 +8,6 @@ import (
"time"
"github.com/apex/log"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"sync"
"time"
@@ -24,16 +25,14 @@ import (
type Manager struct {
mu sync.RWMutex
client remote.Client
skipVhdInitialization bool
servers []*Server
}
// NewManager returns a new server manager instance. This will boot up all the
// servers that are currently present on the filesystem and set them into the
// manager.
func NewManager(ctx context.Context, client remote.Client, skipVhdInit bool) (*Manager, error) {
func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
m := NewEmptyManager(client)
m.skipVhdInitialization = skipVhdInit
if err := m.init(ctx); err != nil {
return nil, err
}
@@ -185,7 +184,7 @@ func (m *Manager) ReadStates() (map[string]string, error) {
// InitServer initializes a server using a data byte array. This will be
// marshaled into the given struct using a YAML marshaler. This will also
// configure the given environment for a server.
func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfigurationResponse) (*Server, error) {
func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server, error) {
s, err := New(m.client)
if err != nil {
return nil, err
@@ -197,15 +196,7 @@ func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfiguratio
return nil, errors.WithStackIf(err)
}
s.fs = filesystem.New(s.ID(), s.DiskSpace(), s.Config().Egg.FileDenylist)
// If this is a virtual filesystem we need to go ahead and mount the disk
// so that everything is accessible.
if s.fs.IsVirtual() && !m.skipVhdInitialization {
log.WithField("server", s.ID()).Info("mounting virtual disk for server")
if err := s.fs.MountDisk(ctx); err != nil {
return nil, err
}
}
s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
// Right now we only support a Docker based environment, so I'm going to hard code
// this logic in. When we're ready to support other environment we'll need to make
@@ -267,7 +258,7 @@ func (m *Manager) init(ctx context.Context) error {
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
return
}
s, err := m.InitServer(ctx, d)
s, err := m.InitServer(d)
if err != nil {
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
return

View File

@@ -8,7 +8,6 @@ import (
"emperror.dev/errors"
"github.com/google/uuid"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
)

View File

@@ -4,7 +4,6 @@ import (
"testing"
. "github.com/franela/goblin"
"github.com/pterodactyl/wings/system"
)

View File

@@ -179,8 +179,6 @@ func (s *Server) Log() *log.Entry {
//
// This also means mass actions can be performed against servers on the Panel
// and they will automatically sync with Wings when the server is started.
//
// TODO: accept a context value rather than using the server's context.
func (s *Server) Sync() error {
cfg, err := s.client.GetServerConfiguration(s.Context(), s.ID())
if err != nil {
@@ -196,9 +194,7 @@ func (s *Server) Sync() error {
// Update the disk space limits for the server whenever the configuration for
// it changes.
if err := s.fs.SetDiskLimit(s.Context(), s.DiskSpace()); err != nil {
return errors.WrapIf(err, "server: failed to sync server configuration from API")
}
s.fs.SetDiskLimit(s.DiskSpace())
s.SyncWithEnvironment()

View File

@@ -3,7 +3,6 @@ package sftp
import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
)

View File

@@ -1,3 +1,3 @@
package system
var Version = "develop"
var Version = "1.7.1"

View File

@@ -1,10 +1,9 @@
package main
import (
"github.com/pterodactyl/wings/cmd"
"math/rand"
"time"
"github.com/pterodactyl/wings/cmd"
)
func main() {