Compare commits
27 Commits
release/v1...dane/vhd
| Author | SHA1 | Date |
|---|---|---|
| | b00d328107 | |
| | daa0ab75b4 | |
| | ff4b7655c8 | |
| | 99cb61a6ef | |
| | 0407f22147 | |
| | a74ea6a9ff | |
| | 37c52dd439 | |
| | f8a25cb040 | |
| | a6a610fd82 | |
| | dfe5a77e0a | |
| | d8a7bf2dde | |
| | 265f8a6b39 | |
| | 7fed6a68cb | |
| | b0f99e2328 | |
| | 957257ecc3 | |
| | 058f643e65 | |
| | 6c7065592d | |
| | 3f481e9540 | |
| | 984bd10cf2 | |
| | f5a64a0d7f | |
| | 6fb61261b0 | |
| | 3edec80efa | |
| | 0637eebefe | |
| | e98d249cf7 | |
| | b20bf6deab | |
| | 1b268b5625 | |
| | 7245791214 | |
.github/FUNDING.yml (3 changes, vendored)
@@ -1,2 +1 @@
-github: [ DaneEveritt ]
-custom: [ "https://paypal.me/PterodactylSoftware" ]
+github: [ matthewpi ]
.github/workflows/build-test.yml (3 changes, vendored)
@@ -12,7 +12,7 @@ jobs:
       fail-fast: false
       matrix:
         os: [ ubuntu-20.04 ]
-        go: [ '^1.17' ]
+        go: [ '1.18.7' ]
         goos: [ linux ]
         goarch: [ amd64, arm64 ]
     runs-on: ${{ matrix.os }}
@@ -58,7 +58,6 @@ jobs:
         run: |
           go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH} wings.go
           go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH}_debug wings.go
-          upx build/wings_${GOOS}_${{ matrix.goarch }}
          chmod +x build/*
       - name: Tests
         run: go test -race ./...
.github/workflows/release.yml (6 changes, vendored)
@@ -11,7 +11,7 @@ jobs:
         uses: actions/checkout@v2
       - uses: actions/setup-go@v2
         with:
-          go-version: '^1.17'
+          go-version: '1.18.7'
       - name: Build
         env:
           REF: ${{ github.ref }}
@@ -22,8 +22,8 @@ jobs:
         run: go test ./...
       - name: Compress binary and make it executable
         run: |
-          upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
-          upx build/wings_linux_arm64 && chmod +x build/wings_linux_arm64
+          chmod +x build/wings_linux_amd64
+          chmod +x build/wings_linux_arm64
       - name: Extract changelog
         env:
           REF: ${{ github.ref }}
CHANGELOG.md (12 changes)
@@ -1,5 +1,17 @@
 # Changelog

+## v1.7.2
+### Fixed
+* The S3 backup driver now supports Cloudflare R2
+
+### Added
+* During a server transfer, there is a new "Archiving" status that outputs the progress of creating the server transfer archive.
+* Adds a configuration option to control the list of trusted proxies that can be used to determine the client IP address.
+* Adds a configuration option to control the Docker user namespace setting when Wings creates containers.
+
+### Changed
+* Releases are now built using `Go 1.18`; the minimum version required to build Wings is now `Go 1.18`.
+
 ## v1.7.1
 ### Fixed
 * YAML parser has been updated to fix some strange issues
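The trusted-proxy entry in the changelog above corresponds to the new `TrustedProxies []string` field added to `ApiConfiguration` further down in this diff. As a rough illustration of how such a list is typically applied, the sketch below hands it to a gin router via `SetTrustedProxies`; the `Api` accessor on the configuration struct and the surrounding helper are assumptions for illustration and are not shown anywhere in this diff.

```go
package router

import (
	"github.com/gin-gonic/gin"

	"github.com/pterodactyl/wings/config"
)

// NewRouter is a hypothetical helper: once a proxy list is configured, gin only
// honors X-Forwarded-For headers coming from those addresses when it resolves
// the client IP.
func NewRouter() (*gin.Engine, error) {
	e := gin.Default()
	// config.Get().Api is assumed to expose the ApiConfiguration struct shown
	// later in this diff; only the TrustedProxies field itself is confirmed there.
	if err := e.SetTrustedProxies(config.Get().Api.TrustedProxies); err != nil {
		return nil, err
	}
	return e, nil
}
```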
Dockerfile
@@ -1,5 +1,5 @@
 # Stage 1 (Build)
-FROM golang:1.17-alpine AS builder
+FROM golang:1.18-alpine AS builder

 ARG VERSION
 RUN apk add --update --no-cache git make
Makefile (6 changes)
@@ -4,6 +4,9 @@ build:
 	GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_amd64 -v wings.go
 	GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go

+race:
+	go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
+
 debug:
 	go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
 	sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
@@ -14,9 +17,6 @@ rmdebug:
 	go build -gcflags "all=-N -l" -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
 	sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml

-compress:
-	upx --brute build/wings_*
-
 cross-build: clean build compress

 clean:
README.md
@@ -14,7 +14,7 @@ dependencies, and allowing users to authenticate with the same credentials they

 ## Sponsors
 I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
-[Interested in becoming a sponsor?](https://github.com/sponsors/DaneEveritt)
+[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)

 | Company | About |
 | ------- | ----- |
cmd/migrate_vhd.go (new file, 127 additions)
@@ -0,0 +1,127 @@
package cmd

import (
	"context"
	"os"
	"os/exec"
	"strings"

	"emperror.dev/errors"
	"github.com/apex/log"
	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/internal/vhd"
	"github.com/pterodactyl/wings/loggers/cli"
	"github.com/pterodactyl/wings/remote"
	"github.com/pterodactyl/wings/server"
	"github.com/spf13/cobra"
)

type MigrateVHDCommand struct {
	manager *server.Manager
}

func newMigrateVHDCommand() *cobra.Command {
	return &cobra.Command{
		Use:   "migrate-vhd",
		Short: "migrates existing data from a directory tree into virtual hard-disks",
		PreRun: func(cmd *cobra.Command, args []string) {
			log.SetLevel(log.DebugLevel)
			log.SetHandler(cli.Default)
		},
		Run: func(cmd *cobra.Command, args []string) {
			client := remote.NewFromConfig(config.Get())
			manager, err := server.NewManager(cmd.Context(), client, true)
			if err != nil {
				log.WithField("error", err).Fatal("failed to create new server manager")
			}
			c := &MigrateVHDCommand{
				manager: manager,
			}
			if err := c.Run(cmd.Context()); err != nil {
				log.WithField("error", err).Fatal("failed to execute command")
			}
		},
	}
}

// Run executes the migration command.
func (m *MigrateVHDCommand) Run(ctx context.Context) error {
	if !vhd.Enabled() {
		return errors.New("cannot migrate to vhd: the underlying driver must be set to \"vhd\"")
	}
	for _, s := range m.manager.All() {
		s.Log().Debug("starting migration of server contents to virtual disk...")

		v := vhd.New(s.DiskSpace(), vhd.DiskPath(s.ID()), s.Filesystem().Path())
		s.Log().WithField("disk_image", v.Path()).Info("creating virtual disk for server")
		if err := v.Allocate(ctx); err != nil {
			return errors.WithStackIf(err)
		}

		s.Log().Info("creating virtual filesystem for server")
		if err := v.MakeFilesystem(ctx); err != nil {
			// If the filesystem already exists no worries, just move on with our
			// day here.
			if !errors.Is(err, vhd.ErrFilesystemExists) {
				return errors.WithStack(err)
			}
		}

		bak := strings.TrimSuffix(s.Filesystem().Path(), "/") + "_bak"
		mounted, err := v.IsMounted(ctx)
		if err != nil {
			return err
		} else if !mounted {
			s.Log().WithField("backup_dir", bak).Debug("virtual disk is not yet mounted, creating backup directory")
			// Create a backup directory of the server files if one does not already exist
			// at that location. If one does exist we'll just assume it is good to go and
			// rely on it to provide the files we'll need.
			if _, err := os.Lstat(bak); os.IsNotExist(err) {
				if err := os.Rename(s.Filesystem().Path(), bak); err != nil {
					return errors.Wrap(err, "failed to rename existing data directory for backup")
				}
			} else if err != nil {
				return errors.WithStack(err)
			}
			if err := os.RemoveAll(s.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
				return errors.Wrap(err, "failed to remove base server files path")
			}
		} else {
			s.Log().Warn("server appears to already have existing mount, not creating data backup")
		}

		// Attempt to mount the disk at the expected path now that we've created
		// a backup of the server files.
		if err := v.Mount(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemMounted) {
			return errors.WithStackIf(err)
		}

		// Copy over the files from the backup for this server but only
		// if we have a backup directory currently.
		_, err = os.Lstat(bak)
		if err != nil {
			if !os.IsNotExist(err) {
				s.Log().WithField("error", err).Warn("failed to stat backup directory")
			} else {
				s.Log().Info("no backup data directory exists, not restoring files")
			}
		} else {
			cmd := exec.CommandContext(ctx, "cp", "-r", bak+"/.", s.Filesystem().Path())
			if err := cmd.Run(); err != nil {
				return errors.Wrap(err, "migrate: failed to move old server files into new directory")
			} else {
				if err := os.RemoveAll(bak); err != nil {
					s.Log().WithField("directory", bak).WithField("error", err).Warn("failed to remove backup directory")
				}
			}
		}

		s.Log().Info("updating server file ownership...")
		if err := s.Filesystem().Chown("/"); err != nil {
			s.Log().WithField("error", err).Warn("failed to update ownership of new server files")
		}

		s.Log().Info("finished migration to virtual disk...")
	}
	return nil
}
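For a concrete sense of where the command above places each server's image: `vhd.DiskPath` (defined in `internal/vhd/vhd.go` later in this diff) joins the configured data directory with a `.vhd/<uuid>.img` suffix. A minimal sketch, assuming the stock `/var/lib/pterodactyl/volumes` data directory and a made-up server UUID (neither appears in this diff):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Mirrors vhd.DiskPath from this diff:
	// filepath.Join(config.Get().System.Data, ".vhd/", uuid+".img")
	data := "/var/lib/pterodactyl/volumes"                // assumed default data directory
	uuid := "0e4059ca-d79b-46a5-8ec4-95bd0736d150"        // hypothetical server UUID
	fmt.Println(filepath.Join(data, ".vhd/", uuid+".img"))
	// Output: /var/lib/pterodactyl/volumes/.vhd/0e4059ca-d79b-46a5-8ec4-95bd0736d150.img
}
```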
cmd/root.go (27 changes)
@@ -5,8 +5,6 @@ import (
 	"crypto/tls"
 	"errors"
 	"fmt"
-	"github.com/pterodactyl/wings/internal/cron"
-	"github.com/pterodactyl/wings/internal/database"
 	log2 "log"
 	"net/http"
 	_ "net/http/pprof"
@@ -18,6 +16,9 @@ import (
 	"strings"
 	"time"

+	"github.com/pterodactyl/wings/internal/cron"
+	"github.com/pterodactyl/wings/internal/database"
+
 	"github.com/NYTimes/logrotate"
 	"github.com/apex/log"
 	"github.com/apex/log/handlers/multi"
@@ -46,8 +47,16 @@ var (
 var rootCommand = &cobra.Command{
 	Use:   "wings",
 	Short: "Runs the API server allowing programmatic control of game servers for Pterodactyl Panel.",
-	PreRun: func(cmd *cobra.Command, args []string) {
+	PersistentPreRun: func(cmd *cobra.Command, args []string) {
 		initConfig()
+		if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
+			log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
+			http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
+				InsecureSkipVerify: true,
+			}
+		}
+	},
+	PreRun: func(cmd *cobra.Command, args []string) {
 		initLogging()
 		if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
 			if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
@@ -76,6 +85,7 @@ func Execute() {
 func init() {
 	rootCommand.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
 	rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
+	rootCommand.PersistentFlags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")

 	// Flags specifically used when running the API.
 	rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
@@ -83,11 +93,11 @@ func init() {
 	rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
 	rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
 	rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
-	rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")

 	rootCommand.AddCommand(versionCommand)
 	rootCommand.AddCommand(configureCmd)
 	rootCommand.AddCommand(newDiagnosticsCommand())
+	rootCommand.AddCommand(newMigrateVHDCommand())
 }

 func rootCmdRun(cmd *cobra.Command, _ []string) {
@@ -95,13 +105,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 	log.Debug("running in debug mode")
 	log.WithField("config_file", configPath).Info("loading configuration from file")
-
-	if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
-		log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
-		http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
-			InsecureSkipVerify: true,
-		}
-	}

 	if err := config.ConfigureTimezone(); err != nil {
 		log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
 	}
@@ -136,7 +139,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 		log.WithField("error", err).Fatal("failed to initialize database")
 	}

-	manager, err := server.NewManager(cmd.Context(), pclient)
+	manager, err := server.NewManager(cmd.Context(), pclient, false)
 	if err != nil {
 		log.WithField("error", err).Fatal("failed to load server configurations")
 	}
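The move of the `--ignore-certificate-errors` handling from `PreRun` into `PersistentPreRun` above matters because cobra runs a parent's `PersistentPreRun` for child commands as well (unless a child defines its own), so the flag now also takes effect for subcommands such as `migrate-vhd`. A minimal, self-contained illustration of that cobra behavior; the command names below are made up and not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "demo",
		// Inherited by child commands that do not define their own PersistentPreRun.
		PersistentPreRun: func(cmd *cobra.Command, args []string) { fmt.Println("persistent pre-run") },
		// Only runs when the root command itself is executed.
		PreRun: func(cmd *cobra.Command, args []string) { fmt.Println("root pre-run") },
		Run:    func(cmd *cobra.Command, args []string) {},
	}
	child := &cobra.Command{Use: "child", Run: func(cmd *cobra.Command, args []string) {}}
	root.AddCommand(child)

	root.SetArgs([]string{"child"})
	_ = root.Execute() // prints only "persistent pre-run"
}
```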
config/config.go
@@ -91,6 +91,9 @@ type ApiConfiguration struct {

 	// The maximum size for files uploaded through the Panel in MB.
 	UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
+
+	// A list of IP addresses of proxies that may send a X-Forwarded-For header to set the true client IP.
+	TrustedProxies []string `json:"trusted_proxies" yaml:"trusted_proxies"`
 }

 // RemoteQueryConfiguration defines the configuration settings for remote requests
@@ -302,6 +305,11 @@ type Configuration struct {
 	// is only required by users running Wings without SSL certificates and using internal IP
 	// addresses in order to connect. Most users should NOT enable this setting.
 	AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
+
+	// Servers contains all of the settings that are used when configuring individual servers
+	// on the system. This is a global configuration for all server instances, not to be confused
+	// with the per-server configurations provided by the Panel API.
+	Servers Servers `json:"servers" yaml:"servers"`
 }

 // NewAtPath creates a new struct and set the path where it should be stored.
config/config_docker.go
@@ -78,6 +78,14 @@ type DockerConfiguration struct {
 	Overhead Overhead `json:"overhead" yaml:"overhead"`

 	UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
+
+	// Sets the user namespace mode for the container when the user namespace remapping option is
+	// enabled.
+	//
+	// If the value is blank, the daemon's user namespace remapping configuration is used;
+	// if the value is "host", then the pterodactyl containers are started with user namespace
+	// remapping disabled.
+	UsernsMode string `default:"" json:"userns_mode" yaml:"userns_mode"`
 }

 // RegistryConfiguration defines the authentication credentials for a given
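The `userns_mode` option documented above is consumed when Wings builds the Docker host configuration; the environment hunk later in this diff adds `UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode)` to the container's `HostConfig`. The sketch below strips that wiring down to its essentials; everything other than the `UsernsMode` line and the config lookup is simplified for illustration and is not how the real host config is assembled.

```go
package docker

import (
	"github.com/docker/docker/api/types/container"

	"github.com/pterodactyl/wings/config"
)

// hostConfig is a hypothetical helper: it returns a HostConfig whose user
// namespace mode mirrors the new Docker.UsernsMode setting. An empty value keeps
// the daemon's remapping configuration; "host" disables remapping for the container.
func hostConfig() *container.HostConfig {
	return &container.HostConfig{
		UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
	}
}
```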
config/config_servers.go (new file, 28 additions)
@@ -0,0 +1,28 @@
package config

type FSDriver string

const (
	FSDriverLocal FSDriver = "local"
	FSDriverVHD   FSDriver = "vhd"
)

type Servers struct {
	// Filesystem defines all of the filesystem specific settings used for servers.
	Filesystem Filesystem `json:"filesystem" yaml:"filesystem"`
}

type Filesystem struct {
	// Driver defines the underlying filesystem driver that is used when a server is
	// created on the system. This currently supports either of the following drivers:
	//
	// local: the local driver is the default one used by Wings. This offloads all of the
	//   disk limit enforcement to Wings itself. This has a performance impact but is
	//   the most compatible with all systems.
	// vhd: the vhd driver uses "virtual" disks on the host system to enforce disk limits
	//   on the server. This is more performant since calculations do not need to be made
	//   by Wings itself when enforcing limits. It also avoids vulnerabilities that exist
	//   in the local driver which allow malicious processes to quickly create massive files
	//   before Wings is able to detect and stop them from being written.
	Driver FSDriver `default:"local" json:"driver" yaml:"driver"`
}
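Since the `Driver` comment above explains when each driver applies, here is a minimal sketch of how calling code can branch on the new field at runtime; it mirrors the `vhd.Enabled()` helper added in `internal/vhd/vhd.go` later in this diff, and uses only the types and constants defined in the file above.

```go
package main

import (
	"fmt"

	"github.com/pterodactyl/wings/config"
)

// driverLabel branches on the new Filesystem.Driver field the same way
// vhd.Enabled() does later in this diff.
func driverLabel(fs config.Filesystem) string {
	if fs.Driver == config.FSDriverVHD {
		return "disk limits enforced through virtual disk images"
	}
	return "disk limits enforced by the local driver inside Wings"
}

func main() {
	fmt.Println(driverLabel(config.Filesystem{Driver: config.FSDriverLocal}))
	fmt.Println(driverLabel(config.Filesystem{Driver: config.FSDriverVHD}))
}
```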
@@ -14,6 +14,7 @@ import (
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/errdefs"
 	"github.com/goccy/go-json"
+
 	"github.com/pterodactyl/wings/config"
 )

@@ -261,6 +261,7 @@ func (e *Environment) Create() error {
 			"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
 		},
 		NetworkMode: networkMode,
+		UsernsMode:  container.UsernsMode(config.Get().Docker.UsernsMode),
 	}

 	if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {
@@ -10,6 +10,7 @@ import (
 	"github.com/apex/log"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
+
 	"github.com/pterodactyl/wings/environment"
 	"github.com/pterodactyl/wings/events"
 	"github.com/pterodactyl/wings/remote"
@@ -5,6 +5,7 @@ import (

 	"emperror.dev/errors"
 	"github.com/goccy/go-json"
+
 	"github.com/pterodactyl/wings/system"
 )

go.mod (120 changes)
@@ -1,129 +1,125 @@
 module github.com/pterodactyl/wings

-go 1.17
+go 1.18

 require (
 	emperror.dev/errors v0.8.1
-	github.com/AlecAivazis/survey/v2 v2.3.4
+	github.com/AlecAivazis/survey/v2 v2.3.6
 	github.com/Jeffail/gabs/v2 v2.6.1
 	github.com/NYTimes/logrotate v1.0.0
 	github.com/apex/log v1.9.0
 	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
 	github.com/beevik/etree v1.1.0
 	github.com/buger/jsonparser v1.1.1
-	github.com/cenkalti/backoff/v4 v4.1.2
+	github.com/cenkalti/backoff/v4 v4.1.3
 	github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
-	github.com/creasty/defaults v1.5.2
-	github.com/docker/docker v20.10.14+incompatible
+	github.com/creasty/defaults v1.6.0
+	github.com/docker/docker v20.10.18+incompatible
 	github.com/docker/go-connections v0.4.0
 	github.com/fatih/color v1.13.0
 	github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
-	github.com/gabriel-vasile/mimetype v1.4.0
-	github.com/gammazero/workerpool v1.1.2
+	github.com/gabriel-vasile/mimetype v1.4.1
+	github.com/gammazero/workerpool v1.1.3
 	github.com/gbrlsnchs/jwt/v3 v3.0.1
-	github.com/gin-gonic/gin v1.7.7
+	github.com/gin-gonic/gin v1.8.1
+	github.com/glebarez/sqlite v1.4.8
+	github.com/go-co-op/gocron v1.17.0
+	github.com/goccy/go-json v0.9.11
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/websocket v1.5.0
 	github.com/iancoleman/strcase v0.2.0
-	github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
-	github.com/juju/ratelimit v1.0.1
-	github.com/karrick/godirwalk v1.16.1
+	github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
+	github.com/juju/ratelimit v1.0.2
+	github.com/karrick/godirwalk v1.17.0
+	github.com/klauspost/compress v1.15.11
 	github.com/klauspost/pgzip v1.2.5
 	github.com/magiconair/properties v1.8.6
-	github.com/mattn/go-colorable v0.1.12
+	github.com/mattn/go-colorable v0.1.13
 	github.com/mholt/archiver/v3 v3.5.1
 	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
 	github.com/patrickmn/go-cache v2.1.0+incompatible
-	github.com/pkg/sftp v1.13.4
+	github.com/pkg/sftp v1.13.5
 	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
-	github.com/spf13/cobra v1.4.0
-	github.com/stretchr/testify v1.7.5
-	golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	gopkg.in/ini.v1 v1.66.4
+	github.com/spf13/afero v1.9.2
+	github.com/spf13/cobra v1.5.0
+	github.com/stretchr/testify v1.8.0
+	golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
+	golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
+	gopkg.in/ini.v1 v1.67.0
 	gopkg.in/yaml.v2 v2.4.0
-)
-
-require (
-	github.com/glebarez/sqlite v1.4.6
-	github.com/go-co-op/gocron v1.15.0
-	github.com/goccy/go-json v0.9.6
-	github.com/klauspost/compress v1.15.1
 	gopkg.in/yaml.v3 v3.0.1
-	gorm.io/gorm v1.23.8
+	gorm.io/gorm v1.23.10
 )

-require golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
-
 require (
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
-	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/Microsoft/hcsshim v0.9.2 // indirect
+	github.com/Microsoft/go-winio v0.6.0 // indirect
+	github.com/Microsoft/hcsshim v0.9.4 // indirect
 	github.com/andybalholm/brotli v1.0.4 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/containerd/containerd v1.6.2 // indirect
 	github.com/containerd/fifo v1.0.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/go-units v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/gammazero/deque v0.1.1 // indirect
+	github.com/fsnotify/fsnotify v1.5.4 // indirect
+	github.com/gammazero/deque v0.2.0 // indirect
 	github.com/gin-contrib/sse v0.1.0 // indirect
-	github.com/glebarez/go-sqlite v1.17.3 // indirect
+	github.com/glebarez/go-sqlite v1.19.1 // indirect
 	github.com/go-playground/locales v0.14.0 // indirect
 	github.com/go-playground/universal-translator v0.18.0 // indirect
-	github.com/go-playground/validator/v10 v10.10.1 // indirect
+	github.com/go-playground/validator/v10 v10.11.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
-	github.com/gorilla/mux v1.7.4 // indirect
-	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/leodido/go-urn v1.2.1 // indirect
-	github.com/magefile/mage v1.13.0 // indirect
-	github.com/mattn/go-isatty v0.0.14 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/magefile/mage v1.14.0 // indirect
+	github.com/mattn/go-isatty v0.0.16 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
 	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
-	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+	github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/nwaples/rardecode v1.1.3 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.2 // indirect
-	github.com/pierrec/lz4/v4 v4.1.14 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.5 // indirect
+	github.com/pierrec/lz4/v4 v4.1.17 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_golang v1.12.1 // indirect
+	github.com/prometheus/client_golang v1.13.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
 	github.com/robfig/cron/v3 v3.0.1 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/ugorji/go/codec v1.2.7 // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
-	golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
-	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
-	google.golang.org/grpc v1.45.0 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
-	modernc.org/libc v1.16.17 // indirect
-	modernc.org/mathutil v1.4.1 // indirect
-	modernc.org/memory v1.1.1 // indirect
-	modernc.org/sqlite v1.17.3 // indirect
+	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
+	golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
+	golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
+	golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
+	golang.org/x/text v0.3.8 // indirect
+	golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
+	golang.org/x/tools v0.1.12 // indirect
+	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	modernc.org/libc v1.20.0 // indirect
+	modernc.org/mathutil v1.5.0 // indirect
+	modernc.org/memory v1.4.0 // indirect
+	modernc.org/sqlite v1.19.1 // indirect
 )
@@ -5,6 +5,7 @@ import (

 	"emperror.dev/errors"
 	"github.com/asaskevich/govalidator"
+
 	"github.com/pterodactyl/wings/remote"
 	"github.com/pterodactyl/wings/server"
 )
@@ -37,7 +38,7 @@ func New(ctx context.Context, manager *server.Manager, details ServerDetails) (*

 	// Create a new server instance using the configuration we wrote to the disk
 	// so that everything gets instantiated correctly on the struct.
-	s, err := manager.InitServer(c)
+	s, err := manager.InitServer(ctx, c)
 	if err != nil {
 		return nil, errors.WrapIf(err, "installer: could not init server instance")
 	}
@@ -2,7 +2,9 @@ package cron

 import (
 	"context"
+
 	"emperror.dev/errors"
+
 	"github.com/pterodactyl/wings/internal/database"
 	"github.com/pterodactyl/wings/internal/models"
 	"github.com/pterodactyl/wings/server"
@@ -2,13 +2,15 @@ package cron

 import (
 	"context"
+	"time"
+
 	"emperror.dev/errors"
 	log2 "github.com/apex/log"
 	"github.com/go-co-op/gocron"
+
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/server"
 	"github.com/pterodactyl/wings/system"
-	"time"
 )

 const ErrCronRunning = errors.Sentinel("cron: job already running")
@@ -2,12 +2,14 @@ package cron

 import (
 	"context"
+	"reflect"
+
 	"emperror.dev/errors"
+
 	"github.com/pterodactyl/wings/internal/database"
 	"github.com/pterodactyl/wings/internal/models"
 	"github.com/pterodactyl/wings/server"
 	"github.com/pterodactyl/wings/system"
-	"reflect"
 )

 type sftpCron struct {
@@ -1,19 +1,23 @@
 package database

 import (
+	"path/filepath"
+	"time"
+
 	"emperror.dev/errors"
 	"github.com/glebarez/sqlite"
+	"gorm.io/gorm"
+	"gorm.io/gorm/logger"
+
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/internal/models"
 	"github.com/pterodactyl/wings/system"
-	"gorm.io/gorm"
-	"gorm.io/gorm/logger"
-	"path/filepath"
-	"time"
 )

-var o system.AtomicBool
-var db *gorm.DB
+var (
+	o  system.AtomicBool
+	db *gorm.DB
+)

 // Initialize configures the local SQLite database for Wings and ensures that the models have
 // been fully migrated.
@@ -1,9 +1,11 @@
 package models

 import (
-	"github.com/pterodactyl/wings/system"
-	"gorm.io/gorm"
 	"time"
+
+	"gorm.io/gorm"
+
+	"github.com/pterodactyl/wings/system"
 )

 type Event string
@@ -2,6 +2,7 @@ package models

 import (
 	"database/sql"
+
 	"emperror.dev/errors"
 	"github.com/goccy/go-json"
 )
330
internal/vhd/vhd.go
Normal file
330
internal/vhd/vhd.go
Normal file
@@ -0,0 +1,330 @@
|
|||||||
|
package vhd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"emperror.dev/errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/spf13/afero"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidDiskPathTarget = errors.Sentinel("vhd: disk path is a directory or symlink")
|
||||||
|
ErrMountPathNotDirectory = errors.Sentinel("vhd: mount point is not a directory")
|
||||||
|
ErrFilesystemMounted = errors.Sentinel("vhd: filesystem is already mounted")
|
||||||
|
ErrFilesystemNotMounted = errors.Sentinel("vhd: filesystem is not mounted")
|
||||||
|
ErrFilesystemExists = errors.Sentinel("vhd: filesystem already exists on disk")
|
||||||
|
)
|
||||||
|
|
||||||
|
var useDdAllocation bool
|
||||||
|
var setDdAllocator sync.Once
|
||||||
|
|
||||||
|
// hasExitCode allows this code to test the response error to see if there is
|
||||||
|
// an exit code available from the command call that can be used to determine if
|
||||||
|
// something went wrong.
|
||||||
|
type hasExitCode interface {
|
||||||
|
ExitCode() int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commander defines an interface that must be met for executing commands on the
|
||||||
|
// underlying OS. By default the vhd package will use Go's exec.Cmd type for
|
||||||
|
// execution. This interface allows stubbing out on tests, or potentially custom
|
||||||
|
// setups down the line.
|
||||||
|
type Commander interface {
|
||||||
|
Run() error
|
||||||
|
Output() ([]byte, error)
|
||||||
|
String() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// CommanderProvider is a function that provides a struct meeting the Commander
|
||||||
|
// interface requirements.
|
||||||
|
type CommanderProvider func(ctx context.Context, name string, args ...string) Commander
|
||||||
|
|
||||||
|
// CfgOption is a configuration option callback for the Disk.
|
||||||
|
type CfgOption func(d *Disk) *Disk
|
||||||
|
|
||||||
|
// Disk represents the underlying virtual disk for the instance.
|
||||||
|
type Disk struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
// The total size of the disk allowed in bytes.
|
||||||
|
size int64
|
||||||
|
// The path where the disk image should be created.
|
||||||
|
diskPath string
|
||||||
|
// The point at which this disk should be made available on the system. This
|
||||||
|
// is where files can be read/written to.
|
||||||
|
mountAt string
|
||||||
|
fs afero.Fs
|
||||||
|
commander CommanderProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiskPath returns the underlying path that contains the virtual disk for the server
|
||||||
|
// identified by its UUID.
|
||||||
|
func DiskPath(uuid string) string {
|
||||||
|
return filepath.Join(config.Get().System.Data, ".vhd/", uuid+".img")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enabled returns true when VHD support is enabled on the instance.
|
||||||
|
func Enabled() bool {
|
||||||
|
return config.Get().Servers.Filesystem.Driver == config.FSDriverVHD
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new Disk instance. The "size" parameter should be provided in
|
||||||
|
// bytes of space allowed for the disk. An additional slice of option callbacks
|
||||||
|
// can be provided to programatically swap out the underlying filesystem
|
||||||
|
// implementation or the underlying command exection engine.
|
||||||
|
func New(size int64, diskPath string, mountAt string, opts ...func(*Disk)) *Disk {
|
||||||
|
if diskPath == "" || mountAt == "" {
|
||||||
|
panic("vhd: cannot specify an empty disk or mount path")
|
||||||
|
}
|
||||||
|
d := Disk{
|
||||||
|
size: size,
|
||||||
|
diskPath: diskPath,
|
||||||
|
mountAt: mountAt,
|
||||||
|
fs: afero.NewOsFs(),
|
||||||
|
commander: func(ctx context.Context, name string, args ...string) Commander {
|
||||||
|
return exec.CommandContext(ctx, name, args...)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(&d)
|
||||||
|
}
|
||||||
|
return &d
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFs allows for a different underlying filesystem to be provided to the
|
||||||
|
// virtual disk manager.
|
||||||
|
func WithFs(fs afero.Fs) func(*Disk) {
|
||||||
|
return func(d *Disk) {
|
||||||
|
d.fs = fs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCommander allows a different Commander provider to be provided.
|
||||||
|
func WithCommander(c CommanderProvider) func(*Disk) {
|
||||||
|
return func(d *Disk) {
|
||||||
|
d.commander = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Disk) Path() string {
|
||||||
|
return d.diskPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Disk) MountPath() string {
|
||||||
|
return d.mountAt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exists reports if the disk exists on the system yet or not. This only verifies
|
||||||
|
// the presence of the disk image, not the validity of it. An error is returned
|
||||||
|
// if the path exists but the destination is not a file or is a symlink.
|
||||||
|
func (d *Disk) Exists() (bool, error) {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
st, err := d.fs.Stat(d.diskPath)
|
||||||
|
if err != nil && os.IsNotExist(err) {
|
||||||
|
return false, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return false, errors.WithStack(err)
|
||||||
|
}
|
||||||
|
if !st.IsDir() && st.Mode()&os.ModeSymlink == 0 {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, errors.WithStack(ErrInvalidDiskPathTarget)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsMounted checks to see if the given disk is currently mounted.
|
||||||
|
func (d *Disk) IsMounted(ctx context.Context) (bool, error) {
|
||||||
|
find := d.mountAt + " ext4"
|
||||||
|
cmd := d.commander(ctx, "grep", "-qs", find, "/proc/mounts")
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
if v, ok := err.(hasExitCode); ok {
|
||||||
|
if v.ExitCode() == 1 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, errors.Wrap(err, "vhd: failed to execute grep for mount existence")
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mount attempts to mount the disk as configured. If it does not exist or the
|
||||||
|
// mount command fails an error will be returned to the caller. This does not
|
||||||
|
// attempt to create the disk if it is missing from the filesystem.
|
||||||
|
//
|
||||||
|
// Attempting to mount a disk which does not exist will result in an error being
|
||||||
|
// returned to the caller. If the disk is already mounted an ErrFilesystemMounted
|
||||||
|
// error is returned to the caller.
|
||||||
|
func (d *Disk) Mount(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
return d.mount(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmount attempts to unmount the disk from the system. If the disk is not
|
||||||
|
// currently mounted this function is a no-op and ErrFilesystemNotMounted is
|
||||||
|
// returned to the caller.
|
||||||
|
func (d *Disk) Unmount(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
return d.unmount(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate executes the "fallocate" command on the disk. This will first unmount
|
||||||
|
// the disk from the system before attempting to actually allocate the space. If
|
||||||
|
// this disk already exists on the machine it will be resized accordingly.
|
||||||
|
//
|
||||||
|
// DANGER! This will unmount the disk from the machine while performing this
|
||||||
|
// action, use caution when calling it during normal processes.
|
||||||
|
func (d *Disk) Allocate(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
if exists, err := d.Exists(); exists {
|
||||||
|
// If the disk currently exists attempt to unmount the mount point before
|
||||||
|
// allocating space.
|
||||||
|
if err := d.Unmount(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
return errors.Wrap(err, "vhd: failed to check for existence of root disk")
|
||||||
|
}
|
||||||
|
trim := path.Base(d.diskPath)
|
||||||
|
if err := d.fs.MkdirAll(strings.TrimSuffix(d.diskPath, trim), 0700); err != nil {
|
||||||
|
return errors.Wrap(err, "vhd: failed to create base vhd disk directory")
|
||||||
|
}
|
||||||
|
cmd := d.allocationCmd(ctx)
|
||||||
|
if _, err := cmd.Output(); err != nil {
|
||||||
|
msg := "vhd: failed to execute space allocation command"
|
||||||
|
if v, ok := err.(*exec.ExitError); ok {
|
||||||
|
stderr := strings.Trim(string(v.Stderr), ".\n")
|
||||||
|
if !useDdAllocation && strings.HasSuffix(stderr, "not supported") {
|
||||||
|
// Try again: fallocate is not supported on some filesystems so we'll fall
|
||||||
|
// back to making use of dd for subsequent operations.
|
||||||
|
setDdAllocator.Do(func() {
|
||||||
|
useDdAllocation = true
|
||||||
|
})
|
||||||
|
return d.Allocate(ctx)
|
||||||
|
}
|
||||||
|
msg = msg + ": " + stderr
|
||||||
|
}
|
||||||
|
return errors.Wrap(err, msg)
|
||||||
|
}
|
||||||
|
return errors.WithStack(d.fs.Chmod(d.diskPath, 0600))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resize will change the internal disk size limit and then allocate the new
|
||||||
|
// space to the disk automatically.
|
||||||
|
func (d *Disk) Resize(ctx context.Context, size int64) error {
|
||||||
|
atomic.StoreInt64(&d.size, size)
|
||||||
|
return d.Allocate(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy removes the underlying allocated disk image and unmounts the disk.
|
||||||
|
func (d *Disk) Destroy(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
if err := d.unmount(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
return errors.WithStackIf(d.fs.RemoveAll(d.mountAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeFilesystem will attempt to execute the "mkfs" command against the disk on
|
||||||
|
// the machine. If the disk has already been created this command will return an
|
||||||
|
// ErrFilesystemExists error to the caller. You should manually unmount the disk
|
||||||
|
// if it shouldn't be mounted at this point.
|
||||||
|
func (d *Disk) MakeFilesystem(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
// If no error is returned when mounting DO NOT execute this command as it will
|
||||||
|
// completely destroy the data stored at that location.
|
||||||
|
err := d.Mount(ctx)
|
||||||
|
if err == nil || errors.Is(err, ErrFilesystemMounted) {
|
||||||
|
// If it wasn't already mounted try to clean up at this point and unmount
|
||||||
|
// the disk. If this fails just ignore it for now.
|
||||||
|
if err != nil {
|
||||||
|
_ = d.Unmount(ctx)
|
||||||
|
}
|
||||||
|
return ErrFilesystemExists
|
||||||
|
}
|
||||||
|
if !strings.Contains(err.Error(), "can't find in /etc/fstab") && !strings.Contains(err.Error(), "exit status 32") {
|
||||||
|
return errors.WrapIf(err, "vhd: unexpected error from mount command")
|
||||||
|
}
|
||||||
|
// As long as we got an error back that was because we couldn't find thedisk
|
||||||
|
// in the /etc/fstab file we're good. Otherwise it means the disk probably exists
|
||||||
|
// or something else went wrong.
|
||||||
|
//
|
||||||
|
// Because this is a destructive command and non-tty based exection of it implies
|
||||||
|
// "-F" (force), we need to only run it when we can guarantee it doesn't already
|
||||||
|
// exist. No vague "maybe that error is expected" allowed here.
|
||||||
|
	cmd := d.commander(ctx, "mkfs", "-t", "ext4", d.diskPath)
	if err := cmd.Run(); err != nil {
		return errors.Wrap(err, "vhd: failed to make filesystem for disk")
	}
	return nil
}

func (d *Disk) mount(ctx context.Context) error {
	if isMounted, err := d.IsMounted(ctx); err != nil {
		return errors.WithStackIf(err)
	} else if isMounted {
		return ErrFilesystemMounted
	}

	if st, err := d.fs.Stat(d.mountAt); err != nil && !os.IsNotExist(err) {
		return errors.Wrap(err, "vhd: failed to stat mount path")
	} else if os.IsNotExist(err) {
		if err := d.fs.MkdirAll(d.mountAt, 0700); err != nil {
			return errors.Wrap(err, "vhd: failed to create mount path")
		}
	} else if !st.IsDir() {
		return errors.WithStack(ErrMountPathNotDirectory)
	}

	u := config.Get().System.User
	if err := d.fs.Chown(d.mountAt, u.Uid, u.Gid); err != nil {
		return errors.Wrap(err, "vhd: failed to chown mount point")
	}

	cmd := d.commander(ctx, "mount", "-t", "auto", "-o", "loop", d.diskPath, d.mountAt)
	if _, err := cmd.Output(); err != nil {
		msg := "vhd: failed to mount disk"
		if v, ok := err.(*exec.ExitError); ok {
			msg = msg + ": " + strings.Trim(string(v.Stderr), ".\n")
		}
		return errors.Wrap(err, msg)
	}
	return nil
}

func (d *Disk) unmount(ctx context.Context) error {
	cmd := d.commander(ctx, "umount", d.mountAt)
	if err := cmd.Run(); err != nil {
		v, ok := err.(hasExitCode)
		if ok && v.ExitCode() == 32 {
			return ErrFilesystemNotMounted
		}
		return errors.Wrap(err, "vhd: failed to execute unmount command for disk")
	}
	return nil
}

// allocationCmd returns the command to allocate the disk image. This will attempt to
// use the fallocate command if available, otherwise it will fall back to dd if the
// fallocate command has previously failed.
//
// We use 1024 as the multiplier for all of the disk space logic within the application.
// Passing "K" (/1024) is the same as "KiB" for fallocate, but is different than "KB" (/1000).
func (d *Disk) allocationCmd(ctx context.Context) Commander {
	s := atomic.LoadInt64(&d.size) / 1024
	if useDdAllocation {
		return d.commander(ctx, "dd", "if=/dev/zero", fmt.Sprintf("of=%s", d.diskPath), fmt.Sprintf("bs=%dk", s), "count=1")
	}
	return d.commander(ctx, "fallocate", "-l", fmt.Sprintf("%dK", s), d.diskPath)
}
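As a quick illustration of the sizing comment above (not part of the diff), this standalone sketch works the numbers for a hypothetical 100 MiB image; the path and size here are invented for the example:

```go
package main

import "fmt"

func main() {
	// For a 100 MiB disk image: 100 * 1024 * 1024 bytes.
	size := int64(100 * 1024 * 1024)
	s := size / 1024 // 102400
	// "K" is read as KiB by fallocate: 102400 * 1024 = 104,857,600 bytes (what we want).
	// "KB" would mean 102400 * 1000 = 102,400,000 bytes, ~2.3% smaller than intended.
	fmt.Printf("fallocate -l %dK /disk.img\n", s)
	fmt.Printf("dd if=/dev/zero of=/disk.img bs=%dk count=1\n", s)
}
```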
476 internal/vhd/vhd_test.go Normal file
@@ -0,0 +1,476 @@
package vhd

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"testing"

	"github.com/pterodactyl/wings/config"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func init() {
	config.Set(&config.Configuration{
		AuthenticationToken: "token123",
		System: config.SystemConfiguration{
			User: struct {
				Uid int
				Gid int
			}{Uid: 10, Gid: 10},
		},
	})
}

type mockCmd struct {
	run    func() error
	output func() ([]byte, error)
	string func() string
}

func (m *mockCmd) Run() error {
	if m.run != nil {
		return m.run()
	}
	return nil
}

func (m *mockCmd) Output() ([]byte, error) {
	if m.output != nil {
		return m.output()
	}
	return nil, nil
}

func (m *mockCmd) String() string {
	if m.string != nil {
		return m.string()
	}
	return ""
}

var _ Commander = (*mockCmd)(nil)

type mockedExitCode struct {
	code int
}

func (m *mockedExitCode) ExitCode() int {
	return m.code
}

func (m *mockedExitCode) Error() string {
	return fmt.Sprintf("mocked exit code: code %d", m.code)
}

func newMockDisk(c CommanderProvider) *Disk {
	commander := func(ctx context.Context, name string, args ...string) Commander {
		return &mockCmd{}
	}
	w := commander
	if c != nil {
		w = c
	}
	return New(100*1024*1024, "/disk.img", "/mnt", WithFs(afero.NewMemMapFs()), WithCommander(w))
}

func Test_New(t *testing.T) {
	t.Run("creates expected struct", func(t *testing.T) {
		d := New(100*1024*1024, "/disk.img", "/mnt")
		assert.NotNil(t, d)
		assert.Equal(t, int64(100*1024*1024), d.size)
		assert.Equal(t, "/disk.img", d.diskPath)
		assert.Equal(t, "/mnt", d.mountAt)

		// Ensure by default we get a commander interface returned and that it
		// returns an *exec.Cmd.
		o := d.commander(context.TODO(), "foo", "-bar")
		assert.NotNil(t, o)
		_, ok := o.(Commander)
		assert.True(t, ok)
		_, ok = o.(*exec.Cmd)
		assert.True(t, ok)
	})

	t.Run("creates an instance with custom options", func(t *testing.T) {
		fs := afero.NewMemMapFs()

		cprov := struct {
			Commander
		}{}
		c := func(ctx context.Context, name string, args ...string) Commander {
			return &cprov
		}

		d := New(100, "/disk.img", "/mnt", WithFs(fs), WithCommander(c))
		assert.NotNil(t, d)
		assert.Same(t, fs, d.fs)
		assert.Same(t, &cprov, d.commander(context.TODO(), ""))
	})

	t.Run("panics if either path is empty", func(t *testing.T) {
		assert.Panics(t, func() {
			_ = New(100, "", "/bar")
		})

		assert.Panics(t, func() {
			_ = New(100, "/foo", "")
		})
	})
}

func TestDisk_Exists(t *testing.T) {
	t.Run("it exists", func(t *testing.T) {
		d := newMockDisk(nil)
		f, err := d.fs.Create("/disk.img")
		require.NoError(t, err)
		_ = f.Close()

		exists, err := d.Exists()
		assert.NoError(t, err)
		assert.True(t, exists)
	})

	t.Run("it does not exist", func(t *testing.T) {
		d := newMockDisk(nil)
		exists, err := d.Exists()
		assert.NoError(t, err)
		assert.False(t, exists)
	})

	t.Run("it reports errors", func(t *testing.T) {
		d := newMockDisk(nil)
		err := d.fs.Mkdir("/disk.img", 0600)
		require.NoError(t, err)

		exists, err := d.Exists()
		assert.Error(t, err)
		assert.False(t, exists)
		assert.EqualError(t, err, ErrInvalidDiskPathTarget.Error())
	})
}

func TestDisk_IsMounted(t *testing.T) {
	t.Run("executes command and finds mounted disk", func(t *testing.T) {
		is := assert.New(t)
		var called bool

		pctx := context.TODO()
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			called = true
			is.Same(pctx, ctx)
			is.Equal("grep", name)
			is.Len(args, 3)
			is.Equal([]string{"-qs", "/mnt ext4", "/proc/mounts"}, args)

			return &mockCmd{}
		}

		disk := newMockDisk(cmd)
		mnt, err := disk.IsMounted(pctx)
		is.NoError(err)
		is.True(mnt)
		is.True(called)
	})

	t.Run("handles exit code 1 gracefully", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			called = true
			return &mockCmd{
				run: func() error {
					return &mockedExitCode{code: 1}
				},
			}
		}

		disk := newMockDisk(cmd)
		mnt, err := disk.IsMounted(context.TODO())
		assert.NoError(t, err)
		assert.False(t, mnt)
		assert.True(t, called)
	})

	t.Run("handles unexpected errors successfully", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					return &mockedExitCode{code: 3}
				},
			}
		}

		disk := newMockDisk(cmd)
		mnt, err := disk.IsMounted(context.TODO())
		assert.Error(t, err)
		assert.False(t, mnt)
	})
}

func TestDisk_Mount(t *testing.T) {
	failedCmd := func(ctx context.Context, name string, args ...string) Commander {
		return &mockCmd{run: func() error {
			return &mockedExitCode{code: 1}
		}}
	}

	t.Run("error is returned if mount point is not a directory", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		_, err := disk.fs.Create("/mnt")
		require.NoError(t, err)

		err = disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, ErrMountPathNotDirectory.Error())
	})

	t.Run("error is returned if mount point cannot be created", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		disk.fs = afero.NewReadOnlyFs(disk.fs)

		err := disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, "vhd: failed to create mount path: operation not permitted")
	})

	t.Run("error is returned if already mounted", func(t *testing.T) {
		disk := newMockDisk(nil)
		err := disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, ErrFilesystemMounted.Error())
	})

	t.Run("error is returned if mount command fails", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					return &mockedExitCode{code: 1}
				},
				output: func() ([]byte, error) {
					called = true

					assert.Equal(t, "mount", name)
					assert.Equal(t, []string{"-t", "auto", "-o", "loop", "/disk.img", "/mnt"}, args)

					return nil, &exec.ExitError{
						ProcessState: &os.ProcessState{},
						Stderr:       []byte("foo bar.\n"),
					}
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, "vhd: failed to mount disk: foo bar: exit status 0")
		assert.True(t, called)
	})

	t.Run("disk can be mounted at existing path", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		require.NoError(t, disk.fs.Mkdir("/mnt", 0600))

		err := disk.Mount(context.TODO())
		assert.NoError(t, err)
	})

	t.Run("disk can be mounted at non-existing path", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		err := disk.Mount(context.TODO())
		assert.NoError(t, err)

		st, err := disk.fs.Stat("/mnt")
		assert.NoError(t, err)
		assert.True(t, st.IsDir())
	})
}

func TestDisk_Unmount(t *testing.T) {
	t.Run("can unmount a disk", func(t *testing.T) {
		is := assert.New(t)
		pctx := context.TODO()

		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			called = true

			is.Same(pctx, ctx)
			is.Equal("umount", name)
			is.Equal([]string{"/mnt"}, args)

			return &mockCmd{}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(pctx)
		is.NoError(err)
		is.True(called)
	})

	t.Run("handles exit code 32 correctly", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					return &mockedExitCode{code: 32}
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(context.TODO())
		assert.NoError(t, err)
	})

	t.Run("non code 32 errors are returned as error", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					return &mockedExitCode{code: 1}
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(context.TODO())
		assert.Error(t, err)
	})

	t.Run("errors without ExitCode function are returned", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					return errors.New("foo bar")
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(context.TODO())
		assert.Error(t, err)
	})
}

func TestDisk_Allocate(t *testing.T) {
	t.Run("disk is unmounted before allocating space", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				output: func() ([]byte, error) {
					called = true
					assert.Equal(t, "fallocate", name)
					assert.Equal(t, []string{"-l", "102400K", "/disk.img"}, args)
					return nil, nil
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.fs.Mkdir("/mnt", 0600)
		require.NoError(t, err)

		err = disk.Allocate(context.TODO())
		assert.NoError(t, err)
		assert.True(t, called)
	})

	t.Run("disk space is allocated even when not exists", func(t *testing.T) {
		disk := newMockDisk(nil)
		err := disk.Allocate(context.TODO())
		assert.NoError(t, err)
	})

	t.Run("error is returned if command fails", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				output: func() ([]byte, error) {
					return nil, &exec.ExitError{
						ProcessState: &os.ProcessState{},
						Stderr:       []byte("foo bar.\n"),
					}
				},
			}
		}

		disk := newMockDisk(cmd)
		_, err := disk.fs.Create("/disk.img")
		require.NoError(t, err)

		err = disk.Allocate(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, "vhd: failed to execute fallocate command: foo bar: exit status 0")
	})
}

func TestDisk_MakeFilesystem(t *testing.T) {
	t.Run("filesystem is created if not found in /etc/fstab", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// Expect the call from IsMounted here and just return what we need
					// to indicate that nothing is currently mounted.
					if name == "grep" {
						return &mockedExitCode{code: 1}
					}
					called = true
					assert.Equal(t, "mkfs", name)
					assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
					return nil
				},
				output: func() ([]byte, error) {
					return nil, errors.New("error: can't find in /etc/fstab foo bar testing")
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.MakeFilesystem(context.TODO())
		assert.NoError(t, err)
		assert.True(t, called)
	})

	t.Run("filesystem is created if error is returned from mount command", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// Expect the call from IsMounted here and just return what we need
					// to indicate that nothing is currently mounted.
					if name == "grep" {
						return &mockedExitCode{code: 1}
					}
					called = true
					assert.Equal(t, "mkfs", name)
					assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
					return nil
				},
				output: func() ([]byte, error) {
					if name == "mount" {
						return nil, &exec.ExitError{
							Stderr: []byte("foo bar: exit status 32\n"),
						}
					}
					return nil, nil
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.MakeFilesystem(context.TODO())
		assert.NoError(t, err)
		assert.True(t, called)
	})

	t.Run("error is returned if currently mounted", func(t *testing.T) {
		disk := newMockDisk(nil)
		err := disk.MakeFilesystem(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, ErrFilesystemExists.Error())
	})
}
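Reading these tests together, the intended lifecycle for a virtual disk appears to be Allocate, then MakeFilesystem, then Mount, with Unmount on teardown. A minimal usage sketch under that assumption (paths, size, and the import path are illustrative only; error handling shortened):

```go
package main

import (
	"context"
	"errors"
	"log"

	"github.com/pterodactyl/wings/internal/vhd"
)

func main() {
	ctx := context.Background()
	// 10 GiB image backing an example mount point; both paths are made up.
	d := vhd.New(10*1024*1024*1024, "/srv/disks/example.img", "/var/lib/example/volume")
	if err := d.Allocate(ctx); err != nil {
		log.Fatal(err)
	}
	if err := d.MakeFilesystem(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemExists) {
		log.Fatal(err)
	}
	if err := d.Mount(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemMounted) {
		log.Fatal(err)
	}
	defer d.Unmount(ctx)
}
```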
@@ -4,18 +4,20 @@ import (
 	"bytes"
 	"context"
 	"fmt"
-	"github.com/pterodactyl/wings/internal/models"
 	"io"
 	"net/http"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/pterodactyl/wings/internal/models"
+
 	"emperror.dev/errors"
 	"github.com/apex/log"
 	"github.com/cenkalti/backoff/v4"
 	"github.com/goccy/go-json"
 
+	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/system"
 )
@@ -58,6 +60,18 @@ func New(base string, opts ...ClientOption) Client {
 	return &c
 }
 
+// NewFromConfig returns a new Client using the configuration passed through
+// by the caller.
+func NewFromConfig(cfg *config.Configuration, opts ...ClientOption) Client {
+	passOpts := []ClientOption{
+		WithCredentials(cfg.AuthenticationTokenId, cfg.AuthenticationToken),
+		WithHttpClient(&http.Client{
+			Timeout: time.Second * time.Duration(cfg.RemoteQuery.Timeout),
+		}),
+	}
+	return New(cfg.PanelLocation, append(passOpts, opts...)...)
+}
+
 // WithCredentials sets the credentials to use when making request to the remote
 // API endpoint.
 func WithCredentials(id, token string) ClientOption {
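For context, NewFromConfig simply bundles the credential and HTTP-timeout options that callers previously assembled by hand. A hedged sketch of a call site, assuming the configuration is already loaded via config.Get():

```go
package main

import (
	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/remote"
)

// newPanelClient shows the intended call site; any extra ClientOption values
// are passed straight through to remote.New.
func newPanelClient() remote.Client {
	cfg := config.Get()
	return remote.NewFromConfig(cfg)
}
```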
@@ -3,10 +3,11 @@ package remote
 import (
 	"context"
 	"fmt"
-	"github.com/pterodactyl/wings/internal/models"
 	"strconv"
 	"sync"
 
+	"github.com/pterodactyl/wings/internal/models"
+
 	"emperror.dev/errors"
 	"github.com/apex/log"
 	"golang.org/x/sync/errgroup"
@@ -2,11 +2,12 @@ package remote
 
 import (
 	"bytes"
-	"github.com/apex/log"
-	"github.com/goccy/go-json"
 	"regexp"
 	"strings"
 
+	"github.com/apex/log"
+	"github.com/goccy/go-json"
+
 	"github.com/pterodactyl/wings/parser"
 )
@@ -156,9 +157,15 @@ type BackupRemoteUploadResponse struct {
 	PartSize int64 `json:"part_size"`
 }
 
+type BackupPart struct {
+	ETag       string `json:"etag"`
+	PartNumber int    `json:"part_number"`
+}
+
 type BackupRequest struct {
 	Checksum     string `json:"checksum"`
 	ChecksumType string `json:"checksum_type"`
 	Size         int64  `json:"size"`
 	Successful   bool   `json:"successful"`
+	Parts        []BackupPart `json:"parts"`
 }
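To make the wire format of the new Parts field concrete, here is a hedged sketch of the notification body Wings would now send back to the Panel after a multipart upload; the checksum and ETag values are invented and the standard library encoder is used only for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/pterodactyl/wings/remote"
)

func main() {
	req := remote.BackupRequest{
		Checksum:     "9f86d081884c7d65...",
		ChecksumType: "sha1",
		Size:         1 << 30,
		Successful:   true,
		Parts: []remote.BackupPart{
			{ETag: `"d41d8cd98f00b204e9800998ecf8427e"`, PartNumber: 1},
			{ETag: `"0cc175b9c0f1b6a831c399e269772661"`, PartNumber: 2},
		},
	}
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}
```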
@@ -4,6 +4,7 @@ import (
 	"github.com/apex/log"
 	"github.com/gin-gonic/gin"
 
+	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/remote"
 	"github.com/pterodactyl/wings/router/middleware"
 	wserver "github.com/pterodactyl/wings/server"
@@ -15,6 +16,7 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
 
 	router := gin.New()
 	router.Use(gin.Recovery())
+	router.SetTrustedProxies(config.Get().Api.TrustedProxies)
 	router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
 	router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
 	// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
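The SetTrustedProxies call above is what makes the later c.ClientIP() changes meaningful: gin only honors forwarded-for headers when the request arrives from a listed proxy, otherwise it falls back to the TCP peer address. A minimal standalone sketch of that behavior (the CIDRs are examples, not the project defaults):

```go
package main

import "github.com/gin-gonic/gin"

func main() {
	r := gin.New()
	// Only X-Forwarded-For headers sent by these hosts are trusted.
	_ = r.SetTrustedProxies([]string{"127.0.0.1", "10.0.0.0/8"})
	r.GET("/ip", func(c *gin.Context) {
		c.String(200, "%s", c.ClientIP())
	})
	_ = r.Run(":8080")
}
```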
@@ -9,6 +9,7 @@ import (
 	"emperror.dev/errors"
 	"github.com/apex/log"
 	"github.com/gin-gonic/gin"
+
 	"github.com/pterodactyl/wings/router/downloader"
 	"github.com/pterodactyl/wings/router/middleware"
 	"github.com/pterodactyl/wings/router/tokens"
@@ -3,7 +3,6 @@ package router
 import (
 	"bufio"
 	"context"
-	"github.com/pterodactyl/wings/internal/models"
 	"io"
 	"mime/multipart"
 	"net/http"
@@ -14,6 +13,8 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/pterodactyl/wings/internal/models"
+
 	"github.com/pterodactyl/wings/config"
 
 	"emperror.dev/errors"
@@ -602,7 +603,7 @@ func postServerUploadFiles(c *gin.Context) {
 			NewServerError(err, s).Abort(c)
 			return
 		} else {
-			s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.Request.RemoteAddr), server.ActivityFileUploaded, models.ActivityMeta{
+			s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.ClientIP()), server.ActivityFileUploaded, models.ActivityMeta{
 				"file":      header.Filename,
 				"directory": filepath.Clean(directory),
 			})
@@ -32,7 +32,7 @@ func getServerWebsocket(c *gin.Context) {
 	ctx, cancel := context.WithCancel(c.Request.Context())
 	defer cancel()
 
-	handler, err := websocket.GetHandler(s, c.Writer, c.Request)
+	handler, err := websocket.GetHandler(s, c.Writer, c.Request, c)
 	if err != nil {
 		NewServerError(err, s).Abort(c)
 		return
@@ -12,7 +12,6 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
-	"sync/atomic"
 	"time"
 
 	"emperror.dev/errors"
@@ -30,19 +29,9 @@ import (
 	"github.com/pterodactyl/wings/router/tokens"
 	"github.com/pterodactyl/wings/server"
 	"github.com/pterodactyl/wings/server/filesystem"
-	"github.com/pterodactyl/wings/system"
 )
 
-// Number of ticks in the progress bar
-const ticks = 25
-
-// 100% / number of ticks = percentage represented by each tick
-const tickPercentage = 100 / ticks
-
-type downloadProgress struct {
-	size     int64
-	progress int64
-}
+const progressWidth = 25
 
 // Data passed over to initiate a server transfer.
 type serverTransferRequest struct {
@@ -95,7 +84,7 @@ func getServerArchive(c *gin.Context) {
 		return
 	}
 
-	// Compute sha1 checksum.
+	// Compute sha256 checksum.
 	h := sha256.New()
 	f, err := os.Open(archivePath)
 	if err != nil {
@@ -184,11 +173,35 @@ func postServerArchive(c *gin.Context) {
 		return
 	}
 
+	// Get the disk usage of the server (used to calculate the progress of the archive process)
+	rawSize, err := s.Filesystem().DiskUsage(true)
+	if err != nil {
+		sendTransferLog("Failed to get disk usage for server, aborting transfer..")
+		l.WithField("error", err).Error("failed to get disk usage for server")
+		return
+	}
+
 	// Create an archive of the entire server's data directory.
 	a := &filesystem.Archive{
 		BasePath: s.Filesystem().Path(),
+		Progress: filesystem.NewProgress(rawSize),
 	}
 
+	// Send the archive progress to the websocket every 3 seconds.
+	ctx, cancel := context.WithCancel(s.Context())
+	defer cancel()
+	go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
+		defer t.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-t.C:
+				sendTransferLog("Archiving " + p.Progress(progressWidth))
+			}
+		}
+	}(ctx, a.Progress, time.NewTicker(5*time.Second))
+
 	// Attempt to get an archive of the server.
 	if err := a.Create(getArchivePath(s.ID())); err != nil {
 		sendTransferLog("An error occurred while archiving the server: " + err.Error())
@@ -196,6 +209,12 @@ func postServerArchive(c *gin.Context) {
 		return
 	}
 
+	// Cancel the progress ticker.
+	cancel()
+
+	// Show 100% completion.
+	sendTransferLog("Archiving " + a.Progress.Progress(progressWidth))
+
 	sendTransferLog("Successfully created archive, attempting to notify panel..")
 	l.Info("successfully created server transfer archive, notifying panel..")
 
@@ -223,12 +242,6 @@ func postServerArchive(c *gin.Context) {
 	c.Status(http.StatusAccepted)
 }
 
-func (w *downloadProgress) Write(v []byte) (int, error) {
-	n := len(v)
-	atomic.AddInt64(&w.progress, int64(n))
-	return n, nil
-}
-
 // Log helper function to attach all errors and info output to a consistently formatted
 // log string for easier querying.
 func (str serverTransferRequest) log() *log.Entry {
@@ -321,7 +334,7 @@ func postTransfer(c *gin.Context) {
 	manager := middleware.ExtractManager(c)
 	u, err := uuid.Parse(data.ServerID)
 	if err != nil {
-		WithError(c, err)
+		_ = WithError(c, err)
 		return
 	}
 	// Force the server ID to be a valid UUID string at this point. If it is not an error
@@ -331,11 +344,12 @@ func postTransfer(c *gin.Context) {
 
 	data.log().Info("handling incoming server transfer request")
 	go func(data *serverTransferRequest) {
+		ctx := context.Background()
 		hasError := true
 
 		// Create a new server installer. This will only configure the environment and not
 		// run the installer scripts.
-		i, err := installer.New(context.Background(), manager, data.Server)
+		i, err := installer.New(ctx, manager, data.Server)
 		if err != nil {
 			_ = data.sendTransferStatus(manager.Client(), false)
 			data.log().WithField("error", err).Error("failed to validate received server data")
@@ -407,25 +421,22 @@ func postTransfer(c *gin.Context) {
 		sendTransferLog("Writing archive to disk...")
 		data.log().Info("writing transfer archive to disk...")
 
-		// Copy the file.
-		progress := &downloadProgress{size: size}
-		ticker := time.NewTicker(3 * time.Second)
-		go func(progress *downloadProgress, t *time.Ticker) {
-			for range ticker.C {
-				// p = 100 (Downloaded)
-				// size = 1000 (Content-Length)
-				// p / size = 0.1
-				// * 100 = 10% (Multiply by 100 to get a percentage of the download)
-				// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
-				// 2.5 (Number of ticks as a float64)
-				// 2 (convert to an integer)
-				p := atomic.LoadInt64(&progress.progress)
-				// We have to cast these numbers to float in order to get a float result from the division.
-				width := ((float64(p) / float64(size)) * 100) / tickPercentage
-				bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
-				sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
-			}
-		}(progress, ticker)
+		progress := filesystem.NewProgress(size)
+
+		// Send the archive progress to the websocket every 3 seconds.
+		ctx, cancel := context.WithCancel(ctx)
+		defer cancel()
+		go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
+			defer t.Stop()
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case <-t.C:
+					sendTransferLog("Downloading " + p.Progress(progressWidth))
+				}
+			}
+		}(ctx, progress, time.NewTicker(5*time.Second))
 
 		var reader io.Reader
 		downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
@@ -438,18 +449,16 @@ func postTransfer(c *gin.Context) {
 
 		buf := make([]byte, 1024*4)
 		if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
-			ticker.Stop()
 			_ = file.Close()
 
 			sendTransferLog("Failed while writing archive file to disk: " + err.Error())
 			data.log().WithField("error", err).Error("failed to copy archive file to disk")
 			return
 		}
-		ticker.Stop()
+		cancel()
 
 		// Show 100% completion.
-		humanSize := system.FormatBytes(progress.size)
-		sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)
+		sendTransferLog("Downloading " + progress.Progress(progressWidth))
 
 		if err := file.Close(); err != nil {
 			data.log().WithField("error", err).Error("unable to close archive file on local filesystem")
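The download side of the transfer reuses the same progress tracker by teeing the response body through it while bytes are copied to disk. A cut-down, standard-library-only sketch of that pattern, independent of the transfer code (the byte counts are arbitrary):

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// counter mirrors the role filesystem.Progress plays in the diff above:
// every byte copied to the destination also passes through Write here.
type counter struct{ n int64 }

func (c *counter) Write(p []byte) (int, error) {
	atomic.AddInt64(&c.n, int64(len(p)))
	return len(p), nil
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 4096))
	var c counter
	_, _ = io.Copy(io.Discard, io.TeeReader(src, &c))
	fmt.Println("bytes seen:", atomic.LoadInt64(&c.n)) // 4096
}
```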
@@ -7,6 +7,7 @@ import (
 
 	"emperror.dev/errors"
 	"github.com/goccy/go-json"
 
 	"github.com/pterodactyl/wings/events"
 	"github.com/pterodactyl/wings/system"
+
@@ -3,18 +3,21 @@ package websocket
 import (
 	"context"
 	"fmt"
-	"github.com/pterodactyl/wings/internal/models"
 	"net/http"
 	"strings"
 	"sync"
 	"time"
 
+	"github.com/pterodactyl/wings/internal/models"
+
 	"emperror.dev/errors"
 	"github.com/apex/log"
 	"github.com/gbrlsnchs/jwt/v3"
+	"github.com/gin-gonic/gin"
 	"github.com/goccy/go-json"
 	"github.com/google/uuid"
 	"github.com/gorilla/websocket"
 
 	"github.com/pterodactyl/wings/system"
 
 	"github.com/pterodactyl/wings/config"
@@ -79,7 +82,7 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
 }
 
 // GetHandler returns a new websocket handler using the context provided.
-func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Handler, error) {
+func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
 	upgrader := websocket.Upgrader{
 		// Ensure that the websocket request is originating from the Panel itself,
 		// and not some other location.
@@ -111,7 +114,7 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
 		Connection: conn,
 		jwt:        nil,
 		server:     s,
-		ra:         s.NewRequestActivity("", r.RemoteAddr),
+		ra:         s.NewRequestActivity("", c.ClientIP()),
 		uuid:       u,
 	}, nil
 }
@@ -2,10 +2,12 @@ package server
 
 import (
 	"context"
+	"time"
+
 	"emperror.dev/errors"
+
 	"github.com/pterodactyl/wings/internal/database"
 	"github.com/pterodactyl/wings/internal/models"
-	"time"
 )
 
 const ActivityPowerPrefix = "server:power."
@@ -32,7 +32,7 @@ type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mtime time.Time) error
 // noinspection GoNameStartsWithPackageName
 type BackupInterface interface {
 	// SetClient sets the API request client on the backup interface.
-	SetClient(c remote.Client)
+	SetClient(remote.Client)
 	// Identifier returns the UUID of this backup as tracked by the panel
 	// instance.
 	Identifier() string
@@ -41,7 +41,7 @@ type BackupInterface interface {
 	WithLogContext(map[string]interface{})
 	// Generate creates a backup in whatever the configured source for the
 	// specific implementation is.
-	Generate(ctx context.Context, basePath string, ignore string) (*ArchiveDetails, error)
+	Generate(context.Context, string, string) (*ArchiveDetails, error)
 	// Ignored returns the ignored files for this backup instance.
 	Ignored() string
 	// Checksum returns a SHA1 checksum for the generated backup.
@@ -53,13 +53,13 @@ type BackupInterface interface {
 	// to store it until it is moved to the final spot.
 	Path() string
 	// Details returns details about the archive.
-	Details(ctx context.Context) (*ArchiveDetails, error)
+	Details(context.Context, []remote.BackupPart) (*ArchiveDetails, error)
 	// Remove removes a backup file.
 	Remove() error
 	// Restore is called when a backup is ready to be restored to the disk from
 	// the given source. Not every backup implementation will support this nor
 	// will every implementation require a reader be provided.
-	Restore(ctx context.Context, reader io.Reader, callback RestoreCallback) error
+	Restore(context.Context, io.Reader, RestoreCallback) error
 }
 
 type Backup struct {
@@ -119,8 +119,8 @@ func (b *Backup) Checksum() ([]byte, error) {
 
 // Details returns both the checksum and size of the archive currently stored on
 // the disk to the caller.
-func (b *Backup) Details(ctx context.Context) (*ArchiveDetails, error) {
-	ad := ArchiveDetails{ChecksumType: "sha1"}
+func (b *Backup) Details(ctx context.Context, parts []remote.BackupPart) (*ArchiveDetails, error) {
+	ad := ArchiveDetails{ChecksumType: "sha1", Parts: parts}
 	g, ctx := errgroup.WithContext(ctx)
 
 	g.Go(func() error {
@@ -165,6 +165,7 @@ type ArchiveDetails struct {
 	Checksum     string `json:"checksum"`
 	ChecksumType string `json:"checksum_type"`
 	Size         int64  `json:"size"`
+	Parts        []remote.BackupPart `json:"parts"`
 }
 
 // ToRequest returns a request object.
@@ -174,5 +175,6 @@ func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
 		ChecksumType: ad.ChecksumType,
 		Size:         ad.Size,
 		Successful:   successful,
+		Parts:        ad.Parts,
 	}
 }
@@ -69,7 +69,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
 	}
 	b.log().Info("created backup successfully")
 
-	ad, err := b.Details(ctx)
+	ad, err := b.Details(ctx, nil)
 	if err != nil {
 		return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
 	}
@@ -71,10 +71,11 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
 	}
 	defer rc.Close()
 
-	if err := s.generateRemoteRequest(ctx, rc); err != nil {
+	parts, err := s.generateRemoteRequest(ctx, rc)
+	if err != nil {
 		return nil, err
 	}
-	ad, err := s.Details(ctx)
+	ad, err := s.Details(ctx, parts)
 	if err != nil {
 		return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
 	}
@@ -125,20 +126,20 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCallback) error {
 }
 
 // Generates the remote S3 request and begins the upload.
-func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) error {
+func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
 	defer rc.Close()
 
 	s.log().Debug("attempting to get size of backup...")
 	size, err := s.Backup.Size()
 	if err != nil {
-		return err
+		return nil, err
 	}
 	s.log().WithField("size", size).Debug("got size of backup")
 
 	s.log().Debug("attempting to get S3 upload urls from Panel...")
 	urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	s.log().Debug("got S3 upload urls from the Panel")
 	s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
@@ -156,22 +157,26 @@ func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser)
 		}
 
 		// Attempt to upload the part.
-		if _, err := uploader.uploadPart(ctx, part, partSize); err != nil {
+		etag, err := uploader.uploadPart(ctx, part, partSize)
+		if err != nil {
 			s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
-			return err
+			return nil, err
 		}
+		uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
+			ETag:       etag,
+			PartNumber: i + 1,
+		})
 		s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
 	}
 
 	s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
 
-	return nil
+	return uploader.uploadedParts, nil
 }
 
 type s3FileUploader struct {
 	io.ReadCloser
 	client *http.Client
+	uploadedParts []remote.BackupPart
 }
 
 // newS3FileUploader returns a new file uploader instance.
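For reference, each part PUT against a pre-signed S3 URL returns the stored part's ETag in a response header, which is what uploadPart presumably extracts and the loop above now records next to its 1-based part number. A generic sketch of that request/response shape, not the project's uploader (URL and sizes are invented):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// uploadPart PUTs one part body to a pre-signed URL and returns the ETag header.
func uploadPart(client *http.Client, url string, body []byte) (string, error) {
	req, err := http.NewRequest(http.MethodPut, url, bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	req.ContentLength = int64(len(body))
	res, err := client.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status %d", res.StatusCode)
	}
	// S3-compatible services report the part identifier here.
	return res.Header.Get("ETag"), nil
}

func main() {
	etag, err := uploadPart(http.DefaultClient, "https://bucket.s3.example.com/part?partNumber=1&uploadId=abc", make([]byte, 5<<20))
	fmt.Println(etag, err)
}
```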
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/mitchellh/colorstring"
+
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/system"
 )
@@ -8,6 +8,7 @@ import (
 	"path/filepath"
 	"strings"
 	"sync"
+	"sync/atomic"
 
 	"emperror.dev/errors"
 	"github.com/apex/log"
@@ -17,6 +18,7 @@ import (
 	ignore "github.com/sabhiram/go-gitignore"
 
 	"github.com/pterodactyl/wings/config"
+	"github.com/pterodactyl/wings/system"
 )
 
 const memory = 4 * 1024
@@ -28,6 +30,62 @@ var pool = sync.Pool{
 	},
 }
 
+// Progress is used to track the progress of any I/O operations that are being
+// performed.
+type Progress struct {
+	// written is the total size of the files that have been written to the writer.
+	written int64
+	// total is the total size of the archive in bytes.
+	total int64
+	// w is the underlying writer that writes are passed through to when set.
+	w io.Writer
+}
+
+// NewProgress returns a new progress tracker for an archive of the given total size.
+func NewProgress(total int64) *Progress {
+	return &Progress{total: total}
+}
+
+// Written returns the total number of bytes written.
+// This function should be used when the progress is tracking data being written.
+func (p *Progress) Written() int64 {
+	return atomic.LoadInt64(&p.written)
+}
+
+// Total returns the total size in bytes.
+func (p *Progress) Total() int64 {
+	return atomic.LoadInt64(&p.total)
+}
+
+// Write totals the number of bytes that have been written to the writer.
+func (p *Progress) Write(v []byte) (int, error) {
+	n := len(v)
+	atomic.AddInt64(&p.written, int64(n))
+	if p.w != nil {
+		return p.w.Write(v)
+	}
+	return n, nil
+}
+
+// Progress returns a formatted progress string for the current progress.
+func (p *Progress) Progress(width int) string {
+	current := p.Written()
+	total := p.Total()
+
+	// v = 100 (Progress)
+	// size = 1000 (Content-Length)
+	// p / size = 0.1
+	// * 100 = 10% (Multiply by 100 to get a percentage of the download)
+	// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
+	// 2.5 (Number of ticks as a float64)
+	// 2 (convert to an integer)
+
+	// We have to cast these numbers to float in order to get a float result from the division.
+	ticks := ((float64(current) / float64(total)) * 100) / (float64(100) / float64(width))
+	bar := strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks))
+	return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
+}
+
 type Archive struct {
 	// BasePath is the absolute path to create the archive from where Files and Ignore are
 	// relative to.
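Working the bar arithmetic above through once with concrete numbers: for a width of 25, having written 256 MiB of a 1 GiB archive gives (256/1024 * 100) / (100/25) = 6.25, so 6 "=" ticks followed by 19 spaces. A tiny standalone check of just that math (standard library only, no project code):

```go
package main

import (
	"fmt"
	"strings"
)

// bar reproduces only the tick computation from Progress.Progress.
func bar(written, total int64, width int) string {
	ticks := ((float64(written) / float64(total)) * 100) / (float64(100) / float64(width))
	return "[" + strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks)) + "]"
}

func main() {
	fmt.Println(bar(256<<20, 1<<30, 25)) // [======                   ]
}
```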
@@ -40,10 +98,13 @@ type Archive struct {
 	// Files specifies the files to archive, this takes priority over the Ignore option, if
 	// unspecified, all files in the BasePath will be archived unless Ignore is set.
 	Files []string
+
+	// Progress wraps the writer of the archive to pass through the progress tracker.
+	Progress *Progress
 }
 
-// Create creates an archive at dst with all of the files defined in the
-// included files struct.
+// Create creates an archive at dst with all the files defined in the
+// included Files array.
 func (a *Archive) Create(dst string) error {
 	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
@@ -62,26 +123,34 @@ func (a *Archive) Create(dst string) error {
 		writer = f
 	}
 
-	// The default compression level is BestSpeed
-	var cl = pgzip.BestSpeed
-
 	// Choose which compression level to use based on the compression_level configuration option
+	var compressionLevel int
 	switch config.Get().System.Backups.CompressionLevel {
 	case "none":
-		cl = pgzip.NoCompression
-	case "best_speed":
-		cl = pgzip.BestSpeed
+		compressionLevel = pgzip.NoCompression
 	case "best_compression":
-		cl = pgzip.BestCompression
+		compressionLevel = pgzip.BestCompression
+	case "best_speed":
+		fallthrough
+	default:
+		compressionLevel = pgzip.BestSpeed
 	}
 
 	// Create a new gzip writer around the file.
-	gw, _ := pgzip.NewWriterLevel(writer, cl)
+	gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
 	_ = gw.SetConcurrency(1<<20, 1)
 	defer gw.Close()
 
+	var pw io.Writer
+	if a.Progress != nil {
+		a.Progress.w = gw
+		pw = a.Progress
+	} else {
+		pw = gw
+	}
+
 	// Create a new tar writer around the gzip writer.
-	tw := tar.NewWriter(gw)
+	tw := tar.NewWriter(pw)
 	defer tw.Close()
 
 	// Configure godirwalk.
@@ -116,7 +185,7 @@ func (a *Archive) Create(dst string) error {
 // being generated.
 func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
 	return func(path string, de *godirwalk.Dirent) error {
-		// Skip directories because we walking them recursively.
+		// Skip directories because we are walking them recursively.
 		if de.IsDir() {
 			return nil
 		}
@@ -5,8 +5,6 @@ import (
 	"archive/zip"
 	"compress/gzip"
 	"fmt"
-	gzip2 "github.com/klauspost/compress/gzip"
-	zip2 "github.com/klauspost/compress/zip"
 	"os"
 	"path"
 	"path/filepath"
@@ -15,6 +13,9 @@ import (
 	"sync/atomic"
 	"time"
 
+	gzip2 "github.com/klauspost/compress/gzip"
+	zip2 "github.com/klauspost/compress/zip"
+
 	"emperror.dev/errors"
 	"github.com/mholt/archiver/v3"
 )
@@ -1,6 +1,8 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/pterodactyl/wings/internal/vhd"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"syscall"
|
"syscall"
|
||||||
@@ -35,18 +37,46 @@ func (ult *usageLookupTime) Get() time.Time {
 	return ult.value
 }
 
-// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
+// MaxDisk returns the maximum amount of disk space that this Filesystem
+// instance is allowed to use.
 func (fs *Filesystem) MaxDisk() int64 {
 	return atomic.LoadInt64(&fs.diskLimit)
 }
 
-// Sets the disk space limit for this Filesystem instance.
-func (fs *Filesystem) SetDiskLimit(i int64) {
-	atomic.SwapInt64(&fs.diskLimit, i)
+// SetDiskLimit sets the disk space limit for this Filesystem instance. This
+// logic will also handle mounting or unmounting a virtual disk if it is being
+// used currently.
+func (fs *Filesystem) SetDiskLimit(ctx context.Context, i int64) error {
+	// Do nothing if this method is called but the limit is not changing.
+	if atomic.LoadInt64(&fs.diskLimit) == i {
+		return nil
+	}
+	if vhd.Enabled() {
+		if i == 0 && fs.IsVirtual() {
+			fs.log().Debug("disk limit changed to 0, destroying virtual disk")
+			// Remove the VHD if it is mounted so that we're just storing files directly on the system
+			// since we cannot have a virtual disk with a space limit enforced like that.
+			if err := fs.vhd.Destroy(ctx); err != nil {
+				return errors.WithStackIf(err)
+			}
+			fs.vhd = nil
+		}
+		// If we're setting a disk size go ahead and mount the VHD if it isn't already mounted,
+		// and then allocate the new space to the disk.
+		if i > 0 {
+			fs.log().Debug("disk limit updated, allocating new space to virtual disk")
+			if err := fs.ConfigureDisk(ctx, i); err != nil {
+				return errors.WithStackIf(err)
+			}
+		}
+	}
+	fs.log().WithField("limit", i).Debug("disk limit updated")
+	atomic.StoreInt64(&fs.diskLimit, i)
+	return nil
 }
 
-// The same concept as HasSpaceAvailable however this will return an error if there is
-// no space, rather than a boolean value.
+// HasSpaceErr is the same concept as HasSpaceAvailable however this will return
+// an error if there is no space, rather than a boolean value.
 func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
 	if !fs.HasSpaceAvailable(allowStaleValue) {
 		return newFilesystemError(ErrCodeDiskSpace, nil)
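The reworked `SetDiskLimit` only performs VHD work when the limit actually changes: an unchanged value short-circuits, a value of 0 destroys an existing virtual disk, and a positive value mounts or resizes one before the new limit is stored. A minimal, self-contained sketch of that no-op guard (the names here are illustrative, not part of Wings):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// setLimit mirrors the guard used by SetDiskLimit: if the stored limit already
// matches the requested value, skip the (potentially expensive) disk work
// entirely and only store the value when it actually changes.
func setLimit(limit *int64, i int64) bool {
	if atomic.LoadInt64(limit) == i {
		return false // nothing to do
	}
	// ... in Wings this is where the virtual disk would be destroyed (i == 0)
	// or created/resized (i > 0) before the new value is recorded.
	atomic.StoreInt64(limit, i)
	return true
}

func main() {
	var limit int64 = 1024
	fmt.Println(setLimit(&limit, 1024)) // false: unchanged, no disk work
	fmt.Println(setLimit(&limit, 2048)) // true: limit updated
}
```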
@@ -54,67 +84,77 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
 	return nil
 }
 
-// Determines if the directory a file is trying to be added to has enough space available
-// for the file to be written to.
+// HasSpaceAvailable determines if the directory a file is trying to be added to
+// has enough space available for the file to be written to.
 //
-// Because determining the amount of space being used by a server is a taxing operation we
-// will load it all up into a cache and pull from that as long as the key is not expired.
+// Because determining the amount of space being used by a server is a taxing
+// operation we will load it all up into a cache and pull from that as long as
+// the key is not expired. This operation will potentially block unless
+// allowStaleValue is set to true. See the documentation on DiskUsage for how
+// this affects the call.
 //
-// This operation will potentially block unless allowStaleValue is set to true. See the
-// documentation on DiskUsage for how this affects the call.
+// If the current size of the disk is larger than the maximum allowed size this
+// function will return false, in all other cases it will return true. We do
+// not check the existence of a virtual disk at this point since this logic is
+// used to return friendly error messages to users, and also prevent us wasting
+// time on more taxing operations when we know the result will end up failing due
+// to space limits.
+//
+// If the servers disk limit is set to 0 it means there is no limit, however the
+// DiskUsage method is still called to keep the cache warm. This function will
+// always return true for a server with no limit set.
 func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
 	size, err := fs.DiskUsage(allowStaleValue)
 	if err != nil {
-		log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
+		fs.log().WithField("error", err).Warn("failed to determine root fs directory size")
+	}
+	return fs.MaxDisk() == 0 || size <= fs.MaxDisk()
 }
 
-	// If space is -1 or 0 just return true, means they're allowed unlimited.
-	//
-	// Technically we could skip disk space calculation because we don't need to check if the
-	// server exceeds its limit but because this method caches the disk usage it would be best
-	// to calculate the disk usage and always return true.
-	if fs.MaxDisk() == 0 {
-		return true
-	}
-
-	return size <= fs.MaxDisk()
-}
-
-// Returns the cached value for the amount of disk space used by the filesystem. Do not rely on this
-// function for critical logical checks. It should only be used in areas where the actual disk usage
-// does not need to be perfect, e.g. API responses for server resource usage.
+// CachedUsage returns the cached value for the amount of disk space used by the
+// filesystem. Do not rely on this function for critical logical checks. It
+// should only be used in areas where the actual disk usage does not need to be
+// perfect, e.g. API responses for server resource usage.
 func (fs *Filesystem) CachedUsage() int64 {
 	return atomic.LoadInt64(&fs.diskUsed)
 }
 
-// Internal helper function to allow other parts of the codebase to check the total used disk space
-// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
-// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
+// DiskUsage is an internal helper function to allow other parts of the codebase
+// to check the total used disk space as needed without overly taxing the system.
+// This will prioritize the value from the cache to avoid excessive IO usage. We
+// will only walk the filesystem and determine the size of the directory if there
 // is no longer a cached value.
 //
-// If "allowStaleValue" is set to true, a stale value MAY be returned to the caller if there is an
-// expired cache value AND there is currently another lookup in progress. If there is no cached value but
-// no other lookup is in progress, a fresh disk space response will be returned to the caller.
+// If "allowStaleValue" is set to true, a stale value MAY be returned to the
+// caller if there is an expired cache value AND there is currently another
+// lookup in progress. If there is no cached value but no other lookup is in
+// progress, a fresh disk space response will be returned to the caller.
 //
-// This is primarily to avoid a bunch of I/O operations from piling up on the server, especially on servers
-// with a large amount of files.
+// This is primarily to avoid a bunch of I/O operations from piling up on the
+// server, especially on servers with a large amount of files.
 func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
 	// A disk check interval of 0 means this functionality is completely disabled.
 	if fs.diskCheckInterval == 0 {
 		return 0, nil
 	}
 
-	if !fs.lastLookupTime.Get().After(time.Now().Add(time.Second * fs.diskCheckInterval * -1)) {
+	since := time.Now().Add(time.Second * fs.diskCheckInterval * -1)
+	// If the last lookup time was before our calculated limit we will re-execute this
+	// checking logic. If the lookup time was after the oldest possible timestamp we will
+	// continue returning the cached value.
+	if fs.lastLookupTime.Get().Before(since) {
 		// If we are now allowing a stale response go ahead and perform the lookup and return the fresh
 		// value. This is a blocking operation to the calling process.
 		if !allowStaleValue {
 			return fs.updateCachedDiskUsage()
-		} else if !fs.lookupInProgress.Load() {
+		}
+
 		// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
 		// currently performing a lookup, just do the disk usage calculation in the background.
+		if !fs.lookupInProgress.Load() {
 			go func(fs *Filesystem) {
 				if _, err := fs.updateCachedDiskUsage(); err != nil {
-					log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
+					fs.log().WithField("error", err).Warn("failed to update fs disk usage from within routine")
 				}
 			}(fs)
 		}
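`DiskUsage` now computes an explicit `since` timestamp and flips the comparison to `Before`, but the caching strategy is unchanged: a blocking refresh when the caller needs a fresh number, otherwise the cached value plus at most one background refresh. A self-contained sketch of that pattern (simplified; Wings uses its own atomic bool and a mutex-guarded lookup time rather than the CAS shown here):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// usageCache sketches the strategy DiskUsage follows: callers that accept a
// stale value get the cached number back immediately while at most one
// background goroutine refreshes it; callers that need a fresh value block.
type usageCache struct {
	value      int64
	lastLookup int64 // unix nanoseconds of the last successful lookup
	inProgress int32 // 0 = idle, 1 = a lookup is running
}

func (c *usageCache) get(allowStale bool, interval time.Duration, lookup func() int64) int64 {
	last := time.Unix(0, atomic.LoadInt64(&c.lastLookup))
	if last.Before(time.Now().Add(-interval)) {
		if !allowStale {
			return c.refresh(lookup) // blocking, always fresh
		}
		// Stale is acceptable: refresh in the background, but only if no other
		// goroutine is already doing so.
		if atomic.CompareAndSwapInt32(&c.inProgress, 0, 1) {
			go func() {
				defer atomic.StoreInt32(&c.inProgress, 0)
				c.refresh(lookup)
			}()
		}
	}
	return atomic.LoadInt64(&c.value)
}

func (c *usageCache) refresh(lookup func() int64) int64 {
	v := lookup()
	atomic.StoreInt64(&c.value, v)
	atomic.StoreInt64(&c.lastLookup, time.Now().UnixNano())
	return v
}

func main() {
	c := &usageCache{}
	fmt.Println(c.get(false, time.Second, func() int64 { return 42 })) // 42, blocking lookup
	fmt.Println(c.get(true, time.Second, func() int64 { return 99 }))  // 42, cached (not yet expired)
}
```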
@@ -194,11 +234,14 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
 	return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
 }
 
-// Helper function to determine if a server has space available for a file of a given size.
-// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
-// will be raised.
+// HasSpaceFor is a function to determine if a server has space available for a
+// file of a given size. If space is available, no error will be returned,
+// otherwise an ErrNotEnoughSpace error will be raised. If this filesystem is
+// configured as a virtual disk this function is a no-op as we will fall through
+// to the native implementation to throw back an error if there is not disk
+// space available.
 func (fs *Filesystem) HasSpaceFor(size int64) error {
-	if fs.MaxDisk() == 0 {
+	if fs.IsVirtual() || fs.MaxDisk() == 0 {
 		return nil
 	}
 	s, err := fs.DiskUsage(true)
@@ -234,3 +277,7 @@ func (fs *Filesystem) addDisk(i int64) int64 {
 
 	return atomic.AddInt64(&fs.diskUsed, i)
 }
+
+func (fs *Filesystem) log() *log.Entry {
+	return log.WithField("server", fs.uuid).WithField("root", fs.root)
+}
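With this change the userspace quota check steps aside whenever a virtual disk is in play: the mounted disk image enforces the limit itself, so an oversized write simply fails at the kernel level. A hypothetical call-site sketch under those assumptions (the helper name is illustrative; `HasSpaceFor` and `Writefile` signatures are taken from this diff):

```go
package example

import (
	"io"

	"github.com/pterodactyl/wings/server/filesystem"
)

// writeGuarded follows the pattern implied by this hunk: on a virtual disk
// HasSpaceFor is a no-op (the mounted image enforces the quota, so a write
// that exceeds it fails on its own), while on the plain driver the
// cached-usage check can reject the write up front.
func writeGuarded(fs *filesystem.Filesystem, name string, r io.Reader, size int64) error {
	if err := fs.HasSpaceFor(size); err != nil {
		return err // only reachable on the non-virtual driver
	}
	return fs.Writefile(name, r)
}
```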
@@ -20,6 +20,7 @@ import (
 	ignore "github.com/sabhiram/go-gitignore"
 
 	"github.com/pterodactyl/wings/config"
+	"github.com/pterodactyl/wings/internal/vhd"
 	"github.com/pterodactyl/wings/system"
 )
 
@@ -30,19 +31,23 @@ type Filesystem struct {
 	diskUsed          int64
 	diskCheckInterval time.Duration
 	denylist          *ignore.GitIgnore
+	vhd               *vhd.Disk
 
 	// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
 	diskLimit int64
 
 	// The root data directory path for this Filesystem instance.
 	root string
+	uuid string
 
 	isTest bool
 }
 
 // New creates a new Filesystem instance for a given server.
-func New(root string, size int64, denylist []string) *Filesystem {
-	return &Filesystem{
+func New(uuid string, size int64, denylist []string) *Filesystem {
+	root := filepath.Join(config.Get().System.Data, uuid)
+	fs := Filesystem{
+		uuid:              uuid,
 		root:              root,
 		diskLimit:         size,
 		diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
@@ -50,6 +55,15 @@ func New(root string, size int64, denylist []string) *Filesystem {
 		lookupInProgress:  system.NewAtomicBool(false),
 		denylist:          ignore.CompileIgnoreLines(denylist...),
 	}
+
+	// If VHD support is enabled but this server is configured with no disk size
+	// limit we cannot actually use a virtual disk. In that case fall back to using
+	// the default driver.
+	if vhd.Enabled() && size > 0 {
+		fs.vhd = vhd.New(size, vhd.DiskPath(uuid), fs.root)
+	}
+
+	return &fs
 }
 
 // Path returns the root path for the Filesystem instance.
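The constructor now takes the server UUID instead of a pre-built root path, derives the path from the daemon configuration itself, and only attaches a `vhd.Disk` when VHD support is enabled and the server has a non-zero limit. A minimal sketch of the new call (assumes the Wings configuration has already been loaded so `config.Get()` resolves; the UUID and limit are placeholders):

```go
package main

import (
	"fmt"

	"github.com/pterodactyl/wings/server/filesystem"
)

func main() {
	// Previously: filesystem.New(filepath.Join(config.Get().System.Data, uuid), size, denylist)
	// Now the UUID is passed directly and the root path is derived internally.
	fs := filesystem.New("0e4059ca-d79b-46a5-8ec4-95bd0736d150", 10*1024*1024*1024, []string{})

	// IsVirtual reports whether a vhd.Disk was attached, which only happens when
	// VHD support is enabled in the config and the disk limit is greater than zero.
	fmt.Println(fs.Path(), fs.IsVirtual(), fs.MaxDisk())
}
```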
@@ -77,9 +91,9 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
 	return f, st, nil
 }
 
-// Acts by creating the given file and path on the disk if it is not present already. If
-// it is present, the file is opened using the defaults which will truncate the contents.
-// The opened file is then returned to the caller.
+// Touch acts by creating the given file and path on the disk if it is not present
+// already. If it is present, the file is opened using the defaults which will
+// truncate the contents. The opened file is then returned to the caller.
 func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
 	cleaned, err := fs.SafePath(p)
 	if err != nil {
@@ -155,6 +169,12 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
 
 	buf := make([]byte, 1024*4)
 	sz, err := io.CopyBuffer(file, r, buf)
+	if err != nil {
+		if strings.Contains(err.Error(), "no space left on device") {
+			return newFilesystemError(ErrCodeDiskSpace, err)
+		}
+		return errors.WrapIf(err, "filesystem: failed to copy buffer for file write")
+	}
 
 	// Adjust the disk usage to account for the old size and the new size of the file.
 	fs.addDisk(sz - currentSize)
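This new error branch matters mostly on virtual disks: once the image is full the kernel rejects the write and `io.CopyBuffer` surfaces that as an error, which the diff maps to `ErrCodeDiskSpace` by matching the error text. A small self-contained illustration of where that text comes from on Linux; the underlying errno is `syscall.ENOSPC`, which the standard library wraps in an `*fs.PathError` (matching the errno with `errors.Is` is an alternative to the string check used here, not what Wings does; the path below is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"strings"
	"syscall"
)

func main() {
	// A write to a full filesystem typically fails with an error shaped like this.
	err := &fs.PathError{Op: "write", Path: "/srv/example/file.bin", Err: syscall.ENOSPC}

	// The check used in this diff: match on the formatted message.
	fmt.Println(strings.Contains(err.Error(), "no space left on device")) // true

	// An equivalent errno-based check: PathError unwraps to the syscall error.
	fmt.Println(errors.Is(err, syscall.ENOSPC)) // true
}
```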
@@ -312,8 +332,9 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
 	return name + suffix + extension, nil
 }
 
-// Copies a given file to the same location and appends a suffix to the file to indicate that
-// it has been copied.
+// Copy takes a given input file path and creates a copy of the file at the same
+// location, appending a unique number to the end. For example, a copy of "test.txt"
+// would create "test 2.txt" as the copy, then "test 3.txt" and so on.
 func (fs *Filesystem) Copy(p string) error {
 	cleaned, err := fs.SafePath(p)
 	if err != nil {
42
server/filesystem/virtual.go
Normal file
@@ -0,0 +1,42 @@
+package filesystem
+
+import (
+	"context"
+	"emperror.dev/errors"
+	"github.com/pterodactyl/wings/internal/vhd"
+)
+
+// IsVirtual returns true if the filesystem is currently using a virtual disk.
+func (fs *Filesystem) IsVirtual() bool {
+	return fs.vhd != nil
+}
+
+// ConfigureDisk will attempt to create a new VHD if there is not one already
+// created for the filesystem. If there is this method will attempt to resize
+// the underlying data volume. Passing a size of 0 or less will panic.
+func (fs *Filesystem) ConfigureDisk(ctx context.Context, size int64) error {
+	if size <= 0 {
+		panic("filesystem: attempt to configure disk with empty size")
+	}
+	if fs.vhd == nil {
+		fs.vhd = vhd.New(size, vhd.DiskPath(fs.uuid), fs.root)
+		if err := fs.MountDisk(ctx); err != nil {
+			return errors.WithStackIf(err)
+		}
+	}
+	// Resize the disk now that it is for sure mounted and exists on the system.
+	if err := fs.vhd.Resize(ctx, size); err != nil {
+		return errors.WithStackIf(err)
+	}
+	return nil
+}
+
+// MountDisk will attempt to mount the underlying virtual disk for the server.
+// If the disk is already mounted this is a no-op function.
+func (fs *Filesystem) MountDisk(ctx context.Context) error {
+	err := fs.vhd.Mount(ctx)
+	if errors.Is(err, vhd.ErrFilesystemMounted) {
+		return nil
+	}
+	return errors.WrapIf(err, "filesystem: failed to mount VHD")
+}
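virtual.go only consumes the `internal/vhd` package, so this diff implies its surface rather than showing it. A rough sketch of what these call sites rely on, inferred purely from the usage above (the signatures and helper descriptions are assumptions, not the package's actual definitions):

```go
package vhdsketch

import "context"

// Inferred from usage in server/filesystem; the concrete type and exact
// signatures in internal/vhd may differ.
type Disk interface {
	Mount(ctx context.Context) error              // ErrFilesystemMounted when already mounted
	Resize(ctx context.Context, size int64) error // grow/shrink the backing volume
	Destroy(ctx context.Context) error            // unmount and remove the backing volume
}

// Package-level helpers used by the diff (again, inferred):
//   vhd.Enabled() bool                           - whether VHD support is enabled in the config
//   vhd.New(size int64, diskPath, mountAt string) *vhd.Disk
//   vhd.DiskPath(uuid string) string             - location of the backing image for a server
//   vhd.ErrFilesystemMounted                     - sentinel returned by Mount when already mounted
```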
@@ -18,6 +18,7 @@ import (
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/mount"
 	"github.com/docker/docker/client"
+
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/environment"
 	"github.com/pterodactyl/wings/remote"
@@ -449,6 +450,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
 		},
 		Privileged:  true,
 		NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
+		UsernsMode:  container.UsernsMode(config.Get().Docker.UsernsMode),
 	}
 
 	// Ensure the root directory for the server exists properly before attempting
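The install container now inherits a user-namespace mode from the daemon configuration. For reference, this maps onto the Docker API's `HostConfig.UsernsMode` field; a minimal, self-contained illustration of what that value controls (the `"host"` mode string here is an example drawn from the Docker API, not from this diff, and an empty value keeps the daemon default):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// "host" shares the host's user namespace with the container. In this diff
	// the value is fed from config.Get().Docker.UsernsMode instead of a literal.
	hostConf := container.HostConfig{
		Privileged: true,
		UsernsMode: container.UsernsMode("host"),
	}
	fmt.Println(hostConf.UsernsMode.IsHost()) // true
}
```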
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/apex/log"
+
 	"github.com/pterodactyl/wings/events"
 	"github.com/pterodactyl/wings/system"
 
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path/filepath"
 	"runtime"
 	"sync"
 	"time"
@@ -25,14 +24,16 @@ import (
 type Manager struct {
 	mu      sync.RWMutex
 	client  remote.Client
+	skipVhdInitialization bool
 	servers []*Server
 }
 
 // NewManager returns a new server manager instance. This will boot up all the
 // servers that are currently present on the filesystem and set them into the
 // manager.
-func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
+func NewManager(ctx context.Context, client remote.Client, skipVhdInit bool) (*Manager, error) {
 	m := NewEmptyManager(client)
+	m.skipVhdInitialization = skipVhdInit
 	if err := m.init(ctx); err != nil {
 		return nil, err
 	}
@@ -184,7 +185,7 @@ func (m *Manager) ReadStates() (map[string]string, error) {
 // InitServer initializes a server using a data byte array. This will be
 // marshaled into the given struct using a YAML marshaler. This will also
 // configure the given environment for a server.
-func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server, error) {
+func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfigurationResponse) (*Server, error) {
 	s, err := New(m.client)
 	if err != nil {
 		return nil, err
@@ -196,7 +197,15 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
 		return nil, errors.WithStackIf(err)
 	}
 
-	s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
+	s.fs = filesystem.New(s.ID(), s.DiskSpace(), s.Config().Egg.FileDenylist)
+	// If this is a virtual filesystem we need to go ahead and mount the disk
+	// so that everything is accessible.
+	if s.fs.IsVirtual() && !m.skipVhdInitialization {
+		log.WithField("server", s.ID()).Info("mounting virtual disk for server")
+		if err := s.fs.MountDisk(ctx); err != nil {
+			return nil, err
+		}
+	}
 
 	// Right now we only support a Docker based environment, so I'm going to hard code
 	// this logic in. When we're ready to support other environment we'll need to make
@@ -258,7 +267,7 @@ func (m *Manager) init(ctx context.Context) error {
 		log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
 		return
 	}
-	s, err := m.InitServer(d)
+	s, err := m.InitServer(ctx, d)
 	if err != nil {
 		log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
 		return
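`NewManager` gains a third argument that controls whether virtual disks are mounted while the servers are booted, and `InitServer` now takes a context so that mounting can be cancelled. A hedged sketch of the updated signature (the helper and its wiring are illustrative; in Wings the boolean would come from whatever command builds the manager, for example a maintenance path that should not touch mounts):

```go
package main

import (
	"context"

	"github.com/apex/log"

	"github.com/pterodactyl/wings/remote"
	"github.com/pterodactyl/wings/server"
)

// bootServers is a hypothetical helper showing the new NewManager signature:
// the extra boolean skips mounting virtual disks while servers are loaded.
func bootServers(ctx context.Context, client remote.Client, skipVhdInit bool) (*server.Manager, error) {
	return server.NewManager(ctx, client, skipVhdInit)
}

func main() {
	// In Wings the client is constructed from the daemon config; here we only
	// demonstrate the signature, so bail out without a real client.
	var client remote.Client
	if client == nil {
		log.Warn("no remote client configured; skipping manager boot")
		return
	}
	if _, err := bootServers(context.Background(), client, false); err != nil {
		log.WithField("error", err).Fatal("failed to boot server manager")
	}
}
```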
@@ -8,6 +8,7 @@ import (
 
 	"emperror.dev/errors"
 	"github.com/google/uuid"
+
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/environment"
 )
@@ -4,6 +4,7 @@ import (
 	"testing"
 
 	. "github.com/franela/goblin"
+
 	"github.com/pterodactyl/wings/system"
 )
 
@@ -179,6 +179,8 @@ func (s *Server) Log() *log.Entry {
 //
 // This also means mass actions can be performed against servers on the Panel
 // and they will automatically sync with Wings when the server is started.
+//
+// TODO: accept a context value rather than using the server's context.
 func (s *Server) Sync() error {
 	cfg, err := s.client.GetServerConfiguration(s.Context(), s.ID())
 	if err != nil {
@@ -194,7 +196,9 @@ func (s *Server) Sync() error {
 
 	// Update the disk space limits for the server whenever the configuration for
 	// it changes.
-	s.fs.SetDiskLimit(s.DiskSpace())
+	if err := s.fs.SetDiskLimit(s.Context(), s.DiskSpace()); err != nil {
+		return errors.WrapIf(err, "server: failed to sync server configuration from API")
+	}
 
 	s.SyncWithEnvironment()
 
@@ -3,6 +3,7 @@ package sftp
 import (
 	"emperror.dev/errors"
 	"github.com/apex/log"
+
 	"github.com/pterodactyl/wings/internal/database"
 	"github.com/pterodactyl/wings/internal/models"
 )
@@ -1,3 +1,3 @@
 package system
 
-var Version = "1.7.1"
+var Version = "develop"