Compare commits


1 Commit

Author SHA1 Message Date
Pterodactyl CI 9c0c0239ff bump version for release 2022-05-30 00:51:22 +00:00
69 changed files with 633 additions and 2556 deletions

.github/FUNDING.yml vendored (3 changed lines)

@@ -1 +1,2 @@
github: [ matthewpi ]
github: [ DaneEveritt ]
custom: [ "https://paypal.me/PterodactylSoftware" ]


@@ -12,7 +12,7 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-20.04 ]
go: [ '1.18.7' ]
go: [ '^1.17' ]
goos: [ linux ]
goarch: [ amd64, arm64 ]
runs-on: ${{ matrix.os }}
@@ -58,6 +58,7 @@ jobs:
run: |
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH} wings.go
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH}_debug wings.go
upx build/wings_${GOOS}_${{ matrix.goarch }}
chmod +x build/*
- name: Tests
run: go test -race ./...
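
The `-X ${SRC_PATH}/system.Version=...` linker flag used in the build steps above stamps the release or commit identifier into the binary at link time. A minimal sketch of the variable that flag targets, assuming the conventional layout (the default value here is illustrative; `-X` only overrides package-level string variables):

// system/version.go (sketch, not part of this diff)
package system

// Version holds a development placeholder and is replaced at build time, e.g.
//   go build -ldflags "-X github.com/pterodactyl/wings/system.Version=1.7.2" wings.go
var Version = "develop"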


@@ -11,7 +11,7 @@ jobs:
uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.18.7'
go-version: '^1.17'
- name: Build
env:
REF: ${{ github.ref }}
@@ -22,8 +22,8 @@ jobs:
run: go test ./...
- name: Compress binary and make it executable
run: |
chmod +x build/wings_linux_amd64
chmod +x build/wings_linux_arm64
upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
upx build/wings_linux_arm64 && chmod +x build/wings_linux_arm64
- name: Extract changelog
env:
REF: ${{ github.ref }}


@@ -1,42 +1,5 @@
# Changelog
## v1.7.2
### Fixed
* The S3 backup driver now supports Cloudflare R2
### Added
* During a server transfer, there is a new "Archiving" status that outputs the progress of creating the server transfer archive.
* Adds a configuration option to control the list of trusted proxies that can be used to determine the client IP address.
* Adds a configuration option to control the Docker username space setting when Wings creates containers.
### Changed
* Releases are now built using `Go 1.18` — the minimum version required to build Wings is now `Go 1.18`.
## v1.7.1
### Fixed
* YAML parser has been updated to fix some strange issues
### Added
* Added `Force Outgoing IP` option for servers to ensure outgoing traffic uses the server's IP address
* Adds an option to control the level of gzip compression for backups
## v1.7.0
### Fixed
* Fixes multi-platform support for Wings' Docker image.
### Added
* Adds support for tracking of SFTP actions, power actions, server commands, and file uploads by utilizing a local SQLite database and processing events before sending them to the Panel.
* Adds support for configuring the MTU on the `pterodactyl0` network.
## v1.6.4
### Fixed
* Fixes a bug causing CPU limiting to not be properly applied to servers.
* Fixes a bug causing zip archives to decompress without taking into account nested folder structures.
## v1.6.3
### Fixed
* Fixes SFTP authentication failing for administrative users due to a permissions adjustment on the Panel.
## v1.6.2
### Fixed
* Fixes file upload size not being properly enforced.


@@ -1,18 +1,19 @@
# Stage 1 (Build)
FROM golang:1.18-alpine AS builder
FROM --platform=$BUILDPLATFORM golang:1.17-alpine AS builder
ARG VERSION
RUN apk add --update --no-cache git make
RUN apk add --update --no-cache git make upx
WORKDIR /app/
COPY go.mod go.sum /app/
RUN go mod download
COPY . /app/
RUN CGO_ENABLED=0 go build \
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
-ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=$VERSION" \
-v \
-trimpath \
-o wings \
wings.go
RUN upx wings
RUN echo "ID=\"distroless\"" > /etc/os-release
# Stage 2 (Final)


@@ -4,9 +4,6 @@ build:
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_amd64 -v wings.go
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go
race:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
debug:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
@@ -17,6 +14,9 @@ rmdebug:
go build -gcflags "all=-N -l" -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml
compress:
upx --brute build/wings_*
cross-build: clean build compress
clean:


@@ -14,22 +14,27 @@ dependencies, and allowing users to authenticate with the same credentials they
## Sponsors
I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
[Interested in becoming a sponsor?](https://github.com/sponsors/DaneEveritt)
| Company | About |
| ------- | ----- |
| [**WISP**](https://wisp.gg) | Extra features. |
| [**MixmlHosting**](https://mixmlhosting.com) | MixmlHosting provides high quality Virtual Private Servers along with game servers, all at an affordable price. |
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
| [**Fragnet**](https://fragnet.net) | Providing low latency, high-end game hosting solutions to gamers, game studios and eSports platforms. |
| [**Tempest**](https://tempest.net/) | Tempest Hosting is a subsidiary of Path Network, Inc. offering unmetered DDoS protected 10Gbps dedicated servers, starting at just $80/month. Full anycast, tons of filters. |
| [**Bloom.host**](https://bloom.host) | Bloom.host offers dedicated core VPS and Minecraft hosting with Ryzen 9 processors. With owned-hardware, we offer truly unbeatable prices on high-performance hosting. |
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
| [**MineStrator**](https://minestrator.com/) | Looking for a French highend hosting company for you minecraft server? More than 14,000 members on our discord, trust us. |
| [**DedicatedMC**](https://dedicatedmc.io/) | DedicatedMC provides Raw Power hosting at affordable pricing, making sure to never compromise on your performance and giving you the best performance money can buy. |
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
| [**XCORE**](https://xcore-server.de/) | XCORE offers High-End Servers for hosting and gaming since 2012. Fast, excellent and well-known for eSports Gaming. |
| [**RoyaleHosting**](https://royalehosting.net/) | Build your dreams and deploy them with RoyaleHostings reliable servers and network. Easy to use, provisioned in a couple of minutes. |
| [**Spill Hosting**](https://spillhosting.no/) | Spill Hosting is a Norwegian hosting service, which aims for inexpensive services on quality servers. Premium i9-9900K processors will run your game like a dream. |
| [**DeinServerHost**](https://deinserverhost.de/) | DeinServerHost offers Dedicated, vps and Gameservers for many popular Games like Minecraft and Rust in Germany since 2013. |
| [**HostBend**](https://hostbend.com/) | HostBend offers a variety of solutions for developers, students, and others who have a tight budget but don't want to compromise quality and support. |
| [**Capitol Hosting Solutions**](https://chs.gg/) | CHS is *the* budget friendly hosting company for Australian and American gamers, offering a variety of plans from Web Hosting to Game Servers; Custom Solutions too! |
| [**ByteAnia**](https://byteania.com/?utm_source=pterodactyl) | ByteAnia offers the best performing and most affordable **Ryzen 5000 Series hosting** on the market for *unbeatable prices*! |
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
| [**HostEZ**](https://hostez.io) | Providing North America Valheim, Minecraft and other popular games with low latency, high uptime and maximum availability. EZ! |
| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa.|
| [**Gamenodes**](https://gamenodes.nl) | Gamenodes love quality. For Minecraft, Discord Bots and other services, among others. With our own programmers, we provide just that little bit of extra service! |
| [**RocketNode**](https://rocketnode.net) | RocketNode is a VPS and Game Server provider that offers the best performing VPS and Game hosting Solutions at affordable prices! |
## Documentation
* [Panel Documentation](https://pterodactyl.io/panel/1.0/getting_started.html)


@@ -58,7 +58,7 @@ func newDiagnosticsCommand() *cobra.Command {
return command
}
// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.
// diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
// We collect:
// - wings and docker versions
// - relevant parts of daemon configuration


@@ -1,127 +0,0 @@
package cmd
import (
"context"
"os"
"os/exec"
"strings"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/vhd"
"github.com/pterodactyl/wings/loggers/cli"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server"
"github.com/spf13/cobra"
)
type MigrateVHDCommand struct {
manager *server.Manager
}
func newMigrateVHDCommand() *cobra.Command {
return &cobra.Command{
Use: "migrate-vhd",
Short: "migrates existing data from a directory tree into virtual hard-disks",
PreRun: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.DebugLevel)
log.SetHandler(cli.Default)
},
Run: func(cmd *cobra.Command, args []string) {
client := remote.NewFromConfig(config.Get())
manager, err := server.NewManager(cmd.Context(), client, true)
if err != nil {
log.WithField("error", err).Fatal("failed to create new server manager")
}
c := &MigrateVHDCommand{
manager: manager,
}
if err := c.Run(cmd.Context()); err != nil {
log.WithField("error", err).Fatal("failed to execute command")
}
},
}
}
// Run executes the migration command.
func (m *MigrateVHDCommand) Run(ctx context.Context) error {
if !vhd.Enabled() {
return errors.New("cannot migrate to vhd: the underlying driver must be set to \"vhd\"")
}
for _, s := range m.manager.All() {
s.Log().Debug("starting migration of server contents to virtual disk...")
v := vhd.New(s.DiskSpace(), vhd.DiskPath(s.ID()), s.Filesystem().Path())
s.Log().WithField("disk_image", v.Path()).Info("creating virtual disk for server")
if err := v.Allocate(ctx); err != nil {
return errors.WithStackIf(err)
}
s.Log().Info("creating virtual filesystem for server")
if err := v.MakeFilesystem(ctx); err != nil {
// If the filesystem already exists no worries, just move on with our
// day here.
if !errors.Is(err, vhd.ErrFilesystemExists) {
return errors.WithStack(err)
}
}
bak := strings.TrimSuffix(s.Filesystem().Path(), "/") + "_bak"
mounted, err := v.IsMounted(ctx)
if err != nil {
return err
} else if !mounted {
s.Log().WithField("backup_dir", bak).Debug("virtual disk is not yet mounted, creating backup directory")
// Create a backup directory of the server files if one does not already exist
// at that location. If one does exists we'll just assume it is good to go and
// rely on it to provide the files we'll need.
if _, err := os.Lstat(bak); os.IsNotExist(err) {
if err := os.Rename(s.Filesystem().Path(), bak); err != nil {
return errors.Wrap(err, "failed to rename existing data directory for backup")
}
} else if err != nil {
return errors.WithStack(err)
}
if err := os.RemoveAll(s.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "failed to remove base server files path")
}
} else {
s.Log().Warn("server appears to already have existing mount, not creating data backup")
}
// Attempt to mount the disk at the expected path now that we've created
// a backup of the server files.
if err := v.Mount(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemMounted) {
return errors.WithStackIf(err)
}
// Copy over the files from the backup for this server but only
// if we have a backup directory currently.
_, err = os.Lstat(bak)
if err != nil {
if !os.IsNotExist(err) {
s.Log().WithField("error", err).Warn("failed to stat backup directory")
} else {
s.Log().Info("no backup data directory exists, not restoring files")
}
} else {
cmd := exec.CommandContext(ctx, "cp", "-r", bak+"/.", s.Filesystem().Path())
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "migrate: failed to move old server files into new direcotry")
} else {
if err := os.RemoveAll(bak); err != nil {
s.Log().WithField("directory", bak).WithField("error", err).Warn("failed to remove backup directory")
}
}
}
s.Log().Info("updating server file ownership...")
if err := s.Filesystem().Chown("/"); err != nil {
s.Log().WithField("error", err).Warn("failed to update ownership of new server files")
}
s.Log().Info("finished migration to virtual disk...")
}
return nil
}


@@ -16,9 +16,6 @@ import (
"strings"
"time"
"github.com/pterodactyl/wings/internal/cron"
"github.com/pterodactyl/wings/internal/database"
"github.com/NYTimes/logrotate"
"github.com/apex/log"
"github.com/apex/log/handlers/multi"
@@ -47,16 +44,8 @@ var (
var rootCommand = &cobra.Command{
Use: "wings",
Short: "Runs the API server allowing programmatic control of game servers for Pterodactyl Panel.",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
initConfig()
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
},
PreRun: func(cmd *cobra.Command, args []string) {
initConfig()
initLogging()
if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
@@ -85,19 +74,18 @@ func Execute() {
func init() {
rootCommand.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
rootCommand.PersistentFlags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
// Flags specifically used when running the API.
rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
rootCommand.AddCommand(versionCommand)
rootCommand.AddCommand(configureCmd)
rootCommand.AddCommand(newDiagnosticsCommand())
rootCommand.AddCommand(newMigrateVHDCommand())
}
func rootCmdRun(cmd *cobra.Command, _ []string) {
@@ -105,6 +93,13 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
log.Debug("running in debug mode")
log.WithField("config_file", configPath).Info("loading configuration from file")
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
if err := config.ConfigureTimezone(); err != nil {
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
}
@@ -135,11 +130,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
}),
)
if err := database.Initialize(); err != nil {
log.WithField("error", err).Fatal("failed to initialize database")
}
manager, err := server.NewManager(cmd.Context(), pclient, false)
manager, err := server.NewManager(cmd.Context(), pclient)
if err != nil {
log.WithField("error", err).Fatal("failed to load server configurations")
}
@@ -165,7 +156,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
ticker := time.NewTicker(time.Minute)
// Every minute, write the current server states to the disk to allow for a more
// seamless hard-reboot process in which wings will re-sync server states based
// on its last tracked state.
// on it's last tracked state.
go func() {
for {
select {
@@ -268,13 +259,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
}
}()
if s, err := cron.Scheduler(cmd.Context(), manager); err != nil {
log.WithField("error", err).Fatal("failed to initialize cron system")
} else {
log.WithField("subsystem", "cron").Info("starting cron processes")
s.StartAsync()
}
go func() {
// Run the SFTP server.
if err := sftp.New(manager).Run(); err != nil {


@@ -91,9 +91,6 @@ type ApiConfiguration struct {
// The maximum size for files uploaded through the Panel in MB.
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
// A list of IP addresses of proxies that may send an X-Forwarded-For header to set the true client's IP
TrustedProxies []string `json:"trusted_proxies" yaml:"trusted_proxies"`
}
// RemoteQueryConfiguration defines the configuration settings for remote requests
@@ -166,15 +163,6 @@ type SystemConfiguration struct {
// disk usage is not a concern.
DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`
// ActivitySendInterval is the amount of time that should elapse between aggregated server activity
// being sent to the Panel. By default this will send activity collected over the last minute. Keep
// in mind that only a fixed number of activity log entries, defined by ActivitySendCount, will be sent
// in each run.
ActivitySendInterval int `default:"60" yaml:"activity_send_interval"`
// ActivitySendCount is the number of activity events to send per batch.
ActivitySendCount int `default:"100" yaml:"activity_send_count"`
// If set to true, file permissions for a server will be checked when the process is
// booted. This can cause boot delays if the server has a large amount of files. In most
// cases disabling this should not have any major impact unless external processes are
@@ -222,15 +210,6 @@ type Backups struct {
//
// Defaults to 0 (unlimited)
WriteLimit int `default:"0" yaml:"write_limit"`
// CompressionLevel determines how much backups created by wings should be compressed.
//
// "none" -> no compression will be applied
// "best_speed" -> uses gzip level 1 for fast speed
// "best_compression" -> uses gzip level 9 for minimal disk space useage
//
// Defaults to "best_speed" (level 1)
CompressionLevel string `default:"best_speed" yaml:"compression_level"`
}
type Transfers struct {
@@ -305,11 +284,6 @@ type Configuration struct {
// is only required by users running Wings without SSL certificates and using internal IP
// addresses in order to connect. Most users should NOT enable this setting.
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
// Servers contains all of the settings that are used when configuring individual servers
// on the system. This is a global configuration for all server instances, not to be confused
// with the per-server configurations provided by the Panel API.
Servers Servers `json:"servers" yaml:"servers"`
}
// NewAtPath creates a new struct and set the path where it should be stored.
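
The CompressionLevel comment above spells out how the three accepted values map onto gzip levels. A minimal sketch of that mapping using the standard library; the helper name is illustrative and not taken from this diff:

import "compress/gzip"

// gzipLevel translates the configured compression_level value into a
// compress/gzip constant; unrecognised values fall back to the documented
// default of best_speed (level 1).
func gzipLevel(setting string) int {
	switch setting {
	case "none":
		return gzip.NoCompression // level 0
	case "best_compression":
		return gzip.BestCompression // level 9
	default:
		return gzip.BestSpeed // level 1
	}
}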


@@ -36,7 +36,6 @@ type DockerNetworkConfiguration struct {
Mode string `default:"pterodactyl_nw" yaml:"network_mode"`
IsInternal bool `default:"false" yaml:"is_internal"`
EnableICC bool `default:"true" yaml:"enable_icc"`
NetworkMTU int64 `default:"1500" yaml:"network_mtu"`
Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
}
@@ -78,14 +77,6 @@ type DockerConfiguration struct {
Overhead Overhead `json:"overhead" yaml:"overhead"`
UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
// Sets the user namespace mode for the container when user namespace remapping option is
// enabled.
//
// If the value is blank, the daemon's user namespace remapping configuration is used;
// if the value is "host", the pterodactyl containers are started with user namespace
// remapping disabled.
UsernsMode string `default:"" json:"userns_mode" yaml:"userns_mode"`
}
// RegistryConfiguration defines the authentication credentials for a given


@@ -1,28 +0,0 @@
package config
type FSDriver string
const (
FSDriverLocal FSDriver = "local"
FSDriverVHD FSDriver = "vhd"
)
type Servers struct {
// Filesystem defines all of the filesystem specific settings used for servers.
Filesystem Filesystem `json:"filesystem" yaml:"filesystem"`
}
type Filesystem struct {
// Driver defines the underlying filesystem driver that is used when a server is
// created on the system. This currently supports either of the following drivers:
//
// local: the local driver is the default one used by Wings. This offloads all of the
// disk limit enforcement to Wings itself. This has a performance impact but is
// the most compatible with all systems.
// vhd: the vhd driver uses "virtual" disks on the host system to enforce disk limits
// on the server. This is more performant since calculations do not need to be made
// by Wings itself when enforcing limits. It also avoids vulnerabilities that exist
// in the local driver which allow malicious processes to quickly create massive files
// before Wings is able to detect and stop them from being written.
Driver FSDriver `default:"local" json:"driver" yaml:"driver"`
}


@@ -12,11 +12,6 @@ import (
// Defines the allocations available for a given server. When using the Docker environment
// driver these correspond to mappings for the container that allow external connections.
type Allocations struct {
// ForceOutgoingIP causes a dedicated bridge network to be created for the
// server with a special option, causing Docker to SNAT outgoing traffic to
// the DefaultMapping's IP. This is important to servers which rely on external
// services that check the IP of the server (Source Engine servers, for example).
ForceOutgoingIP bool `json:"force_outgoing_ip"`
// Defines the default allocation that should be used for this server. This is
// what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
// files or the startup arguments for a server.


@@ -41,12 +41,12 @@ func ConfigureDocker(ctx context.Context) error {
nw := config.Get().Docker.Network
resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
if err != nil {
if !client.IsErrNotFound(err) {
return err
}
log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
if err := createDockerNetwork(ctx, cli); err != nil {
if client.IsErrNotFound(err) {
log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
if err := createDockerNetwork(ctx, cli); err != nil {
return err
}
} else {
return err
}
}
@@ -92,7 +92,7 @@ func createDockerNetwork(ctx context.Context, cli *client.Client) error {
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "pterodactyl0",
"com.docker.network.driver.mtu": strconv.FormatInt(nw.NetworkMTU, 10),
"com.docker.network.driver.mtu": "1500",
},
})
if err != nil {


@@ -14,7 +14,6 @@ import (
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
)


@@ -147,12 +147,10 @@ func (e *Environment) InSituUpdate() error {
// currently available for it. If the container already exists it will be
// returned.
func (e *Environment) Create() error {
ctx := context.Background()
// If the container already exists don't hit the user with an error, just return
// the current information about it which is what we would do when creating the
// container anyways.
if _, err := e.ContainerInspect(ctx); err == nil {
if _, err := e.ContainerInspect(context.Background()); err == nil {
return nil
} else if !client.IsErrNotFound(err) {
return errors.Wrap(err, "environment/docker: failed to inspect container")
@@ -192,34 +190,7 @@ func (e *Environment) Create() error {
},
}
networkMode := container.NetworkMode(config.Get().Docker.Network.Mode)
if a.ForceOutgoingIP {
e.log().Debug("environment/docker: forcing outgoing IP address")
networkName := strings.ReplaceAll(e.Id, "-", "")
networkMode = container.NetworkMode(networkName)
if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
if !client.IsErrNotFound(err) {
return err
}
if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
Driver: "bridge",
EnableIPv6: false,
Internal: false,
Attachable: false,
Ingress: false,
ConfigOnly: false,
Options: map[string]string{
"encryption": "false",
"com.docker.network.bridge.default_bridge": "false",
"com.docker.network.host_ipv4": a.DefaultMapping.Ip,
},
}); err != nil {
return err
}
}
}
tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))
hostConf := &container.HostConfig{
PortBindings: a.DockerBindings(),
@@ -231,7 +202,7 @@ func (e *Environment) Create() error {
// Configure the /tmp folder mapping in containers. This is necessary for some
// games that need to make use of it for downloads and other installation processes.
Tmpfs: map[string]string{
"/tmp": "rw,exec,nosuid,size=" + strconv.Itoa(int(config.Get().Docker.TmpfsSize)) + "M",
"/tmp": "rw,exec,nosuid,size=" + tmpfsSize + "M",
},
// Define resource limits for the container based on the data passed through
@@ -260,11 +231,10 @@ func (e *Environment) Create() error {
"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
},
NetworkMode: networkMode,
UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
}
if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {
if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
return errors.Wrap(err, "environment/docker: failed to create container")
}


@@ -10,7 +10,6 @@ import (
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote"


@@ -118,7 +118,7 @@ func (l Limits) AsContainerResources() container.Resources {
// @see https://github.com/pterodactyl/panel/issues/3988
if l.CpuLimit > 0 {
resources.CPUQuota = l.CpuLimit * 1_000
resources.CPUPeriod = 100_000
resources.CPUPeriod = 100_00
resources.CPUShares = 1024
}
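
For reference, the arithmetic behind the quota/period pair above, assuming the Panel's convention that CpuLimit is expressed as a percentage of a single core (100 = one full core). This worked example is illustrative and not part of this diff:

package main

import "fmt"

func main() {
	cpuLimit := int64(250)    // configured limit, in percent of one core
	quota := cpuLimit * 1_000 // 250_000µs of CPU time allowed...
	period := int64(100_000)  // ...per 100_000µs scheduling period
	fmt.Printf("%.2f cores\n", float64(quota)/float64(period)) // prints "2.50 cores"
}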


@@ -5,7 +5,6 @@ import (
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
)

go.mod (114 changed lines)

@@ -1,125 +1,115 @@
module github.com/pterodactyl/wings
go 1.18
go 1.17
require (
emperror.dev/errors v0.8.1
github.com/AlecAivazis/survey/v2 v2.3.6
github.com/AlecAivazis/survey/v2 v2.3.4
github.com/Jeffail/gabs/v2 v2.6.1
github.com/NYTimes/logrotate v1.0.0
github.com/apex/log v1.9.0
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/beevik/etree v1.1.0
github.com/buger/jsonparser v1.1.1
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cenkalti/backoff/v4 v4.1.2
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
github.com/creasty/defaults v1.6.0
github.com/docker/docker v20.10.18+incompatible
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.14+incompatible
github.com/docker/go-connections v0.4.0
github.com/fatih/color v1.13.0
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
github.com/gabriel-vasile/mimetype v1.4.1
github.com/gammazero/workerpool v1.1.3
github.com/gabriel-vasile/mimetype v1.4.0
github.com/gammazero/workerpool v1.1.2
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gin-gonic/gin v1.8.1
github.com/glebarez/sqlite v1.4.8
github.com/go-co-op/gocron v1.17.0
github.com/goccy/go-json v0.9.11
github.com/gin-gonic/gin v1.7.7
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.5.0
github.com/iancoleman/strcase v0.2.0
github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
github.com/juju/ratelimit v1.0.2
github.com/karrick/godirwalk v1.17.0
github.com/klauspost/compress v1.15.11
github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
github.com/juju/ratelimit v1.0.1
github.com/karrick/godirwalk v1.16.1
github.com/klauspost/pgzip v1.2.5
github.com/magiconair/properties v1.8.6
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-colorable v0.1.12
github.com/mholt/archiver/v3 v3.5.1
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/sftp v1.13.5
github.com/pkg/sftp v1.13.4
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/spf13/afero v1.9.2
github.com/spf13/cobra v1.5.0
github.com/stretchr/testify v1.8.0
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
gopkg.in/ini.v1 v1.67.0
github.com/spf13/cobra v1.4.0
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
gopkg.in/ini.v1 v1.66.4
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/gorm v1.23.10
)
require github.com/goccy/go-json v0.9.6
require golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/containerd v1.6.2 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gammazero/deque v0.2.0 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/gammazero/deque v0.1.1 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/glebarez/go-sqlite v1.19.1 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.11.1 // indirect
github.com/go-playground/validator/v10 v10.10.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/gorilla/mux v1.7.4 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.15.1 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/magefile/mage v1.14.0 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/magefile/mage v1.13.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nwaples/rardecode v1.1.3 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pierrec/lz4/v4 v4.1.14 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
golang.org/x/text v0.3.8 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.28.1 // indirect
modernc.org/libc v1.20.0 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect
modernc.org/sqlite v1.19.1 // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

go.sum (522 changed lines)

File diff suppressed because it is too large


@@ -5,7 +5,6 @@ import (
"emperror.dev/errors"
"github.com/asaskevich/govalidator"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server"
)
@@ -38,7 +37,7 @@ func New(ctx context.Context, manager *server.Manager, details ServerDetails) (*
// Create a new server instance using the configuration we wrote to the disk
// so that everything gets instantiated correctly on the struct.
s, err := manager.InitServer(ctx, c)
s, err := manager.InitServer(c)
if err != nil {
return nil, errors.WrapIf(err, "installer: could not init server instance")
}


@@ -1,59 +0,0 @@
package cron
import (
"context"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
)
type activityCron struct {
mu *system.AtomicBool
manager *server.Manager
max int
}
// Run executes the cronjob and ensures we fetch and send all of the stored activity to the
// Panel instance. Once activity is sent it is deleted from the local database instance. Any
// SFTP-specific events are not handled in this cron; they're handled separately to account
// for de-duplication and event merging.
func (ac *activityCron) Run(ctx context.Context) error {
// Don't execute this cron if there is currently one running. Once this task is completed
// go ahead and mark it as no longer running.
if !ac.mu.SwapIf(true) {
return errors.WithStack(ErrCronRunning)
}
defer ac.mu.Store(false)
var activity []models.Activity
tx := database.Instance().WithContext(ctx).
Where("event NOT LIKE ?", "server:sftp.%").
Limit(ac.max).
Find(&activity)
if tx.Error != nil {
return errors.WithStack(tx.Error)
}
if len(activity) == 0 {
return nil
}
if err := ac.manager.Client().SendActivityLogs(ctx, activity); err != nil {
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
}
var ids []int
for _, v := range activity {
ids = append(ids, v.ID)
}
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
if tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}


@@ -1,73 +0,0 @@
package cron
import (
"context"
"time"
"emperror.dev/errors"
log2 "github.com/apex/log"
"github.com/go-co-op/gocron"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
)
const ErrCronRunning = errors.Sentinel("cron: job already running")
var o system.AtomicBool
// Scheduler configures the internal cronjob system for Wings and returns the scheduler
// instance to the caller. This should only be called once per application lifecycle, additional
// calls will result in an error being returned.
func Scheduler(ctx context.Context, m *server.Manager) (*gocron.Scheduler, error) {
if !o.SwapIf(true) {
return nil, errors.New("cron: cannot call scheduler more than once in application lifecycle")
}
l, err := time.LoadLocation(config.Get().System.Timezone)
if err != nil {
return nil, errors.Wrap(err, "cron: failed to parse configured system timezone")
}
activity := activityCron{
mu: system.NewAtomicBool(false),
manager: m,
max: config.Get().System.ActivitySendCount,
}
sftp := sftpCron{
mu: system.NewAtomicBool(false),
manager: m,
max: config.Get().System.ActivitySendCount,
}
s := gocron.NewScheduler(l)
log := log2.WithField("subsystem", "cron")
interval := time.Duration(config.Get().System.ActivitySendInterval) * time.Second
log.WithField("interval", interval).Info("configuring system crons")
_, _ = s.Tag("activity").Every(interval).Do(func() {
log.WithField("cron", "activity").Debug("sending internal activity events to Panel")
if err := activity.Run(ctx); err != nil {
if errors.Is(err, ErrCronRunning) {
log.WithField("cron", "activity").Warn("activity process is already running, skipping...")
} else {
log.WithField("cron", "activity").WithField("error", err).Error("activity process failed to execute")
}
}
})
_, _ = s.Tag("sftp").Every(interval).Do(func() {
log.WithField("cron", "sftp").Debug("sending sftp events to Panel")
if err := sftp.Run(ctx); err != nil {
if errors.Is(err, ErrCronRunning) {
log.WithField("cron", "sftp").Warn("sftp events process already running, skipping...")
} else {
log.WithField("cron", "sftp").WithField("error", err).Error("sftp events process failed to execute")
}
}
})
return s, nil
}


@@ -1,177 +0,0 @@
package cron
import (
"context"
"reflect"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
)
type sftpCron struct {
mu *system.AtomicBool
manager *server.Manager
max int
}
type mapKey struct {
User string
Server string
IP string
Event models.Event
Timestamp string
}
type eventMap struct {
max int
ids []int
m map[mapKey]*models.Activity
}
// Run executes the SFTP reconciliation cron. This job will pull all of the SFTP specific events
// and merge them together across user, server, ip, and event. This allows a SFTP event that deletes
// tens or hundreds of files to be tracked as a single "deletion" event so long as they all occur
// within the same one minute period of time (starting at the first timestamp for the group). Without
// this we'd end up flooding the Panel event log with excessive data that is of no use to end users.
func (sc *sftpCron) Run(ctx context.Context) error {
if !sc.mu.SwapIf(true) {
return errors.WithStack(ErrCronRunning)
}
defer sc.mu.Store(false)
var o int
activity, err := sc.fetchRecords(ctx, o)
if err != nil {
return err
}
o += len(activity)
events := &eventMap{
m: map[mapKey]*models.Activity{},
ids: []int{},
max: sc.max,
}
for {
if len(activity) == 0 {
break
}
slen := len(events.ids)
for _, a := range activity {
events.Push(a)
}
if len(events.ids) > slen {
// Execute the query again, we found some events so we want to continue
// with this. Start at the next offset.
activity, err = sc.fetchRecords(ctx, o)
if err != nil {
return errors.WithStack(err)
}
o += len(activity)
} else {
break
}
}
if len(events.m) == 0 {
return nil
}
if err := sc.manager.Client().SendActivityLogs(ctx, events.Elements()); err != nil {
return errors.Wrap(err, "failed to send sftp activity logs to Panel")
}
if tx := database.Instance().Where("id IN ?", events.ids).Delete(&models.Activity{}); tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}
// fetchRecords returns a group of activity events starting at the given offset. This is used
// since we might need to make multiple database queries to select enough events to properly
// fill up our request to the given maximum. This is due to the fact that this cron merges any
// activity that line up across user, server, ip, and event into a single activity record when
// sending the data to the Panel.
func (sc *sftpCron) fetchRecords(ctx context.Context, offset int) (activity []models.Activity, err error) {
tx := database.Instance().WithContext(ctx).
Where("event LIKE ?", "server:sftp.%").
Order("event DESC").
Offset(offset).
Limit(sc.max).
Find(&activity)
if tx.Error != nil {
err = errors.WithStack(tx.Error)
}
return
}
// Push adds an activity to the event mapping, or de-duplicates it and merges the files metadata
// into the existing entity that exists.
func (em *eventMap) Push(a models.Activity) {
m := em.forActivity(a)
// If no activity entity is returned we've hit the cap for the number of events to
// send along to the Panel. Just skip over this record and we'll account for it in
// the next iteration.
if m == nil {
return
}
em.ids = append(em.ids, a.ID)
// Always reduce this to the first timestamp that was recorded for the set
// of events, and not the timestamp of the most recent event in the group.
if a.Timestamp.Before(m.Timestamp) {
m.Timestamp = a.Timestamp
}
list := m.Metadata["files"].([]interface{})
if s, ok := a.Metadata["files"]; ok {
v := reflect.ValueOf(s)
if v.Kind() != reflect.Slice || v.IsNil() {
return
}
for i := 0; i < v.Len(); i++ {
list = append(list, v.Index(i).Interface())
}
// You must set it again at the end of the process, otherwise you've only updated the file
// slice in this one loop since it isn't passed by reference. This is just shorter than having
// to explicitly keep casting it to the slice.
m.Metadata["files"] = list
}
}
// Elements returns the finalized activity models.
func (em *eventMap) Elements() (out []models.Activity) {
for _, v := range em.m {
out = append(out, *v)
}
return
}
// forActivity returns an event entity from our map which allows existing matches to be
// updated with additional files.
func (em *eventMap) forActivity(a models.Activity) *models.Activity {
key := mapKey{
User: a.User.String,
Server: a.Server,
IP: a.IP,
Event: a.Event,
// We group by the minute, don't care about the seconds for this logic.
Timestamp: a.Timestamp.Format("2006-01-02_15:04"),
}
if v, ok := em.m[key]; ok {
return v
}
// Cap the size of the events map at the defined maximum events to send to the Panel. Just
// return nil and let the caller handle it.
if len(em.m) >= em.max {
return nil
}
// Doesn't exist in our map yet, create a copy of the activity passed into this
// function and then assign it into the map with an empty metadata value.
v := a
v.Metadata = models.ActivityMeta{
"files": make([]interface{}, 0),
}
em.m[key] = &v
return &v
}


@@ -1,61 +0,0 @@
package database
import (
"path/filepath"
"time"
"emperror.dev/errors"
"github.com/glebarez/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/system"
)
var (
o system.AtomicBool
db *gorm.DB
)
// Initialize configures the local SQLite database for Wings and ensures that the models have
// been fully migrated.
func Initialize() error {
if !o.SwapIf(true) {
panic("database: attempt to initialize more than once during application lifecycle")
}
p := filepath.Join(config.Get().System.RootDirectory, "wings.db")
instance, err := gorm.Open(sqlite.Open(p), &gorm.Config{
Logger: logger.Default.LogMode(logger.Silent),
})
if err != nil {
return errors.Wrap(err, "database: could not open database file")
}
db = instance
if sql, err := db.DB(); err != nil {
return errors.WithStack(err)
} else {
sql.SetMaxOpenConns(1)
sql.SetConnMaxLifetime(time.Hour)
}
if tx := db.Exec("PRAGMA synchronous = OFF"); tx.Error != nil {
return errors.WithStack(tx.Error)
}
if tx := db.Exec("PRAGMA journal_mode = MEMORY"); tx.Error != nil {
return errors.WithStack(tx.Error)
}
if err := db.AutoMigrate(&models.Activity{}); err != nil {
return errors.WithStack(err)
}
return nil
}
// Instance returns the gorm database instance that was configured when the application was
// booted.
func Instance() *gorm.DB {
if db == nil {
panic("database: attempt to access instance before initialized")
}
return db
}
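
With the database initialized as above and the Activity model shown later in this diff, recording an event is a single gorm Create call. The wrapper below is only a sketch of a possible call site; the package, function, and event names are assumptions and do not appear in this diff:

package activity

import (
	"emperror.dev/errors"

	"github.com/pterodactyl/wings/internal/database"
	"github.com/pterodactyl/wings/internal/models"
)

// Record stores a single event row; the cron jobs in this diff batch these
// rows up, send them to the Panel, and then delete them.
func Record(serverUUID, userUUID, ip string, event models.Event) error {
	entry := models.Activity{Server: serverUUID, Event: event, IP: ip}
	if tx := database.Instance().Create(entry.SetUser(userUUID)); tx.Error != nil {
		return errors.Wrap(tx.Error, "activity: failed to store event")
	}
	return nil
}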


@@ -1,69 +0,0 @@
package models
import (
"time"
"gorm.io/gorm"
"github.com/pterodactyl/wings/system"
)
type Event string
type ActivityMeta map[string]interface{}
// Activity defines an activity log event for a server entity performed by a user. This is
// used for tracking commands, power actions, and SFTP events so that they can be reconciled
// and sent back to the Panel instance to be displayed to the user.
type Activity struct {
ID int `gorm:"primaryKey;not null" json:"-"`
// User is UUID of the user that triggered this event, or an empty string if the event
// cannot be tied to a specific user, in which case we will assume it was the system
// user.
User JsonNullString `gorm:"type:uuid" json:"user"`
// Server is the UUID of the server this event is associated with.
Server string `gorm:"type:uuid;not null" json:"server"`
// Event is a string that describes what occurred, and is used by the Panel instance to
// properly associate this event in the activity logs.
Event Event `gorm:"index;not null" json:"event"`
// Metadata is either a null value, string, or a JSON blob with additional event specific
// metadata that can be provided.
Metadata ActivityMeta `gorm:"serializer:json" json:"metadata"`
// IP is the IP address that triggered this event, or an empty string if it cannot be
// determined properly. This should be the connecting user's IP address, and not the
// internal system IP.
IP string `gorm:"not null" json:"ip"`
Timestamp time.Time `gorm:"not null" json:"timestamp"`
}
// SetUser sets the current user that performed the action. If an empty string is provided
// it is cast into a null value when stored.
func (a Activity) SetUser(u string) *Activity {
var ns JsonNullString
if u == "" {
if err := ns.Scan(nil); err != nil {
panic(err)
}
} else {
if err := ns.Scan(u); err != nil {
panic(err)
}
}
a.User = ns
return &a
}
// BeforeCreate executes before we create any activity entry to ensure the IP address
// is trimmed down to remove any extraneous data, and the timestamp is set to the current
// system time and then stored as UTC.
func (a *Activity) BeforeCreate(_ *gorm.DB) error {
a.IP = system.TrimIPSuffix(a.IP)
if a.Timestamp.IsZero() {
a.Timestamp = time.Now()
}
a.Timestamp = a.Timestamp.UTC()
if a.Metadata == nil {
a.Metadata = ActivityMeta{}
}
return nil
}


@@ -1,32 +0,0 @@
package models
import (
"database/sql"
"emperror.dev/errors"
"github.com/goccy/go-json"
)
type JsonNullString struct {
sql.NullString
}
func (v JsonNullString) MarshalJSON() ([]byte, error) {
if v.Valid {
return json.Marshal(v.String)
} else {
return json.Marshal(nil)
}
}
func (v *JsonNullString) UnmarshalJSON(data []byte) error {
var s *string
if err := json.Unmarshal(data, &s); err != nil {
return errors.WithStack(err)
}
if s != nil {
v.String = *s
}
v.Valid = s != nil
return nil
}


@@ -1,330 +0,0 @@
package vhd
import (
"context"
"emperror.dev/errors"
"fmt"
"github.com/pterodactyl/wings/config"
"github.com/spf13/afero"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
)
var (
ErrInvalidDiskPathTarget = errors.Sentinel("vhd: disk path is a directory or symlink")
ErrMountPathNotDirectory = errors.Sentinel("vhd: mount point is not a directory")
ErrFilesystemMounted = errors.Sentinel("vhd: filesystem is already mounted")
ErrFilesystemNotMounted = errors.Sentinel("vhd: filesystem is not mounted")
ErrFilesystemExists = errors.Sentinel("vhd: filesystem already exists on disk")
)
var useDdAllocation bool
var setDdAllocator sync.Once
// hasExitCode allows this code to test the response error to see if there is
// an exit code available from the command call that can be used to determine if
// something went wrong.
type hasExitCode interface {
ExitCode() int
}
// Commander defines an interface that must be met for executing commands on the
// underlying OS. By default the vhd package will use Go's exec.Cmd type for
// execution. This interface allows stubbing out on tests, or potentially custom
// setups down the line.
type Commander interface {
Run() error
Output() ([]byte, error)
String() string
}
// CommanderProvider is a function that provides a struct meeting the Commander
// interface requirements.
type CommanderProvider func(ctx context.Context, name string, args ...string) Commander
// CfgOption is a configuration option callback for the Disk.
type CfgOption func(d *Disk) *Disk
// Disk represents the underlying virtual disk for the instance.
type Disk struct {
mu sync.RWMutex
// The total size of the disk allowed in bytes.
size int64
// The path where the disk image should be created.
diskPath string
// The point at which this disk should be made available on the system. This
// is where files can be read/written to.
mountAt string
fs afero.Fs
commander CommanderProvider
}
// DiskPath returns the underlying path that contains the virtual disk for the server
// identified by its UUID.
func DiskPath(uuid string) string {
return filepath.Join(config.Get().System.Data, ".vhd/", uuid+".img")
}
// Enabled returns true when VHD support is enabled on the instance.
func Enabled() bool {
return config.Get().Servers.Filesystem.Driver == config.FSDriverVHD
}
// New returns a new Disk instance. The "size" parameter should be provided in
// bytes of space allowed for the disk. An additional slice of option callbacks
// can be provided to programmatically swap out the underlying filesystem
// implementation or the underlying command execution engine.
func New(size int64, diskPath string, mountAt string, opts ...func(*Disk)) *Disk {
if diskPath == "" || mountAt == "" {
panic("vhd: cannot specify an empty disk or mount path")
}
d := Disk{
size: size,
diskPath: diskPath,
mountAt: mountAt,
fs: afero.NewOsFs(),
commander: func(ctx context.Context, name string, args ...string) Commander {
return exec.CommandContext(ctx, name, args...)
},
}
for _, opt := range opts {
opt(&d)
}
return &d
}
// WithFs allows for a different underlying filesystem to be provided to the
// virtual disk manager.
func WithFs(fs afero.Fs) func(*Disk) {
return func(d *Disk) {
d.fs = fs
}
}
// WithCommander allows a different Commander provider to be provided.
func WithCommander(c CommanderProvider) func(*Disk) {
return func(d *Disk) {
d.commander = c
}
}
func (d *Disk) Path() string {
return d.diskPath
}
func (d *Disk) MountPath() string {
return d.mountAt
}
// Exists reports if the disk exists on the system yet or not. This only verifies
// the presence of the disk image, not the validity of it. An error is returned
// if the path exists but the destination is not a file or is a symlink.
func (d *Disk) Exists() (bool, error) {
d.mu.RLock()
defer d.mu.RUnlock()
st, err := d.fs.Stat(d.diskPath)
if err != nil && os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, errors.WithStack(err)
}
if !st.IsDir() && st.Mode()&os.ModeSymlink == 0 {
return true, nil
}
return false, errors.WithStack(ErrInvalidDiskPathTarget)
}
// IsMounted checks to see if the given disk is currently mounted.
func (d *Disk) IsMounted(ctx context.Context) (bool, error) {
find := d.mountAt + " ext4"
cmd := d.commander(ctx, "grep", "-qs", find, "/proc/mounts")
if err := cmd.Run(); err != nil {
if v, ok := err.(hasExitCode); ok {
if v.ExitCode() == 1 {
return false, nil
}
}
return false, errors.Wrap(err, "vhd: failed to execute grep for mount existence")
}
return true, nil
}
// Mount attempts to mount the disk as configured. If it does not exist or the
// mount command fails an error will be returned to the caller. This does not
// attempt to create the disk if it is missing from the filesystem.
//
// Attempting to mount a disk which does not exist will result in an error being
// returned to the caller. If the disk is already mounted an ErrFilesystemMounted
// error is returned to the caller.
func (d *Disk) Mount(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.mount(ctx)
}
// Unmount attempts to unmount the disk from the system. If the disk is not
// currently mounted this function is a no-op and ErrFilesystemNotMounted is
// returned to the caller.
func (d *Disk) Unmount(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.unmount(ctx)
}
// Allocate executes the "fallocate" command on the disk. This will first unmount
// the disk from the system before attempting to actually allocate the space. If
// this disk already exists on the machine it will be resized accordingly.
//
// DANGER! This will unmount the disk from the machine while performing this
// action, use caution when calling it during normal processes.
func (d *Disk) Allocate(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
if exists, err := d.Exists(); exists {
// If the disk currently exists attempt to unmount the mount point before
// allocating space.
if err := d.Unmount(ctx); err != nil {
return errors.WithStackIf(err)
}
} else if err != nil {
return errors.Wrap(err, "vhd: failed to check for existence of root disk")
}
trim := path.Base(d.diskPath)
if err := d.fs.MkdirAll(strings.TrimSuffix(d.diskPath, trim), 0700); err != nil {
return errors.Wrap(err, "vhd: failed to create base vhd disk directory")
}
cmd := d.allocationCmd(ctx)
if _, err := cmd.Output(); err != nil {
msg := "vhd: failed to execute space allocation command"
if v, ok := err.(*exec.ExitError); ok {
stderr := strings.Trim(string(v.Stderr), ".\n")
if !useDdAllocation && strings.HasSuffix(stderr, "not supported") {
// Try again: fallocate is not supported on some filesystems so we'll fall
// back to making use of dd for subsequent operations.
setDdAllocator.Do(func() {
useDdAllocation = true
})
return d.Allocate(ctx)
}
msg = msg + ": " + stderr
}
return errors.Wrap(err, msg)
}
return errors.WithStack(d.fs.Chmod(d.diskPath, 0600))
}
// Resize will change the internal disk size limit and then allocate the new
// space to the disk automatically.
func (d *Disk) Resize(ctx context.Context, size int64) error {
atomic.StoreInt64(&d.size, size)
return d.Allocate(ctx)
}
// Destroy removes the underlying allocated disk image and unmounts the disk.
func (d *Disk) Destroy(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
if err := d.unmount(ctx); err != nil {
return errors.WithStackIf(err)
}
return errors.WithStackIf(d.fs.RemoveAll(d.mountAt))
}
// MakeFilesystem will attempt to execute the "mkfs" command against the disk on
// the machine. If the disk has already been created this command will return an
// ErrFilesystemExists error to the caller. You should manually unmount the disk
// if it shouldn't be mounted at this point.
func (d *Disk) MakeFilesystem(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
// If no error is returned when mounting DO NOT execute this command as it will
// completely destroy the data stored at that location.
err := d.Mount(ctx)
if err == nil || errors.Is(err, ErrFilesystemMounted) {
// If it wasn't already mounted try to clean up at this point and unmount
// the disk. If this fails just ignore it for now.
if err != nil {
_ = d.Unmount(ctx)
}
return ErrFilesystemExists
}
if !strings.Contains(err.Error(), "can't find in /etc/fstab") && !strings.Contains(err.Error(), "exit status 32") {
return errors.WrapIf(err, "vhd: unexpected error from mount command")
}
// As long as we got an error back that was because we couldn't find the disk
// in the /etc/fstab file we're good. Otherwise it means the disk probably exists
// or something else went wrong.
//
// Because this is a destructive command, and non-tty based execution of it implies
// "-F" (force), we need to only run it when we can guarantee it doesn't already
// exist. No vague "maybe that error is expected" allowed here.
cmd := d.commander(ctx, "mkfs", "-t", "ext4", d.diskPath)
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "vhd: failed to make filesystem for disk")
}
return nil
}
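// A typical first-use sequence for a brand new server disk, sketched from the
// exported API above (the uuid, size, and mount path are illustrative; ctx is a
// context.Context and error handling is elided):
//
//    d := New(10*1024*1024*1024, DiskPath(uuid), "/var/lib/pterodactyl/volumes/"+uuid)
//    _ = d.Allocate(ctx)       // create (or resize) the backing image file
//    _ = d.MakeFilesystem(ctx) // only formats the image if it cannot be mounted yet
//    _ = d.Mount(ctx)          // loop-mount the image at the server's data path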
func (d *Disk) mount(ctx context.Context) error {
if isMounted, err := d.IsMounted(ctx); err != nil {
return errors.WithStackIf(err)
} else if isMounted {
return ErrFilesystemMounted
}
if st, err := d.fs.Stat(d.mountAt); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "vhd: failed to stat mount path")
} else if os.IsNotExist(err) {
if err := d.fs.MkdirAll(d.mountAt, 0700); err != nil {
return errors.Wrap(err, "vhd: failed to create mount path")
}
} else if !st.IsDir() {
return errors.WithStack(ErrMountPathNotDirectory)
}
u := config.Get().System.User
if err := d.fs.Chown(d.mountAt, u.Uid, u.Gid); err != nil {
return errors.Wrap(err, "vhd: failed to chown mount point")
}
cmd := d.commander(ctx, "mount", "-t", "auto", "-o", "loop", d.diskPath, d.mountAt)
if _, err := cmd.Output(); err != nil {
msg := "vhd: failed to mount disk"
if v, ok := err.(*exec.ExitError); ok {
msg = msg + ": " + strings.Trim(string(v.Stderr), ".\n")
}
return errors.Wrap(err, msg)
}
return nil
}
func (d *Disk) unmount(ctx context.Context) error {
cmd := d.commander(ctx, "umount", d.mountAt)
if err := cmd.Run(); err != nil {
v, ok := err.(hasExitCode)
if ok && v.ExitCode() == 32 {
return ErrFilesystemNotMounted
}
return errors.Wrap(err, "vhd: failed to execute unmount command for disk")
}
return nil
}
// allocationCmd returns the command to allocate the disk image. This will attempt to
// use the fallocate command if available, otherwise it will fall back to dd if the
// fallocate command has previously failed.
//
// We use 1024 as the multiplier for all of the disk space logic within the application.
// Passing "K" (/1024) is the same as "KiB" for fallocate, but is different than "KB" (/1000).
func (d *Disk) allocationCmd(ctx context.Context) Commander {
s := atomic.LoadInt64(&d.size) / 1024
if useDdAllocation {
return d.commander(ctx, "dd", "if=/dev/zero", fmt.Sprintf("of=%s", d.diskPath), fmt.Sprintf("bs=%dk", s), "count=1")
}
return d.commander(ctx, "fallocate", "-l", fmt.Sprintf("%dK", s), d.diskPath)
}
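The 1024-based sizing above is easiest to check with a concrete figure. This standalone sketch (the 100 MiB size and the /disk.img path mirror the package tests below) prints the two commands the allocator would build:

package main

import "fmt"

func main() {
    var size int64 = 100 * 1024 * 1024 // disk size in bytes, as passed to New()
    s := size / 1024                   // 102400, read as KiB by fallocate ("K") and dd ("k")
    fmt.Printf("fallocate -l %dK /disk.img\n", s)
    fmt.Printf("dd if=/dev/zero of=/disk.img bs=%dk count=1\n", s)
}

Both forms request 102400 KiB, which is exactly the "102400K" argument the Allocate test below expects.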

View File

@@ -1,476 +0,0 @@
package vhd
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"testing"
"github.com/pterodactyl/wings/config"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func init() {
config.Set(&config.Configuration{
AuthenticationToken: "token123",
System: config.SystemConfiguration{
User: struct {
Uid int
Gid int
}{Uid: 10, Gid: 10},
},
})
}
type mockCmd struct {
run func() error
output func() ([]byte, error)
string func() string
}
func (m *mockCmd) Run() error {
if m.run != nil {
return m.run()
}
return nil
}
func (m *mockCmd) Output() ([]byte, error) {
if m.output != nil {
return m.output()
}
return nil, nil
}
func (m *mockCmd) String() string {
if m.string != nil {
return m.string()
}
return ""
}
var _ Commander = (*mockCmd)(nil)
type mockedExitCode struct {
code int
}
func (m *mockedExitCode) ExitCode() int {
return m.code
}
func (m *mockedExitCode) Error() string {
return fmt.Sprintf("mocked exit code: code %d", m.code)
}
func newMockDisk(c CommanderProvider) *Disk {
commander := func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{}
}
w := commander
if c != nil {
w = c
}
return New(100 * 1024 * 1024, "/disk.img", "/mnt", WithFs(afero.NewMemMapFs()), WithCommander(w))
}
func Test_New(t *testing.T) {
t.Run("creates expected struct", func(t *testing.T) {
d := New(100 * 1024 * 1024, "/disk.img", "/mnt")
assert.NotNil(t, d)
assert.Equal(t, int64(100 * 1024 * 1024), d.size)
assert.Equal(t, "/disk.img", d.diskPath)
assert.Equal(t, "/mnt", d.mountAt)
// Ensure by default we get a commander interface returned and that it
// returns an *exec.Cmd.
o := d.commander(context.TODO(), "foo", "-bar")
assert.NotNil(t, o)
_, ok := o.(Commander)
assert.True(t, ok)
_, ok = o.(*exec.Cmd)
assert.True(t, ok)
})
t.Run("creates an instance with custom options", func(t *testing.T) {
fs := afero.NewMemMapFs()
cprov := struct {
Commander
}{}
c := func(ctx context.Context, name string, args ...string) Commander {
return &cprov
}
d := New(100, "/disk.img", "/mnt", WithFs(fs), WithCommander(c))
assert.NotNil(t, d)
assert.Same(t, fs, d.fs)
assert.Same(t, &cprov, d.commander(context.TODO(), ""))
})
t.Run("panics if either path is empty", func(t *testing.T) {
assert.Panics(t, func() {
_ = New(100, "", "/bar")
})
assert.Panics(t, func() {
_ = New(100, "/foo", "")
})
})
}
func TestDisk_Exists(t *testing.T) {
t.Run("it exists", func(t *testing.T) {
d := newMockDisk(nil)
f, err := d.fs.Create("/disk.img")
require.NoError(t, err)
_ = f.Close()
exists, err := d.Exists()
assert.NoError(t, err)
assert.True(t, exists)
})
t.Run("it does not exist", func(t *testing.T) {
d := newMockDisk(nil)
exists, err := d.Exists()
assert.NoError(t, err)
assert.False(t, exists)
})
t.Run("it reports errors", func(t *testing.T) {
d := newMockDisk(nil)
err := d.fs.Mkdir("/disk.img", 0600)
require.NoError(t, err)
exists, err := d.Exists()
assert.Error(t, err)
assert.False(t, exists)
assert.EqualError(t, err, ErrInvalidDiskPathTarget.Error())
})
}
func TestDisk_IsMounted(t *testing.T) {
t.Run("executes command and finds mounted disk", func(t *testing.T) {
is := assert.New(t)
var called bool
pctx := context.TODO()
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
is.Same(pctx, ctx)
is.Equal("grep", name)
is.Len(args, 3)
is.Equal([]string{"-qs", "/mnt ext4", "/proc/mounts"}, args)
return &mockCmd{}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(pctx)
is.NoError(err)
is.True(mnt)
is.True(called)
})
t.Run("handles exit code 1 gracefully", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(context.TODO())
assert.NoError(t, err)
assert.False(t, mnt)
assert.True(t, called)
})
t.Run("handles unexpected errors successfully", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 3}
},
}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(context.TODO())
assert.Error(t, err)
assert.False(t, mnt)
})
}
func TestDisk_Mount(t *testing.T) {
failedCmd := func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{run: func() error {
return &mockedExitCode{code: 1}
}}
}
t.Run("error is returned if mount point is not a directory", func(t *testing.T) {
disk := newMockDisk(failedCmd)
_, err := disk.fs.Create("/mnt")
require.NoError(t, err)
err = disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrMountPathNotDirectory.Error())
})
t.Run("error is returned if mount point cannot be created", func(t *testing.T) {
disk := newMockDisk(failedCmd)
disk.fs = afero.NewReadOnlyFs(disk.fs)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to create mount path: operation not permitted")
})
t.Run("error is returned if already mounted", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrFilesystemMounted.Error())
})
t.Run("error is returned if mount command fails", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
output: func() ([]byte, error) {
called = true
assert.Equal(t, "mount", name)
assert.Equal(t, []string{"-t", "auto", "-o", "loop", "/disk.img", "/mnt"}, args)
return nil, &exec.ExitError{
ProcessState: &os.ProcessState{},
Stderr: []byte("foo bar.\n"),
}
},
}
}
disk := newMockDisk(cmd)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to mount disk: foo bar: exit status 0")
assert.True(t, called)
})
t.Run("disk can be mounted at existing path", func(t *testing.T) {
disk := newMockDisk(failedCmd)
require.NoError(t, disk.fs.Mkdir("/mnt", 0600))
err := disk.Mount(context.TODO())
assert.NoError(t, err)
})
t.Run("disk can be mounted at non-existing path", func(t *testing.T) {
disk := newMockDisk(failedCmd)
err := disk.Mount(context.TODO())
assert.NoError(t, err)
st, err := disk.fs.Stat("/mnt")
assert.NoError(t, err)
assert.True(t, st.IsDir())
})
}
func TestDisk_Unmount(t *testing.T) {
t.Run("can unmount a disk", func(t *testing.T) {
is := assert.New(t)
pctx := context.TODO()
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
is.Same(pctx, ctx)
is.Equal("umount", name)
is.Equal([]string{"/mnt"}, args)
return &mockCmd{}
}
disk := newMockDisk(cmd)
err := disk.Unmount(pctx)
is.NoError(err)
is.True(called)
})
t.Run("handles exit code 32 correctly", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 32}
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.NoError(t, err)
})
t.Run("non code 32 errors are returned as error", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.Error(t, err)
})
t.Run("errors without ExitCode function are returned", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return errors.New("foo bar")
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.Error(t, err)
})
}
func TestDisk_Allocate(t *testing.T) {
t.Run("disk is unmounted before allocating space", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
output: func() ([]byte, error) {
called = true
assert.Equal(t, "fallocate", name)
assert.Equal(t, []string{"-l", "102400K", "/disk.img"}, args)
return nil, nil
},
}
}
disk := newMockDisk(cmd)
err := disk.fs.Mkdir("/mnt", 0600)
require.NoError(t, err)
err = disk.Allocate(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("disk space is allocated even when not exists", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.Allocate(context.TODO())
assert.NoError(t, err)
})
t.Run("error is returned if command fails", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
output: func() ([]byte, error) {
return nil, &exec.ExitError{
ProcessState: &os.ProcessState{},
Stderr: []byte("foo bar.\n"),
}
},
}
}
disk := newMockDisk(cmd)
_, err := disk.fs.Create("/disk.img")
require.NoError(t, err)
err = disk.Allocate(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to execute fallocate command: foo bar: exit status 0")
})
}
func TestDisk_MakeFilesystem(t *testing.T) {
t.Run("filesystem is created if not found in /etc/fstab", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
// Expect the call from IsMounted here and just return what we need
// to indicate that nothing is currently mounted.
if name == "grep" {
return &mockedExitCode{code: 1}
}
called = true
assert.Equal(t, "mkfs", name)
assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
return nil
},
output: func() ([]byte, error) {
return nil, errors.New("error: can't find in /etc/fstab foo bar testing")
},
}
}
disk := newMockDisk(cmd)
err := disk.MakeFilesystem(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("filesystem is created if error is returned from mount command", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
// Expect the call from IsMounted here and just return what we need
// to indicate that nothing is currently mounted.
if name == "grep" {
return &mockedExitCode{code: 1}
}
called = true
assert.Equal(t, "mkfs", name)
assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
return nil
},
output: func() ([]byte, error) {
if name == "mount" {
return nil, &exec.ExitError{
Stderr: []byte("foo bar: exit status 32\n"),
}
}
return nil, nil
},
}
}
disk := newMockDisk(cmd)
err := disk.MakeFilesystem(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("error is returned if currently mounted", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.MakeFilesystem(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrFilesystemExists.Error())
})
}

View File

@@ -15,7 +15,7 @@ import (
"github.com/icza/dyno"
"github.com/magiconair/properties"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
"github.com/pterodactyl/wings/config"
)

View File

@@ -10,14 +10,11 @@ import (
"strings"
"time"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/cenkalti/backoff/v4"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)
@@ -33,7 +30,6 @@ type Client interface {
SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
SetTransferStatus(ctx context.Context, uuid string, successful bool) error
ValidateSftpCredentials(ctx context.Context, request SftpAuthRequest) (SftpAuthResponse, error)
SendActivityLogs(ctx context.Context, activity []models.Activity) error
}
type client struct {
@@ -60,18 +56,6 @@ func New(base string, opts ...ClientOption) Client {
return &c
}
// NewFromConfig returns a new Client using the configuration passed through
// by the caller.
func NewFromConfig(cfg *config.Configuration, opts ...ClientOption) Client {
passOpts := []ClientOption{
WithCredentials(cfg.AuthenticationTokenId, cfg.AuthenticationToken),
WithHttpClient(&http.Client{
Timeout: time.Second * time.Duration(cfg.RemoteQuery.Timeout),
}),
}
return New(cfg.PanelLocation, append(passOpts, opts...)...)
}
// WithCredentials sets the credentials to use when making request to the remote
// API endpoint.
func WithCredentials(id, token string) ClientOption {
@@ -144,19 +128,10 @@ func (c *client) requestOnce(ctx context.Context, method, path string, body io.R
// and adds the required authentication headers to the request that is being
// created. Errors returned will be of the RequestError type if there was some
// type of response from the API that can be parsed.
func (c *client) request(ctx context.Context, method, path string, body *bytes.Buffer, opts ...func(r *http.Request)) (*Response, error) {
func (c *client) request(ctx context.Context, method, path string, body io.Reader, opts ...func(r *http.Request)) (*Response, error) {
var res *Response
err := backoff.Retry(func() error {
var b bytes.Buffer
if body != nil {
// We have to create a copy of the body, otherwise attempting this request again will
// send no data if there was initially a body since the "requestOnce" method will read
// the whole buffer, thus leaving it empty at the end.
if _, err := b.Write(body.Bytes()); err != nil {
return backoff.Permanent(errors.Wrap(err, "http: failed to copy body buffer"))
}
}
r, err := c.requestOnce(ctx, method, path, &b, opts...)
r, err := c.requestOnce(ctx, method, path, body, opts...)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return backoff.Permanent(err)
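The comment in the removed block above is the key detail: a bytes.Buffer, like any io.Reader, is drained by the first attempt, so a bare retry would send an empty request body. A standalone sketch of that behaviour using only the standard library:

package main

import (
    "bytes"
    "fmt"
    "io"
)

func main() {
    body := bytes.NewBufferString(`{"hello":"world"}`)
    first, _ := io.ReadAll(body)  // the first attempt drains the buffer
    second, _ := io.ReadAll(body) // a retry now has nothing left to send
    fmt.Printf("first attempt read %d bytes, retry read %d bytes\n", len(first), len(second))
}

That is why the removed version copied body.Bytes() into a fresh buffer inside the retry closure before every attempt.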

View File

@@ -6,8 +6,6 @@ import (
"strconv"
"sync"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"golang.org/x/sync/errgroup"
@@ -180,16 +178,6 @@ func (c *client) SendRestorationStatus(ctx context.Context, backup string, succe
return nil
}
// SendActivityLogs sends activity logs back to the Panel for processing.
func (c *client) SendActivityLogs(ctx context.Context, activity []models.Activity) error {
resp, err := c.Post(ctx, "/activity", d{"data": activity})
if err != nil {
return errors.WithStackIf(err)
}
_ = resp.Body.Close()
return nil
}
// getServersPaged returns a subset of servers from the Panel API using the
// pagination query parameters.
func (c *client) getServersPaged(ctx context.Context, page, limit int) ([]RawServerData, Pagination, error) {

View File

@@ -87,7 +87,6 @@ type SftpAuthRequest struct {
// user for the SFTP subsystem.
type SftpAuthResponse struct {
Server string `json:"server"`
User string `json:"user"`
Permissions []string `json:"permissions"`
}
@@ -157,15 +156,9 @@ type BackupRemoteUploadResponse struct {
PartSize int64 `json:"part_size"`
}
type BackupPart struct {
ETag string `json:"etag"`
PartNumber int `json:"part_number"`
}
type BackupRequest struct {
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Successful bool `json:"successful"`
Parts []BackupPart `json:"parts"`
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Successful bool `json:"successful"`
}
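Taken together, the struct tags above describe the backup-completion payload sent to the Panel, and the removed Parts field is what carried the multipart upload ETags. A standalone sketch of the 1.7-era request body (the struct definitions are copied from the removed lines so the sketch runs on its own; the checksum is the SHA-1 of empty input and the ETag is purely illustrative):

package main

import (
    "encoding/json"
    "fmt"
)

type BackupPart struct {
    ETag       string `json:"etag"`
    PartNumber int    `json:"part_number"`
}

type BackupRequest struct {
    Checksum     string       `json:"checksum"`
    ChecksumType string       `json:"checksum_type"`
    Size         int64        `json:"size"`
    Successful   bool         `json:"successful"`
    Parts        []BackupPart `json:"parts"`
}

func main() {
    req := BackupRequest{
        Checksum:     "da39a3ee5e6b4b0d3255bfef95601890afd80709",
        ChecksumType: "sha1",
        Size:         104857600,
        Successful:   true,
        Parts:        []BackupPart{{ETag: "abc123", PartNumber: 1}},
    }
    out, _ := json.MarshalIndent(req, "", "  ")
    fmt.Println(string(out))
}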

View File

@@ -4,7 +4,6 @@ import (
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/router/middleware"
wserver "github.com/pterodactyl/wings/server"
@@ -16,7 +15,6 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
router := gin.New()
router.Use(gin.Recovery())
router.SetTrustedProxies(config.Get().Api.TrustedProxies)
router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
// @todo log this into a different file so you can setup IP blocking for abusive requests and such.

View File

@@ -9,7 +9,6 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/router/downloader"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/tokens"
@@ -181,7 +180,7 @@ func postServerReinstall(c *gin.Context) {
c.Status(http.StatusAccepted)
}
// Deletes a server from the wings daemon and dissociate its objects.
// Deletes a server from the wings daemon and dissociate it's objects.
func deleteServer(c *gin.Context) {
s := middleware.ExtractServer(c)

View File

@@ -13,8 +13,6 @@ import (
"strconv"
"strings"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/config"
"emperror.dev/errors"
@@ -602,11 +600,6 @@ func postServerUploadFiles(c *gin.Context) {
if err := handleFileUpload(p, s, header); err != nil {
NewServerError(err, s).Abort(c)
return
} else {
s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.ClientIP()), server.ActivityFileUploaded, models.ActivityMeta{
"file": header.Filename,
"directory": filepath.Clean(directory),
})
}
}
}
@@ -624,5 +617,6 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
if err := s.Filesystem().Writefile(p, file); err != nil {
return err
}
return nil
}

View File

@@ -32,7 +32,7 @@ func getServerWebsocket(c *gin.Context) {
ctx, cancel := context.WithCancel(c.Request.Context())
defer cancel()
handler, err := websocket.GetHandler(s, c.Writer, c.Request, c)
handler, err := websocket.GetHandler(s, c.Writer, c.Request)
if err != nil {
NewServerError(err, s).Abort(c)
return

View File

@@ -12,6 +12,7 @@ import (
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"emperror.dev/errors"
@@ -29,9 +30,19 @@ import (
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/server/filesystem"
"github.com/pterodactyl/wings/system"
)
const progressWidth = 25
// Number of ticks in the progress bar
const ticks = 25
// 100% / number of ticks = percentage represented by each tick
const tickPercentage = 100 / ticks
type downloadProgress struct {
size int64
progress int64
}
// Data passed over to initiate a server transfer.
type serverTransferRequest struct {
@@ -84,7 +95,7 @@ func getServerArchive(c *gin.Context) {
return
}
// Compute sha256 checksum.
// Compute sha1 checksum.
h := sha256.New()
f, err := os.Open(archivePath)
if err != nil {
@@ -173,35 +184,11 @@ func postServerArchive(c *gin.Context) {
return
}
// Get the disk usage of the server (used to calculate the progress of the archive process)
rawSize, err := s.Filesystem().DiskUsage(true)
if err != nil {
sendTransferLog("Failed to get disk usage for server, aborting transfer..")
l.WithField("error", err).Error("failed to get disk usage for server")
return
}
// Create an archive of the entire server's data directory.
a := &filesystem.Archive{
BasePath: s.Filesystem().Path(),
Progress: filesystem.NewProgress(rawSize),
}
// Send the archive progress to the websocket every 3 seconds.
ctx, cancel := context.WithCancel(s.Context())
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
sendTransferLog("Archiving " + p.Progress(progressWidth))
}
}
}(ctx, a.Progress, time.NewTicker(5*time.Second))
// Attempt to get an archive of the server.
if err := a.Create(getArchivePath(s.ID())); err != nil {
sendTransferLog("An error occurred while archiving the server: " + err.Error())
@@ -209,12 +196,6 @@ func postServerArchive(c *gin.Context) {
return
}
// Cancel the progress ticker.
cancel()
// Show 100% completion.
sendTransferLog("Archiving " + a.Progress.Progress(progressWidth))
sendTransferLog("Successfully created archive, attempting to notify panel..")
l.Info("successfully created server transfer archive, notifying panel..")
@@ -242,6 +223,12 @@ func postServerArchive(c *gin.Context) {
c.Status(http.StatusAccepted)
}
func (w *downloadProgress) Write(v []byte) (int, error) {
n := len(v)
atomic.AddInt64(&w.progress, int64(n))
return n, nil
}
// Log helper function to attach all errors and info output to a consistently formatted
// log string for easier querying.
func (str serverTransferRequest) log() *log.Entry {
@@ -334,7 +321,7 @@ func postTransfer(c *gin.Context) {
manager := middleware.ExtractManager(c)
u, err := uuid.Parse(data.ServerID)
if err != nil {
_ = WithError(c, err)
WithError(c, err)
return
}
// Force the server ID to be a valid UUID string at this point. If it is not an error
@@ -344,12 +331,11 @@ func postTransfer(c *gin.Context) {
data.log().Info("handling incoming server transfer request")
go func(data *serverTransferRequest) {
ctx := context.Background()
hasError := true
// Create a new server installer. This will only configure the environment and not
// run the installer scripts.
i, err := installer.New(ctx, manager, data.Server)
i, err := installer.New(context.Background(), manager, data.Server)
if err != nil {
_ = data.sendTransferStatus(manager.Client(), false)
data.log().WithField("error", err).Error("failed to validate received server data")
@@ -421,22 +407,25 @@ func postTransfer(c *gin.Context) {
sendTransferLog("Writing archive to disk...")
data.log().Info("writing transfer archive to disk...")
progress := filesystem.NewProgress(size)
// Send the archive progress to the websocket every 3 seconds.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
sendTransferLog("Downloading " + p.Progress(progressWidth))
}
// Copy the file.
progress := &downloadProgress{size: size}
ticker := time.NewTicker(3 * time.Second)
go func(progress *downloadProgress, t *time.Ticker) {
for range ticker.C {
// p = 100 (Downloaded)
// size = 1000 (Content-Length)
// p / size = 0.1
// * 100 = 10% (Multiply by 100 to get a percentage of the download)
// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
// 2.5 (Number of ticks as a float64)
// 2 (convert to an integer)
p := atomic.LoadInt64(&progress.progress)
// We have to cast these numbers to float in order to get a float result from the division.
width := ((float64(p) / float64(size)) * 100) / tickPercentage
bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
}
}(ctx, progress, time.NewTicker(5*time.Second))
}(progress, ticker)
var reader io.Reader
downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
@@ -449,16 +438,18 @@ func postTransfer(c *gin.Context) {
buf := make([]byte, 1024*4)
if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
ticker.Stop()
_ = file.Close()
sendTransferLog("Failed while writing archive file to disk: " + err.Error())
data.log().WithField("error", err).Error("failed to copy archive file to disk")
return
}
cancel()
ticker.Stop()
// Show 100% completion.
sendTransferLog("Downloading " + progress.Progress(progressWidth))
humanSize := system.FormatBytes(progress.size)
sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)
if err := file.Close(); err != nil {
data.log().WithField("error", err).Error("unable to close archive file on local filesystem")
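The tick arithmetic restored above can be exercised as a standalone program; the 100-byte and 1000-byte figures are the same illustrative numbers used in the inline comment block:

package main

import (
    "fmt"
    "strings"
)

func main() {
    const ticks = 25
    const tickPercentage = 100 / ticks // 4% of the transfer per "=" in the bar
    var downloaded, total int64 = 100, 1000
    width := ((float64(downloaded) / float64(total)) * 100) / tickPercentage
    bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
    fmt.Printf("Downloading [%s] %d / %d bytes\n", bar, downloaded, total)
}

At 10% complete this fills 2 of the 25 ticks, matching the 2.5-rounded-down-to-2 walk-through in the comments.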

View File

@@ -8,7 +8,6 @@ type UploadPayload struct {
jwt.Payload
ServerUuid string `json:"server_uuid"`
UserUuid string `json:"user_uuid"`
UniqueId string `json:"unique_id"`
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/goccy/go-json"
)
// The time at which Wings was booted. No JWT's created before this time are allowed to
@@ -34,15 +35,15 @@ func DenyJTI(jti string) {
denylist.Store(jti, time.Now())
}
// WebsocketPayload defines the JWT payload for a websocket connection. This JWT is passed along to
// the websocket after it has been connected to by sending an "auth" event.
// A JWT payload for Websocket connections. This JWT is passed along to the Websocket after
// it has been connected to by sending an "auth" event.
type WebsocketPayload struct {
jwt.Payload
sync.RWMutex
UserUUID string `json:"user_uuid"`
ServerUUID string `json:"server_uuid"`
Permissions []string `json:"permissions"`
UserID json.Number `json:"user_id"`
ServerUUID string `json:"server_uuid"`
Permissions []string `json:"permissions"`
}
// Returns the JWT payload.

View File

@@ -7,7 +7,6 @@ import (
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"

View File

@@ -8,16 +8,12 @@ import (
"sync"
"time"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/gin-gonic/gin"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/config"
@@ -44,7 +40,6 @@ type Handler struct {
Connection *websocket.Conn `json:"-"`
jwt *tokens.WebsocketPayload
server *server.Server
ra server.RequestActivity
uuid uuid.UUID
}
@@ -82,7 +77,7 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
}
// GetHandler returns a new websocket handler using the context provided.
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Handler, error) {
upgrader := websocket.Upgrader{
// Ensure that the websocket request is originating from the Panel itself,
// and not some other location.
@@ -114,7 +109,6 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin
Connection: conn,
jwt: nil,
server: s,
ra: s.NewRequestActivity("", c.ClientIP()),
uuid: u,
}, nil
}
@@ -270,7 +264,6 @@ func (h *Handler) GetJwt() *tokens.WebsocketPayload {
// setJwt sets the JWT for the websocket in a race-safe manner.
func (h *Handler) setJwt(token *tokens.WebsocketPayload) {
h.Lock()
h.ra = h.ra.SetUser(token.UserUUID)
h.jwt = token
h.Unlock()
}
@@ -372,10 +365,6 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
return nil
}
if err == nil {
h.server.SaveActivity(h.ra, models.Event(server.ActivityPowerPrefix+action), nil)
}
return err
}
case SendServerLogsEvent:
@@ -432,13 +421,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
}
}
if err := h.server.Environment.SendCommand(strings.Join(m.Args, "")); err != nil {
return err
}
h.server.SaveActivity(h.ra, server.ActivityConsoleCommand, models.ActivityMeta{
"command": strings.Join(m.Args, ""),
})
return nil
return h.server.Environment.SendCommand(strings.Join(m.Args, ""))
}
}

View File

@@ -1,5 +1,5 @@
Name: ptero-wings
Version: 1.7.0
Version: 1.5.3
Release: 1%{?dist}
Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
BuildArch: x86_64
@@ -91,9 +91,6 @@ rm -rf /var/log/pterodactyl
wings --version
%changelog
* Wed Sep 14 2022 Chance Callahan <ccallaha@redhat.com> - 1.7.0-1
- Updating specfile to match stable release.
* Wed Oct 27 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.3-1
- specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
- Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.3

View File

@@ -1,66 +0,0 @@
package server
import (
"context"
"time"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
)
const ActivityPowerPrefix = "server:power."
const (
ActivityConsoleCommand = models.Event("server:console.command")
ActivitySftpWrite = models.Event("server:sftp.write")
ActivitySftpCreate = models.Event("server:sftp.create")
ActivitySftpCreateDirectory = models.Event("server:sftp.create-directory")
ActivitySftpRename = models.Event("server:sftp.rename")
ActivitySftpDelete = models.Event("server:sftp.delete")
ActivityFileUploaded = models.Event("server:file.uploaded")
)
// RequestActivity is a wrapper around a LoggedEvent that is able to track additional request
// specific metadata including the specific user and IP address associated with all subsequent
// events. The internal logged event structure can be extracted by calling RequestActivity.Event().
type RequestActivity struct {
server string
user string
ip string
}
// Event returns the underlying logged event from the RequestEvent instance and sets the
// specific event and metadata on it.
func (ra RequestActivity) Event(event models.Event, metadata models.ActivityMeta) *models.Activity {
a := models.Activity{Server: ra.server, IP: ra.ip, Event: event, Metadata: metadata}
return a.SetUser(ra.user)
}
// SetUser clones the RequestActivity struct and sets a new user value on the copy
// before returning it.
func (ra RequestActivity) SetUser(u string) RequestActivity {
c := ra
c.user = u
return c
}
func (s *Server) NewRequestActivity(user string, ip string) RequestActivity {
return RequestActivity{server: s.ID(), user: user, ip: ip}
}
// SaveActivity saves an activity entry to the database in a background routine. If an error is
// encountered it is logged but not returned to the caller.
func (s *Server) SaveActivity(a RequestActivity, event models.Event, metadata models.ActivityMeta) {
ctx, cancel := context.WithTimeout(s.Context(), time.Second*3)
go func() {
defer cancel()
if tx := database.Instance().WithContext(ctx).Create(a.Event(event, metadata)); tx.Error != nil {
s.Log().WithField("error", errors.WithStack(tx.Error)).
WithField("event", event).
Error("activity: failed to save event")
}
}()
}

View File

@@ -32,7 +32,7 @@ type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mti
// noinspection GoNameStartsWithPackageName
type BackupInterface interface {
// SetClient sets the API request client on the backup interface.
SetClient(remote.Client)
SetClient(c remote.Client)
// Identifier returns the UUID of this backup as tracked by the panel
// instance.
Identifier() string
@@ -41,7 +41,7 @@ type BackupInterface interface {
WithLogContext(map[string]interface{})
// Generate creates a backup in whatever the configured source for the
// specific implementation is.
Generate(context.Context, string, string) (*ArchiveDetails, error)
Generate(ctx context.Context, basePath string, ignore string) (*ArchiveDetails, error)
// Ignored returns the ignored files for this backup instance.
Ignored() string
// Checksum returns a SHA1 checksum for the generated backup.
@@ -53,13 +53,13 @@ type BackupInterface interface {
// to store it until it is moved to the final spot.
Path() string
// Details returns details about the archive.
Details(context.Context, []remote.BackupPart) (*ArchiveDetails, error)
Details(ctx context.Context) (*ArchiveDetails, error)
// Remove removes a backup file.
Remove() error
// Restore is called when a backup is ready to be restored to the disk from
// the given source. Not every backup implementation will support this nor
// will every implementation require a reader be provided.
Restore(context.Context, io.Reader, RestoreCallback) error
Restore(ctx context.Context, reader io.Reader, callback RestoreCallback) error
}
type Backup struct {
@@ -119,8 +119,8 @@ func (b *Backup) Checksum() ([]byte, error) {
// Details returns both the checksum and size of the archive currently stored on
// the disk to the caller.
func (b *Backup) Details(ctx context.Context, parts []remote.BackupPart) (*ArchiveDetails, error) {
ad := ArchiveDetails{ChecksumType: "sha1", Parts: parts}
func (b *Backup) Details(ctx context.Context) (*ArchiveDetails, error) {
ad := ArchiveDetails{ChecksumType: "sha1"}
g, ctx := errgroup.WithContext(ctx)
g.Go(func() error {
@@ -162,10 +162,9 @@ func (b *Backup) log() *log.Entry {
}
type ArchiveDetails struct {
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Parts []remote.BackupPart `json:"parts"`
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
}
// ToRequest returns a request object.
@@ -175,6 +174,5 @@ func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
ChecksumType: ad.ChecksumType,
Size: ad.Size,
Successful: successful,
Parts: ad.Parts,
}
}

View File

@@ -69,7 +69,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
}
b.log().Info("created backup successfully")
ad, err := b.Details(ctx, nil)
ad, err := b.Details(ctx)
if err != nil {
return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
}

View File

@@ -71,11 +71,10 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*Arch
}
defer rc.Close()
parts, err := s.generateRemoteRequest(ctx, rc)
if err != nil {
if err := s.generateRemoteRequest(ctx, rc); err != nil {
return nil, err
}
ad, err := s.Details(ctx, parts)
ad, err := s.Details(ctx)
if err != nil {
return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
}
@@ -126,20 +125,20 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
}
// Generates the remote S3 request and begins the upload.
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) error {
defer rc.Close()
s.log().Debug("attempting to get size of backup...")
size, err := s.Backup.Size()
if err != nil {
return nil, err
return err
}
s.log().WithField("size", size).Debug("got size of backup")
s.log().Debug("attempting to get S3 upload urls from Panel...")
urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
if err != nil {
return nil, err
return err
}
s.log().Debug("got S3 upload urls from the Panel")
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
@@ -157,26 +156,22 @@ func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser)
}
// Attempt to upload the part.
etag, err := uploader.uploadPart(ctx, part, partSize)
if err != nil {
if _, err := uploader.uploadPart(ctx, part, partSize); err != nil {
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
return nil, err
return err
}
uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
ETag: etag,
PartNumber: i + 1,
})
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
}
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
return uploader.uploadedParts, nil
return nil
}
type s3FileUploader struct {
io.ReadCloser
client *http.Client
uploadedParts []remote.BackupPart
client *http.Client
}
// newS3FileUploader returns a new file uploader instance.

View File

@@ -16,11 +16,6 @@ type EggConfiguration struct {
FileDenylist []string `json:"file_denylist"`
}
type ConfigurationMeta struct {
Name string `json:"name"`
Description string `json:"description"`
}
type Configuration struct {
mu sync.RWMutex
@@ -29,8 +24,6 @@ type Configuration struct {
// docker containers as well as in log output.
Uuid string `json:"uuid"`
Meta ConfigurationMeta `json:"meta"`
// Whether or not the server is in a suspended state. Suspended servers cannot
// be started or modified except in certain scenarios by an admin user.
Suspended bool `json:"suspended"`

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/mitchellh/colorstring"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)

View File

@@ -8,7 +8,6 @@ import (
"path/filepath"
"strings"
"sync"
"sync/atomic"
"emperror.dev/errors"
"github.com/apex/log"
@@ -18,7 +17,6 @@ import (
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)
const memory = 4 * 1024
@@ -30,62 +28,6 @@ var pool = sync.Pool{
},
}
// Progress is used to track the progress of any I/O operations that are being
// performed.
type Progress struct {
// written is the total size of the files that have been written to the writer.
written int64
// Total is the total size of the archive in bytes.
total int64
// w is an optional writer; when set, every write recorded by the tracker is passed through to it.
w io.Writer
}
// NewProgress returns a Progress tracker for an operation of the given total size in bytes.
func NewProgress(total int64) *Progress {
return &Progress{total: total}
}
// Written returns the total number of bytes written.
// This function should be used when the progress is tracking data being written.
func (p *Progress) Written() int64 {
return atomic.LoadInt64(&p.written)
}
// Total returns the total size in bytes.
func (p *Progress) Total() int64 {
return atomic.LoadInt64(&p.total)
}
// Write totals the number of bytes that have been written to the writer.
func (p *Progress) Write(v []byte) (int, error) {
n := len(v)
atomic.AddInt64(&p.written, int64(n))
if p.w != nil {
return p.w.Write(v)
}
return n, nil
}
// Progress returns a formatted progress string for the current progress.
func (p *Progress) Progress(width int) string {
current := p.Written()
total := p.Total()
// v = 100 (Progress)
// size = 1000 (Content-Length)
// p / size = 0.1
// * 100 = 10% (Multiply by 100 to get a percentage of the download)
// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
// 2.5 (Number of ticks as a float64)
// 2 (convert to an integer)
// We have to cast these numbers to float in order to get a float result from the division.
ticks := ((float64(current) / float64(total)) * 100) / (float64(100) / float64(width))
bar := strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks))
return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
}
type Archive struct {
// BasePath is the absolute path to create the archive from where Files and Ignore are
// relative to.
@@ -98,13 +40,10 @@ type Archive struct {
// Files specifies the files to archive, this takes priority over the Ignore option, if
// unspecified, all files in the BasePath will be archived unless Ignore is set.
Files []string
// Progress wraps the writer of the archive to pass through the progress tracker.
Progress *Progress
}
// Create creates an archive at dst with all the files defined in the
// included Files array.
// Create creates an archive at dst with all of the files defined in the
// included files struct.
func (a *Archive) Create(dst string) error {
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
@@ -123,34 +62,13 @@ func (a *Archive) Create(dst string) error {
writer = f
}
// Choose which compression level to use based on the compression_level configuration option
var compressionLevel int
switch config.Get().System.Backups.CompressionLevel {
case "none":
compressionLevel = pgzip.NoCompression
case "best_compression":
compressionLevel = pgzip.BestCompression
case "best_speed":
fallthrough
default:
compressionLevel = pgzip.BestSpeed
}
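// For illustration (the YAML layout is an assumption derived from the Go field path
// config.Get().System.Backups.CompressionLevel, not copied from a shipped config file):
//
//    system:
//      backups:
//        compression_level: best_compression
//
// would select pgzip.BestCompression here, "none" selects pgzip.NoCompression, and
// any other value -- including the default "best_speed" -- falls through to
// pgzip.BestSpeed.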
// Create a new gzip writer around the file.
gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
_ = gw.SetConcurrency(1<<20, 1)
defer gw.Close()
var pw io.Writer
if a.Progress != nil {
a.Progress.w = gw
pw = a.Progress
} else {
pw = gw
}
// Create a new tar writer around the gzip writer.
tw := tar.NewWriter(pw)
tw := tar.NewWriter(gw)
defer tw.Close()
// Configure godirwalk.
@@ -185,7 +103,7 @@ func (a *Archive) Create(dst string) error {
// being generated.
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
return func(path string, de *godirwalk.Dirent) error {
// Skip directories because we are walking them recursively.
// Skip directories because we walking them recursively.
if de.IsDir() {
return nil
}
@@ -230,7 +148,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
// Adds a given file path to the final archive being created.
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
// Lstat the file, this will give us the same information as Stat except that it will not
// follow a symlink to its target automatically. This is important to avoid including
// follow a symlink to it's target automatically. This is important to avoid including
// files that exist outside the server root unintentionally in the backup.
s, err := os.Lstat(p)
if err != nil {

View File

@@ -8,14 +8,10 @@ import (
"os"
"path"
"path/filepath"
"reflect"
"strings"
"sync/atomic"
"time"
gzip2 "github.com/klauspost/compress/gzip"
zip2 "github.com/klauspost/compress/zip"
"emperror.dev/errors"
"github.com/mholt/archiver/v3"
)
@@ -176,26 +172,13 @@ func ExtractNameFromArchive(f archiver.File) string {
return f.Name()
}
switch s := sys.(type) {
case *zip.FileHeader:
return s.Name
case *zip2.FileHeader:
return s.Name
case *tar.Header:
return s.Name
case *gzip.Header:
return s.Name
case *gzip2.Header:
case *zip.FileHeader:
return s.Name
default:
// At this point we cannot figure out what type of archive this might be so
// just try to find the name field in the struct. If it is found return it.
field := reflect.Indirect(reflect.ValueOf(sys)).FieldByName("Name")
if field.IsValid() {
return field.String()
}
// Fallback to the basename of the file at this point. There is nothing we can really
// do to try and figure out what the underlying directory of the file is supposed to
// be since it didn't implement a name field.
return f.Name()
}
}
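The reflective fallback in the removed block above is a generic trick worth seeing on its own: when the archive header type is not one the switch recognises, look for a Name field via reflection before giving up. A standalone sketch (the header type is invented purely to stand in for an unrecognised format):

package main

import (
    "fmt"
    "reflect"
)

// unknownHeader stands in for an archive header type the switch does not know about;
// all that matters is that it exposes a Name field.
type unknownHeader struct {
    Name string
}

func main() {
    var sys interface{} = &unknownHeader{Name: "nested/dir/file.txt"}
    field := reflect.Indirect(reflect.ValueOf(sys)).FieldByName("Name")
    if field.IsValid() {
        fmt.Println(field.String()) // prints: nested/dir/file.txt
    }
}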

View File

@@ -1,8 +1,6 @@
package filesystem
import (
"context"
"github.com/pterodactyl/wings/internal/vhd"
"sync"
"sync/atomic"
"syscall"
@@ -37,46 +35,18 @@ func (ult *usageLookupTime) Get() time.Time {
return ult.value
}
// MaxDisk returns the maximum amount of disk space that this Filesystem
// instance is allowed to use.
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
func (fs *Filesystem) MaxDisk() int64 {
return atomic.LoadInt64(&fs.diskLimit)
}
// SetDiskLimit sets the disk space limit for this Filesystem instance. This
// logic will also handle mounting or unmounting a virtual disk if it is being
// used currently.
func (fs *Filesystem) SetDiskLimit(ctx context.Context, i int64) error {
// Do nothing if this method is called but the limit is not changing.
if atomic.LoadInt64(&fs.diskLimit) == i {
return nil
}
if vhd.Enabled() {
if i == 0 && fs.IsVirtual() {
fs.log().Debug("disk limit changed to 0, destroying virtual disk")
// Remove the VHD if it is mounted so that we're just storing files directly on the system
// since we cannot have a virtual disk with a space limit enforced like that.
if err := fs.vhd.Destroy(ctx); err != nil {
return errors.WithStackIf(err)
}
fs.vhd = nil
}
// If we're setting a disk size go ahead and mount the VHD if it isn't already mounted,
// and then allocate the new space to the disk.
if i > 0 {
fs.log().Debug("disk limit updated, allocating new space to virtual disk")
if err := fs.ConfigureDisk(ctx, i); err != nil {
return errors.WithStackIf(err)
}
}
}
fs.log().WithField("limit", i).Debug("disk limit updated")
atomic.StoreInt64(&fs.diskLimit, i)
return nil
// Sets the disk space limit for this Filesystem instance.
func (fs *Filesystem) SetDiskLimit(i int64) {
atomic.SwapInt64(&fs.diskLimit, i)
}
// HasSpaceErr is the same concept as HasSpaceAvailable however this will return
// an error if there is no space, rather than a boolean value.
// The same concept as HasSpaceAvailable however this will return an error if there is
// no space, rather than a boolean value.
func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
if !fs.HasSpaceAvailable(allowStaleValue) {
return newFilesystemError(ErrCodeDiskSpace, nil)
@@ -84,77 +54,67 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
return nil
}
// HasSpaceAvailable determines if the directory a file is trying to be added to
// has enough space available for the file to be written to.
// Determines if the directory a file is trying to be added to has enough space available
// for the file to be written to.
//
// Because determining the amount of space being used by a server is a taxing
// operation we will load it all up into a cache and pull from that as long as
// the key is not expired. This operation will potentially block unless
// allowStaleValue is set to true. See the documentation on DiskUsage for how
// this affects the call.
// Because determining the amount of space being used by a server is a taxing operation we
// will load it all up into a cache and pull from that as long as the key is not expired.
//
// If the current size of the disk is larger than the maximum allowed size this
// function will return false, in all other cases it will return true. We do
// not check the existence of a virtual disk at this point since this logic is
// used to return friendly error messages to users, and also prevent us wasting
// time on more taxing operations when we know the result will end up failing due
// to space limits.
//
// If the servers disk limit is set to 0 it means there is no limit, however the
// DiskUsage method is still called to keep the cache warm. This function will
// always return true for a server with no limit set.
// This operation will potentially block unless allowStaleValue is set to true. See the
// documentation on DiskUsage for how this affects the call.
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
size, err := fs.DiskUsage(allowStaleValue)
if err != nil {
fs.log().WithField("error", err).Warn("failed to determine root fs directory size")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
}
return fs.MaxDisk() == 0 || size <= fs.MaxDisk()
// If space is -1 or 0 just return true, means they're allowed unlimited.
//
// Technically we could skip disk space calculation because we don't need to check if the
// server exceeds it's limit but because this method caches the disk usage it would be best
// to calculate the disk usage and always return true.
if fs.MaxDisk() == 0 {
return true
}
return size <= fs.MaxDisk()
}
// CachedUsage returns the cached value for the amount of disk space used by the
// filesystem. Do not rely on this function for critical logical checks. It
// should only be used in areas where the actual disk usage does not need to be
// perfect, e.g. API responses for server resource usage.
// Returns the cached value for the amount of disk space used by the filesystem. Do not rely on this
// function for critical logical checks. It should only be used in areas where the actual disk usage
// does not need to be perfect, e.g. API responses for server resource usage.
func (fs *Filesystem) CachedUsage() int64 {
return atomic.LoadInt64(&fs.diskUsed)
}
// DiskUsage is an internal helper function to allow other parts of the codebase
// to check the total used disk space as needed without overly taxing the system.
// This will prioritize the value from the cache to avoid excessive IO usage. We
// will only walk the filesystem and determine the size of the directory if there
// Internal helper function to allow other parts of the codebase to check the total used disk space
// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
// is no longer a cached value.
//
// If "allowStaleValue" is set to true, a stale value MAY be returned to the
// caller if there is an expired cache value AND there is currently another
// lookup in progress. If there is no cached value but no other lookup is in
// progress, a fresh disk space response will be returned to the caller.
// If "allowStaleValue" is set to true, a stale value MAY be returned to the caller if there is an
// expired cache value AND there is currently another lookup in progress. If there is no cached value but
// no other lookup is in progress, a fresh disk space response will be returned to the caller.
//
// This is primarily to avoid a bunch of I/O operations from piling up on the
// server, especially on servers with a large amount of files.
// This is primarily to avoid a bunch of I/O operations from piling up on the server, especially on servers
// with a large amount of files.
func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
// A disk check interval of 0 means this functionality is completely disabled.
if fs.diskCheckInterval == 0 {
return 0, nil
}
since := time.Now().Add(time.Second * fs.diskCheckInterval * -1)
// If the last lookup time was before our calculated limit we will re-execute this
// checking logic. If the lookup time was after the oldest possible timestamp we will
// continue returning the cached value.
if fs.lastLookupTime.Get().Before(since) {
if !fs.lastLookupTime.Get().After(time.Now().Add(time.Second * fs.diskCheckInterval * -1)) {
// If we are now allowing a stale response go ahead and perform the lookup and return the fresh
// value. This is a blocking operation to the calling process.
if !allowStaleValue {
return fs.updateCachedDiskUsage()
}
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
// currently performing a lookup, just do the disk usage calculation in the background.
if !fs.lookupInProgress.Load() {
} else if !fs.lookupInProgress.Load() {
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
// currently performing a lookup, just do the disk usage calculation in the background.
go func(fs *Filesystem) {
if _, err := fs.updateCachedDiskUsage(); err != nil {
fs.log().WithField("error", err).Warn("failed to update fs disk usage from within routine")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
}
}(fs)
}
@@ -234,14 +194,11 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
}
// HasSpaceFor is a function to determine if a server has space available for a
// file of a given size. If space is available, no error will be returned,
// otherwise an ErrNotEnoughSpace error will be raised. If this filesystem is
// configured as a virtual disk this function is a no-op as we will fall through
// to the native implementation to throw back an error if there is not disk
// space available.
// Helper function to determine if a server has space available for a file of a given size.
// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
// will be raised.
func (fs *Filesystem) HasSpaceFor(size int64) error {
if fs.IsVirtual() || fs.MaxDisk() == 0 {
if fs.MaxDisk() == 0 {
return nil
}
s, err := fs.DiskUsage(true)
@@ -277,7 +234,3 @@ func (fs *Filesystem) addDisk(i int64) int64 {
return atomic.AddInt64(&fs.diskUsed, i)
}
func (fs *Filesystem) log() *log.Entry {
return log.WithField("server", fs.uuid).WithField("root", fs.root)
}

View File

@@ -20,7 +20,6 @@ import (
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/vhd"
"github.com/pterodactyl/wings/system"
)
@@ -31,23 +30,19 @@ type Filesystem struct {
diskUsed int64
diskCheckInterval time.Duration
denylist *ignore.GitIgnore
vhd *vhd.Disk
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
diskLimit int64
// The root data directory path for this Filesystem instance.
root string
uuid string
isTest bool
}
// New creates a new Filesystem instance for a given server.
func New(uuid string, size int64, denylist []string) *Filesystem {
root := filepath.Join(config.Get().System.Data, uuid)
fs := Filesystem{
uuid: uuid,
func New(root string, size int64, denylist []string) *Filesystem {
return &Filesystem{
root: root,
diskLimit: size,
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
@@ -55,15 +50,6 @@ func New(uuid string, size int64, denylist []string) *Filesystem {
lookupInProgress: system.NewAtomicBool(false),
denylist: ignore.CompileIgnoreLines(denylist...),
}
// If VHD support is enabled but this server is configured with no disk size
// limit we cannot actually use a virtual disk. In that case fall back to using
// the default driver.
if vhd.Enabled() && size > 0 {
fs.vhd = vhd.New(size, vhd.DiskPath(uuid), fs.root)
}
return &fs
}
// Path returns the root path for the Filesystem instance.
@@ -91,9 +77,9 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
return f, st, nil
}
// Touch acts by creating the given file and path on the disk if it is not present
// already. If it is present, the file is opened using the defaults which will
// truncate the contents. The opened file is then returned to the caller.
// Acts by creating the given file and path on the disk if it is not present already. If
// it is present, the file is opened using the defaults which will truncate the contents.
// The opened file is then returned to the caller.
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
cleaned, err := fs.SafePath(p)
if err != nil {
@@ -169,12 +155,6 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
buf := make([]byte, 1024*4)
sz, err := io.CopyBuffer(file, r, buf)
if err != nil {
if strings.Contains(err.Error(), "no space left on device") {
return newFilesystemError(ErrCodeDiskSpace, err)
}
return errors.WrapIf(err, "filesystem: failed to copy buffer for file write")
}
// Adjust the disk usage to account for the old size and the new size of the file.
fs.addDisk(sz - currentSize)
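The fs.addDisk(sz - currentSize) call above is the accounting step: after a write, the cached usage counter is adjusted by the signed difference between the new and old file sizes. A small sketch of that bookkeeping with atomic.AddInt64, using illustrative names.

package main

import (
	"fmt"
	"sync/atomic"
)

// adjustUsage applies the signed delta (newSize - oldSize) to the cached
// usage counter; shrinking a file therefore reduces tracked usage.
func adjustUsage(used *int64, oldSize, newSize int64) int64 {
	return atomic.AddInt64(used, newSize-oldSize)
}

func main() {
	var used int64 = 10000
	fmt.Println(adjustUsage(&used, 2000, 5000)) // 13000: file grew by 3000
	fmt.Println(adjustUsage(&used, 5000, 1000)) // 9000: file shrank by 4000
}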
@@ -332,9 +312,8 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
return name + suffix + extension, nil
}
// Copy takes a given input file path and creates a copy of the file at the same
// location, appending a unique number to the end. For example, a copy of "test.txt"
// would create "test 2.txt" as the copy, then "test 3.txt" and so on.
// Copies a given file to the same location and appends a suffix to the file to indicate that
// it has been copied.
func (fs *Filesystem) Copy(p string) error {
cleaned, err := fs.SafePath(p)
if err != nil {
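The copy-suffix behaviour described in the comment above ("test.txt" becomes "test 2.txt", then "test 3.txt", and so on) boils down to probing candidate names until one is free. A hedged sketch of that probing, using an in-memory set in place of real stat calls.

package main

import "fmt"

// nextCopyName probes "name 2.ext", "name 3.ext", ... until it finds a name
// that does not already exist, mirroring the behaviour described above.
func nextCopyName(name, ext string, exists func(string) bool) string {
	for i := 2; ; i++ {
		candidate := fmt.Sprintf("%s %d%s", name, i, ext)
		if !exists(candidate) {
			return candidate
		}
	}
}

func main() {
	taken := map[string]bool{"test.txt": true, "test 2.txt": true}
	fmt.Println(nextCopyName("test", ".txt", func(p string) bool { return taken[p] }))
	// Output: test 3.txt
}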

View File

@@ -1,42 +0,0 @@
package filesystem
import (
"context"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/vhd"
)
// IsVirtual returns true if the filesystem is currently using a virtual disk.
func (fs *Filesystem) IsVirtual() bool {
return fs.vhd != nil
}
// ConfigureDisk will attempt to create a new VHD if there is not one already
// created for the filesystem. If there is this method will attempt to resize
// the underlying data volume. Passing a size of 0 or less will panic.
func (fs *Filesystem) ConfigureDisk(ctx context.Context, size int64) error {
if size <= 0 {
panic("filesystem: attempt to configure disk with empty size")
}
if fs.vhd == nil {
fs.vhd = vhd.New(size, vhd.DiskPath(fs.uuid), fs.root)
if err := fs.MountDisk(ctx); err != nil {
return errors.WithStackIf(err)
}
}
// Resize the disk now that it is for sure mounted and exists on the system.
if err := fs.vhd.Resize(ctx, size); err != nil {
return errors.WithStackIf(err)
}
return nil
}
// MountDisk will attempt to mount the underlying virtual disk for the server.
// If the disk is already mounted this is a no-op function.
func (fs *Filesystem) MountDisk(ctx context.Context) error {
err := fs.vhd.Mount(ctx)
if errors.Is(err, vhd.ErrFilesystemMounted) {
return nil
}
return errors.WrapIf(err, "filesystem: failed to mount VHD")
}
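The removed file above wires virtual-disk support into the filesystem: ConfigureDisk creates and mounts the VHD on first use and then resizes it, while MountDisk treats an already-mounted disk as success. The control flow is sketched below against a stand-in disk type rather than the real internal/vhd package, and the invalid-size case returns an error here instead of panicking, purely to keep the example tame.

package main

import (
	"context"
	"errors"
	"fmt"
)

// ErrAlreadyMounted stands in for vhd.ErrFilesystemMounted in this sketch.
var ErrAlreadyMounted = errors.New("disk already mounted")

type disk struct {
	mounted bool
	size    int64
}

func (d *disk) Mount(ctx context.Context) error {
	if d.mounted {
		return ErrAlreadyMounted
	}
	d.mounted = true
	return nil
}

func (d *disk) Resize(ctx context.Context, size int64) error {
	d.size = size
	return nil
}

// configureDisk mirrors the flow above: mount on first use, treat an
// "already mounted" error as success, then resize to the requested limit.
func configureDisk(ctx context.Context, d *disk, size int64) error {
	if size <= 0 {
		return errors.New("attempt to configure disk with empty size")
	}
	if err := d.Mount(ctx); err != nil && !errors.Is(err, ErrAlreadyMounted) {
		return err
	}
	return d.Resize(ctx, size)
}

func main() {
	d := &disk{}
	fmt.Println(configureDisk(context.Background(), d, 10<<30)) // <nil>: mounts then resizes
	fmt.Println(configureDisk(context.Background(), d, 20<<30)) // <nil>: resize only
}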

View File

@@ -18,7 +18,6 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/remote"
@@ -450,7 +449,6 @@ func (ip *InstallationProcess) Execute() (string, error) {
},
Privileged: true,
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
}
// Ensure the root directory for the server exists properly before attempting
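For context on the UsernsMode line changing in this hunk: it is a field on Docker's host configuration that controls user-namespace remapping for the container, alongside Privileged and NetworkMode. A hedged sketch of assembling such a HostConfig; the mode strings are placeholders (in Wings these values come from the configuration file), and building it requires the docker/docker module already present in the project's go.mod.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Illustrative values only; not the defaults Wings ships with.
	hc := container.HostConfig{
		Privileged:  true,
		NetworkMode: container.NetworkMode("pterodactyl_nw"),
		UsernsMode:  container.UsernsMode("host"),
	}
	fmt.Println(hc.NetworkMode, hc.UsernsMode)
}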

View File

@@ -8,7 +8,6 @@ import (
"time"
"github.com/apex/log"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"sync"
"time"
@@ -22,18 +23,16 @@ import (
)
type Manager struct {
mu sync.RWMutex
client remote.Client
skipVhdInitialization bool
servers []*Server
mu sync.RWMutex
client remote.Client
servers []*Server
}
// NewManager returns a new server manager instance. This will boot up all the
// servers that are currently present on the filesystem and set them into the
// manager.
func NewManager(ctx context.Context, client remote.Client, skipVhdInit bool) (*Manager, error) {
func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
m := NewEmptyManager(client)
m.skipVhdInitialization = skipVhdInit
if err := m.init(ctx); err != nil {
return nil, err
}
@@ -53,24 +52,6 @@ func (m *Manager) Client() remote.Client {
return m.client
}
// Len returns the count of servers stored in the manager instance.
func (m *Manager) Len() int {
m.mu.RLock()
defer m.mu.RUnlock()
return len(m.servers)
}
// Keys returns all of the server UUIDs stored in the manager set.
func (m *Manager) Keys() []string {
m.mu.RLock()
defer m.mu.RUnlock()
keys := make([]string, len(m.servers))
for i, s := range m.servers {
keys[i] = s.ID()
}
return keys
}
// Put replaces all the current values in the collection with the value that
// is passed through.
func (m *Manager) Put(s []*Server) {
@@ -185,7 +166,7 @@ func (m *Manager) ReadStates() (map[string]string, error) {
// InitServer initializes a server using a data byte array. This will be
// marshaled into the given struct using a YAML marshaler. This will also
// configure the given environment for a server.
func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfigurationResponse) (*Server, error) {
func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server, error) {
s, err := New(m.client)
if err != nil {
return nil, err
@@ -197,15 +178,7 @@ func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfiguratio
return nil, errors.WithStackIf(err)
}
s.fs = filesystem.New(s.ID(), s.DiskSpace(), s.Config().Egg.FileDenylist)
// If this is a virtual filesystem we need to go ahead and mount the disk
// so that everything is accessible.
if s.fs.IsVirtual() && !m.skipVhdInitialization {
log.WithField("server", s.ID()).Info("mounting virtual disk for server")
if err := s.fs.MountDisk(ctx); err != nil {
return nil, err
}
}
s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
// Right now we only support a Docker based environment, so I'm going to hard code
// this logic in. When we're ready to support other environment we'll need to make
@@ -267,7 +240,7 @@ func (m *Manager) init(ctx context.Context) error {
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
return
}
s, err := m.InitServer(ctx, d)
s, err := m.InitServer(d)
if err != nil {
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
return

View File

@@ -8,7 +8,6 @@ import (
"emperror.dev/errors"
"github.com/google/uuid"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
)

View File

@@ -4,7 +4,6 @@ import (
"testing"
. "github.com/franela/goblin"
"github.com/pterodactyl/wings/system"
)

View File

@@ -179,8 +179,6 @@ func (s *Server) Log() *log.Entry {
//
// This also means mass actions can be performed against servers on the Panel
// and they will automatically sync with Wings when the server is started.
//
// TODO: accept a context value rather than using the server's context.
func (s *Server) Sync() error {
cfg, err := s.client.GetServerConfiguration(s.Context(), s.ID())
if err != nil {
@@ -196,9 +194,7 @@ func (s *Server) Sync() error {
// Update the disk space limits for the server whenever the configuration for
// it changes.
if err := s.fs.SetDiskLimit(s.Context(), s.DiskSpace()); err != nil {
return errors.WrapIf(err, "server: failed to sync server configuration from API")
}
s.fs.SetDiskLimit(s.DiskSpace())
s.SyncWithEnvironment()

View File

@@ -1,59 +0,0 @@
package sftp
import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
)
type eventHandler struct {
ip string
user string
server string
}
type FileAction struct {
// Entity is the targeted file or directory (depending on the event) that the action
// is being performed _against_, such as "/foo/test.txt". This will always be the full
// path to the element.
Entity string
// Target is an optional (often blank) field that only has a value in it when the event
// is specifically modifying the entity, such as a rename or move event. In that case
// the Target field will be the final value, such as "/bar/new.txt"
Target string
}
// Log parses a SFTP specific file activity event and then passes it off to be stored
// in the normal activity database.
func (eh *eventHandler) Log(e models.Event, fa FileAction) error {
metadata := map[string]interface{}{
"files": []string{fa.Entity},
}
if fa.Target != "" {
metadata["files"] = []map[string]string{
{"from": fa.Entity, "to": fa.Target},
}
}
a := models.Activity{
Server: eh.server,
Event: e,
Metadata: metadata,
IP: eh.ip,
}
if tx := database.Instance().Create(a.SetUser(eh.user)); tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}
// MustLog is a wrapper around log that will trigger a fatal error and exit the application
// if an error is encountered during the logging of the event.
func (eh *eventHandler) MustLog(e models.Event, fa FileAction) {
if err := eh.Log(e, fa); err != nil {
log.WithField("error", errors.WithStack(err)).WithField("event", e).Error("sftp: failed to log event")
}
}
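The removed handler above records one activity row per SFTP action, and the metadata shape depends on whether the event carries a rename target. A self-contained sketch of just that metadata-building step; the models and database plumbing is omitted and the names are illustrative.

package main

import "fmt"

type fileAction struct {
	Entity string // path the action was performed against
	Target string // optional destination, set for renames/moves
}

// metadataFor mirrors the Log method above: plain events record a list of
// paths, while rename-style events record from/to pairs.
func metadataFor(fa fileAction) map[string]interface{} {
	md := map[string]interface{}{"files": []string{fa.Entity}}
	if fa.Target != "" {
		md["files"] = []map[string]string{{"from": fa.Entity, "to": fa.Target}}
	}
	return md
}

func main() {
	fmt.Println(metadataFor(fileAction{Entity: "/foo/test.txt"}))
	fmt.Println(metadataFor(fileAction{Entity: "/foo/test.txt", Target: "/bar/new.txt"}))
}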

View File

@@ -27,40 +27,32 @@ const (
)
type Handler struct {
mu sync.Mutex
mu sync.Mutex
permissions []string
server *server.Server
fs *filesystem.Filesystem
events *eventHandler
permissions []string
logger *log.Entry
ro bool
}
// NewHandler returns a new connection handler for the SFTP server. This allows a given user
// Returns a new connection handler for the SFTP server. This allows a given user
// to access the underlying filesystem.
func NewHandler(sc *ssh.ServerConn, srv *server.Server) (*Handler, error) {
uuid, ok := sc.Permissions.Extensions["user"]
if !ok {
return nil, errors.New("sftp: mismatched Wings and Panel versions — Panel 1.10 is required for this version of Wings.")
}
events := eventHandler{
ip: sc.RemoteAddr().String(),
user: uuid,
server: srv.ID(),
}
func NewHandler(sc *ssh.ServerConn, srv *server.Server) *Handler {
return &Handler{
permissions: strings.Split(sc.Permissions.Extensions["permissions"], ","),
server: srv,
fs: srv.Filesystem(),
events: &events,
ro: config.Get().System.Sftp.ReadOnly,
logger: log.WithFields(log.Fields{"subsystem": "sftp", "user": uuid, "ip": sc.RemoteAddr()}),
}, nil
logger: log.WithFields(log.Fields{
"subsystem": "sftp",
"username": sc.User(),
"ip": sc.RemoteAddr(),
}),
}
}
// Handlers returns the sftp.Handlers for this struct.
// Returns the sftp.Handlers for this struct.
func (h *Handler) Handlers() sftp.Handlers {
return sftp.Handlers{
FileGet: h,
@@ -129,12 +121,7 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
}
// Chown may or may not have been called in the touch function, so always do
// it at this point to avoid the file being improperly owned.
_ = h.fs.Chown(request.Filepath)
event := server.ActivitySftpWrite
if permission == PermissionFileCreate {
event = server.ActivitySftpCreate
}
h.events.MustLog(event, FileAction{Entity: request.Filepath})
_ = h.server.Filesystem().Chown(request.Filepath)
return f, nil
}
@@ -185,7 +172,6 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
l.WithField("error", err).Error("failed to rename file")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpRename, FileAction{Entity: request.Filepath, Target: request.Target})
break
// Handle deletion of a directory. This will properly delete all of the files and
// folders within that directory if it is not already empty (unlike a lot of SFTP
@@ -194,12 +180,10 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
if !h.can(PermissionFileDelete) {
return sftp.ErrSSHFxPermissionDenied
}
p := filepath.Clean(request.Filepath)
if err := h.fs.Delete(p); err != nil {
if err := h.fs.Delete(request.Filepath); err != nil {
l.WithField("error", err).Error("failed to remove directory")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
return sftp.ErrSSHFxOk
// Handle requests to create a new Directory.
case "Mkdir":
@@ -207,12 +191,11 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
return sftp.ErrSSHFxPermissionDenied
}
name := strings.Split(filepath.Clean(request.Filepath), "/")
p := strings.Join(name[0:len(name)-1], "/")
if err := h.fs.CreateDirectory(name[len(name)-1], p); err != nil {
err := h.fs.CreateDirectory(name[len(name)-1], strings.Join(name[0:len(name)-1], "/"))
if err != nil {
l.WithField("error", err).Error("failed to create directory")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpCreateDirectory, FileAction{Entity: request.Filepath})
break
// Support creating symlinks between files. The source and target must resolve within
// the server home directory.
@@ -245,7 +228,6 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
l.WithField("error", err).Error("failed to remove a file")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
return sftp.ErrSSHFxOk
default:
return sftp.ErrSSHFxOpUnsupported
@@ -305,10 +287,15 @@ func (h *Handler) can(permission string) bool {
if h.server.IsSuspended() {
return false
}
// SFTPServer owners and super admins have their permissions returned as '[*]' via the Panel
// API, so for the sake of speed do an initial check for that before iterating over the
// entire array of permissions.
if len(h.permissions) == 1 && h.permissions[0] == "*" {
return true
}
for _, p := range h.permissions {
// If we match the permission specifically, or the user has been granted the "*"
// permission because they're an admin, let them through.
if p == permission || p == "*" {
if p == permission {
return true
}
}
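The permission check in this hunk has two paths: a fast wildcard check for owners and admins whose permission set is returned as a single "*", and a linear scan otherwise. A small sketch of the same logic with placeholder permission names.

package main

import "fmt"

// can reports whether the permission is granted, short-circuiting when the
// set is the single wildcard entry returned for owners and admins.
func can(perms []string, permission string) bool {
	if len(perms) == 1 && perms[0] == "*" {
		return true
	}
	for _, p := range perms {
		if p == permission {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(can([]string{"*"}, "file.delete"))                       // true
	fmt.Println(can([]string{"file.read", "file.write"}, "file.delete")) // false
}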

View File

@@ -91,21 +91,19 @@ func (c *SFTPServer) Run() error {
if conn, _ := listener.Accept(); conn != nil {
go func(conn net.Conn) {
defer conn.Close()
if err := c.AcceptInbound(conn, conf); err != nil {
log.WithField("error", err).Error("sftp: failed to accept inbound connection")
}
c.AcceptInbound(conn, conf)
}(conn)
}
}
}
// AcceptInbound handles an inbound connection to the instance and determines if we should
// serve the request or not.
func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) error {
// Handles an inbound connection to the instance and determines if we should serve the
// request or not.
func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
// Before beginning a handshake must be performed on the incoming net.Conn
sconn, chans, reqs, err := ssh.NewServerConn(conn, config)
if err != nil {
return errors.WithStack(err)
return
}
defer sconn.Close()
go ssh.DiscardRequests(reqs)
@@ -151,17 +149,11 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) erro
// Spin up a SFTP server instance for the authenticated user's server allowing
// them access to the underlying filesystem.
handler, err := NewHandler(sconn, srv)
if err != nil {
return errors.WithStackIf(err)
}
rs := sftp.NewRequestServer(channel, handler.Handlers())
if err := rs.Serve(); err == io.EOF {
_ = rs.Close()
handler := sftp.NewRequestServer(channel, NewHandler(sconn, srv).Handlers())
if err := handler.Serve(); err == io.EOF {
handler.Close()
}
}
return nil
}
// Generates a new ED25519 private key that is used for host authentication when
@@ -221,9 +213,8 @@ func (c *SFTPServer) makeCredentialsRequest(conn ssh.ConnMetadata, t remote.Sftp
logger.WithField("server", resp.Server).Debug("credentials validated and matched to server instance")
permissions := ssh.Permissions{
Extensions: map[string]string{
"ip": conn.RemoteAddr().String(),
"uuid": resp.Server,
"user": resp.User,
"user": conn.User(),
"permissions": strings.Join(resp.Permissions, ","),
},
}

View File

@@ -1,3 +1,3 @@
package system
var Version = "develop"
var Version = "1.6.2"

View File

@@ -23,7 +23,7 @@ type SinkPool struct {
}
// NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
// server instance for its full lifetime.
// server instance for it's full lifetime.
func NewSinkPool() *SinkPool {
return &SinkPool{}
}

View File

@@ -1,29 +0,0 @@
package system
import (
"math/rand"
"regexp"
"strings"
)
var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)
const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
// RandomString generates a random string of alpha-numeric characters using a
// pseudo-random number generator. The output of this function IS NOT cryptographically
// secure, it is used solely for generating random strings outside a security context.
func RandomString(n int) string {
var b strings.Builder
b.Grow(n)
for i := 0; i < n; i++ {
b.WriteByte(characters[rand.Intn(len(characters))])
}
return b.String()
}
// TrimIPSuffix removes the internal port value from an IP address to ensure we're only
// ever working directly with the IP address.
func TrimIPSuffix(s string) string {
return ipTrimRegex.ReplaceAllString(s, "")
}
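Both helpers above are shown in full, so a quick usage sketch is enough to illustrate what they produce. The random output varies per run, and the regex simply strips a trailing ":port" from an address; the sample output in the comments is illustrative.

package main

import (
	"fmt"
	"math/rand"
	"regexp"
	"time"
)

var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)

func main() {
	// On pre-1.20 Go the global RNG must be seeded (cf. the wings.go hunk below).
	rand.Seed(time.Now().UnixNano())
	const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
	b := make([]byte, 12)
	for i := range b {
		b[i] = characters[rand.Intn(len(characters))]
	}
	fmt.Println(string(b))                                            // e.g. "k3JfP0qZxA1b"
	fmt.Println(ipTrimRegex.ReplaceAllString("192.0.2.10:51762", "")) // 192.0.2.10
}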

View File

@@ -1,18 +1,9 @@
package main
import (
"math/rand"
"time"
"github.com/pterodactyl/wings/cmd"
)
func main() {
// Since we make use of the math/rand package in the code, especially for generating
// non-cryptographically secure random strings we need to seed the RNG. Just make use
// of the current time for this.
rand.Seed(time.Now().UnixNano())
// Execute the main binary code.
cmd.Execute()
}
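One clarifying note on the seeding lines in this hunk: prior to Go 1.20, the global math/rand source is deterministic unless explicitly seeded, so a build without rand.Seed would have helpers like RandomString emit the same sequence on every start unless seeding happens elsewhere. A tiny illustration of that behaviour, assuming a pre-1.20 toolchain as targeted by this release.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// With no Seed call, a pre-1.20 math/rand program prints the same
	// numbers on every run; seeding with the clock makes runs differ.
	fmt.Println(rand.Intn(100), rand.Intn(100), rand.Intn(100))
}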