Compare commits


1 Commit

Author: Pterodactyl CI | SHA1: be5ad761ea | Message: bump version for release | Date: 2022-01-31 01:31:09 +00:00
86 changed files with 1201 additions and 3274 deletions

.github/FUNDING.yml (vendored, 3 changed lines)
View File

@@ -1 +1,2 @@
github: [ matthewpi ]
github: [ DaneEveritt ]
custom: [ "https://paypal.me/PterodactylSoftware" ]

View File

@@ -12,7 +12,7 @@ jobs:
fail-fast: false
matrix:
os: [ ubuntu-20.04 ]
go: [ '1.18.7' ]
go: [ '^1.17' ]
goos: [ linux ]
goarch: [ amd64, arm64 ]
runs-on: ${{ matrix.os }}
@@ -58,6 +58,7 @@ jobs:
run: |
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH} wings.go
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH}_debug wings.go
upx build/wings_${GOOS}_${{ matrix.goarch }}
chmod +x build/*
- name: Tests
run: go test -race ./...

View File

@@ -11,7 +11,7 @@ jobs:
uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.18.7'
go-version: '^1.17'
- name: Build
env:
REF: ${{ github.ref }}
@@ -22,8 +22,8 @@ jobs:
run: go test ./...
- name: Compress binary and make it executable
run: |
chmod +x build/wings_linux_amd64
chmod +x build/wings_linux_arm64
upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
upx build/wings_linux_arm64 && chmod +x build/wings_linux_arm64
- name: Extract changelog
env:
REF: ${{ github.ref }}

View File

@@ -1,65 +1,5 @@
# Changelog
## v1.7.2
### Fixed
* The S3 backup driver now supports Cloudflare R2
### Added
* During a server transfer, there is a new "Archiving" status that outputs the progress of creating the server transfer archive.
* Adds a configuration option to control the list of trusted proxies that can be used to determine the client IP address.
* Adds a configuration option to control the Docker username space setting when Wings creates containers.
### Changed
* Releases are now built using `Go 1.18` — the minimum version required to build Wings is now `Go 1.18`.
## v1.7.1
### Fixed
* YAML parser has been updated to fix some strange issues
### Added
* Added `Force Outgoing IP` option for servers to ensure outgoing traffic uses the server's IP address
* Adds an option to control the level of gzip compression for backups
## v1.7.0
### Fixed
* Fixes multi-platform support for Wings' Docker image.
### Added
* Adds support for tracking of SFTP actions, power actions, server commands, and file uploads by utilizing a local SQLite database and processing events before sending them to the Panel.
* Adds support for configuring the MTU on the `pterodactyl0` network.
## v1.6.4
### Fixed
* Fixes a bug causing CPU limiting to not be properly applied to servers.
* Fixes a bug causing zip archives to decompress without taking into account nested folder structures.
## v1.6.3
### Fixed
* Fixes SFTP authentication failing for administrative users due to a permissions adjustment on the Panel.
## v1.6.2
### Fixed
* Fixes file upload size not being properly enforced.
* Fixes a bug that prevented listing a directory when it contained a named pipe. Also added a check to prevent attempting to read a named pipe directly.
* Fixes a bug with the archiver logic that would include folders that had the same name prefix. (for example, requesting only `map` would also include `map2` and `map3`)
* Requests to the Panel that return a client error (4xx response code) no longer trigger an exponential backoff, they immediately stop the request.
### Changed
* CPU limit fields are only set on the Docker container if they have been specified for the server — otherwise they are left empty.
### Added
* Added the ability to define the location of the temporary folder used by Wings — defaults to `/tmp/pterodactyl`.
* Adds the ability to authenticate for SFTP using public keys (requires `Panel@1.8.0`).
## v1.6.1
### Fixed
* Fixes error that would sometimes occur when starting a server that would cause the temporary power action lock to never be released due to a blocked channel.
* Fixes a bug causing the CPU usage of Wings to get stuck at 100% when a server is deleted while the installation process is running.
### Changed
* Cleans up a lot of the logic for handling events between the server and environment process to make it easier to make modifications to down the road.
* Cleans up logic handling the `StopAndWait` logic for stopping a server gracefully before terminating the process if it does not respond.
## v1.6.0
### Fixed
* Internal logic for processing a server start event has been adjusted to attach to the Docker container before attempting to start the container. This should fix issues where a server would get stuck after pulling the container image.
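
The v1.7.1 entry above mentions a configurable gzip level for backups, and the configuration diff further down documents the accepted values ("none", "best_speed", "best_compression"). As a rough, self-contained sketch and not the actual Wings implementation, those values could map onto the standard compress/gzip levels like this:

```go
package main

import (
	"compress/gzip"
	"fmt"
)

// compressionLevel translates the documented configuration values into
// gzip levels. Unknown values fall back to best speed, matching the
// documented default of "best_speed".
func compressionLevel(v string) int {
	switch v {
	case "none":
		return gzip.NoCompression // level 0
	case "best_compression":
		return gzip.BestCompression // level 9
	default:
		return gzip.BestSpeed // level 1
	}
}

func main() {
	for _, v := range []string{"none", "best_speed", "best_compression"} {
		fmt.Printf("%s -> gzip level %d\n", v, compressionLevel(v))
	}
}
```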

View File

@@ -1,18 +1,19 @@
# Stage 1 (Build)
FROM golang:1.18-alpine AS builder
FROM --platform=$BUILDPLATFORM golang:1.17-alpine AS builder
ARG VERSION
RUN apk add --update --no-cache git make
RUN apk add --update --no-cache git make upx
WORKDIR /app/
COPY go.mod go.sum /app/
RUN go mod download
COPY . /app/
RUN CGO_ENABLED=0 go build \
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
-ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=$VERSION" \
-v \
-trimpath \
-o wings \
wings.go
RUN upx wings
RUN echo "ID=\"distroless\"" > /etc/os-release
# Stage 2 (Final)

View File

@@ -4,12 +4,9 @@ build:
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_amd64 -v wings.go
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go
race:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
debug:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof
# Runs a remotly debuggable session for Wings allowing an IDE to connect and target
# different breakpoints.
@@ -17,6 +14,9 @@ rmdebug:
go build -gcflags "all=-N -l" -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml
compress:
upx --brute build/wings_*
cross-build: clean build compress
clean:

View File

@@ -14,22 +14,27 @@ dependencies, and allowing users to authenticate with the same credentials they
## Sponsors
I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's developement.
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
[Interested in becoming a sponsor?](https://github.com/sponsors/DaneEveritt)
| Company | About |
| ------- | ----- |
| [**WISP**](https://wisp.gg) | Extra features. |
| [**MixmlHosting**](https://mixmlhosting.com) | MixmlHosting provides high quality Virtual Private Servers along with game servers, all at a affordable price. |
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
| [**Fragnet**](https://fragnet.net) | Providing low latency, high-end game hosting solutions to gamers, game studios and eSports platforms. |
| [**Tempest**](https://tempest.net/) | Tempest Hosting is a subsidiary of Path Network, Inc. offering unmetered DDoS protected 10Gbps dedicated servers, starting at just $80/month. Full anycast, tons of filters. |
| [**Bloom.host**](https://bloom.host) | Bloom.host offers dedicated core VPS and Minecraft hosting with Ryzen 9 processors. With owned-hardware, we offer truly unbeatable prices on high-performance hosting. |
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
| [**MineStrator**](https://minestrator.com/) | Looking for a French highend hosting company for you minecraft server? More than 14,000 members on our discord, trust us. |
| [**DedicatedMC**](https://dedicatedmc.io/) | DedicatedMC provides Raw Power hosting at affordable pricing, making sure to never compromise on your performance and giving you the best performance money can buy. |
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
| [**XCORE**](https://xcore-server.de/) | XCORE offers High-End Servers for hosting and gaming since 2012. Fast, excellent and well-known for eSports Gaming. |
| [**RoyaleHosting**](https://royalehosting.net/) | Build your dreams and deploy them with RoyaleHostings reliable servers and network. Easy to use, provisioned in a couple of minutes. |
| [**Spill Hosting**](https://spillhosting.no/) | Spill Hosting is a Norwegian hosting service, which aims for inexpensive services on quality servers. Premium i9-9900K processors will run your game like a dream. |
| [**DeinServerHost**](https://deinserverhost.de/) | DeinServerHost offers Dedicated, vps and Gameservers for many popular Games like Minecraft and Rust in Germany since 2013. |
| [**HostBend**](https://hostbend.com/) | HostBend offers a variety of solutions for developers, students, and others who have a tight budget but don't want to compromise quality and support. |
| [**Capitol Hosting Solutions**](https://chs.gg/) | CHS is *the* budget friendly hosting company for Australian and American gamers, offering a variety of plans from Web Hosting to Game Servers; Custom Solutions too! |
| [**ByteAnia**](https://byteania.com/?utm_source=pterodactyl) | ByteAnia offers the best performing and most affordable **Ryzen 5000 Series hosting** on the market for *unbeatable prices*! |
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
| [**HostEZ**](https://hostez.io) | Providing North America Valheim, Minecraft and other popular games with low latency, high uptime and maximum availability. EZ! |
| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa.|
| [**Gamenodes**](https://gamenodes.nl) | Gamenodes love quality. For Minecraft, Discord Bots and other services, among others. With our own programmers, we provide just that little bit of extra service! |
| [**RocketNode**](https://rocketnode.net) | RocketNode is a VPS and Game Server provider that offers the best performing VPS and Game hosting Solutions at affordable prices! |
## Documentation
* [Panel Documentation](https://pterodactyl.io/panel/1.0/getting_started.html)

View File

@@ -58,7 +58,7 @@ func newDiagnosticsCommand() *cobra.Command {
return command
}
// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.
// diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
// We collect:
// - wings and docker versions
// - relevant parts of daemon configuration

View File

@@ -1,127 +0,0 @@
package cmd
import (
"context"
"os"
"os/exec"
"strings"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/vhd"
"github.com/pterodactyl/wings/loggers/cli"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server"
"github.com/spf13/cobra"
)
type MigrateVHDCommand struct {
manager *server.Manager
}
func newMigrateVHDCommand() *cobra.Command {
return &cobra.Command{
Use: "migrate-vhd",
Short: "migrates existing data from a directory tree into virtual hard-disks",
PreRun: func(cmd *cobra.Command, args []string) {
log.SetLevel(log.DebugLevel)
log.SetHandler(cli.Default)
},
Run: func(cmd *cobra.Command, args []string) {
client := remote.NewFromConfig(config.Get())
manager, err := server.NewManager(cmd.Context(), client, true)
if err != nil {
log.WithField("error", err).Fatal("failed to create new server manager")
}
c := &MigrateVHDCommand{
manager: manager,
}
if err := c.Run(cmd.Context()); err != nil {
log.WithField("error", err).Fatal("failed to execute command")
}
},
}
}
// Run executes the migration command.
func (m *MigrateVHDCommand) Run(ctx context.Context) error {
if !vhd.Enabled() {
return errors.New("cannot migrate to vhd: the underlying driver must be set to \"vhd\"")
}
for _, s := range m.manager.All() {
s.Log().Debug("starting migration of server contents to virtual disk...")
v := vhd.New(s.DiskSpace(), vhd.DiskPath(s.ID()), s.Filesystem().Path())
s.Log().WithField("disk_image", v.Path()).Info("creating virtual disk for server")
if err := v.Allocate(ctx); err != nil {
return errors.WithStackIf(err)
}
s.Log().Info("creating virtual filesystem for server")
if err := v.MakeFilesystem(ctx); err != nil {
// If the filesystem already exists no worries, just move on with our
// day here.
if !errors.Is(err, vhd.ErrFilesystemExists) {
return errors.WithStack(err)
}
}
bak := strings.TrimSuffix(s.Filesystem().Path(), "/") + "_bak"
mounted, err := v.IsMounted(ctx)
if err != nil {
return err
} else if !mounted {
s.Log().WithField("backup_dir", bak).Debug("virtual disk is not yet mounted, creating backup directory")
// Create a backup directory of the server files if one does not already exist
// at that location. If one does exists we'll just assume it is good to go and
// rely on it to provide the files we'll need.
if _, err := os.Lstat(bak); os.IsNotExist(err) {
if err := os.Rename(s.Filesystem().Path(), bak); err != nil {
return errors.Wrap(err, "failed to rename existing data directory for backup")
}
} else if err != nil {
return errors.WithStack(err)
}
if err := os.RemoveAll(s.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "failed to remove base server files path")
}
} else {
s.Log().Warn("server appears to already have existing mount, not creating data backup")
}
// Attempt to mount the disk at the expected path now that we've created
// a backup of the server files.
if err := v.Mount(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemMounted) {
return errors.WithStackIf(err)
}
// Copy over the files from the backup for this server but only
// if we have a backup directory currently.
_, err = os.Lstat(bak)
if err != nil {
if !os.IsNotExist(err) {
s.Log().WithField("error", err).Warn("failed to stat backup directory")
} else {
s.Log().Info("no backup data directory exists, not restoring files")
}
} else {
cmd := exec.CommandContext(ctx, "cp", "-r", bak+"/.", s.Filesystem().Path())
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "migrate: failed to move old server files into new direcotry")
} else {
if err := os.RemoveAll(bak); err != nil {
s.Log().WithField("directory", bak).WithField("error", err).Warn("failed to remove backup directory")
}
}
}
s.Log().Info("updating server file ownership...")
if err := s.Filesystem().Chown("/"); err != nil {
s.Log().WithField("error", err).Warn("failed to update ownership of new server files")
}
s.Log().Info("finished migration to virtual disk...")
}
return nil
}

View File

@@ -11,14 +11,10 @@ import (
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/pterodactyl/wings/internal/cron"
"github.com/pterodactyl/wings/internal/database"
"github.com/NYTimes/logrotate"
"github.com/apex/log"
"github.com/apex/log/handlers/multi"
@@ -47,16 +43,8 @@ var (
var rootCommand = &cobra.Command{
Use: "wings",
Short: "Runs the API server allowing programmatic control of game servers for Pterodactyl Panel.",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
initConfig()
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
},
PreRun: func(cmd *cobra.Command, args []string) {
initConfig()
initLogging()
if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
@@ -85,19 +73,17 @@ func Execute() {
func init() {
rootCommand.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
rootCommand.PersistentFlags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
// Flags specifically used when running the API.
rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
rootCommand.AddCommand(versionCommand)
rootCommand.AddCommand(configureCmd)
rootCommand.AddCommand(newDiagnosticsCommand())
rootCommand.AddCommand(newMigrateVHDCommand())
}
func rootCmdRun(cmd *cobra.Command, _ []string) {
@@ -105,6 +91,13 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
log.Debug("running in debug mode")
log.WithField("config_file", configPath).Info("loading configuration from file")
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
if err := config.ConfigureTimezone(); err != nil {
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
}
@@ -135,11 +128,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
}),
)
if err := database.Initialize(); err != nil {
log.WithField("error", err).Fatal("failed to initialize database")
}
manager, err := server.NewManager(cmd.Context(), pclient, false)
manager, err := server.NewManager(cmd.Context(), pclient)
if err != nil {
log.WithField("error", err).Fatal("failed to load server configurations")
}
@@ -165,7 +154,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
ticker := time.NewTicker(time.Minute)
// Every minute, write the current server states to the disk to allow for a more
// seamless hard-reboot process in which wings will re-sync server states based
// on its last tracked state.
// on it's last tracked state.
go func() {
for {
select {
@@ -268,13 +257,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
}
}()
if s, err := cron.Scheduler(cmd.Context(), manager); err != nil {
log.WithField("error", err).Fatal("failed to initialize cron system")
} else {
log.WithField("subsystem", "cron").Info("starting cron processes")
s.StartAsync()
}
go func() {
// Run the SFTP server.
if err := sftp.New(manager).Run(); err != nil {
@@ -327,12 +309,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
profile, _ := cmd.Flags().GetBool("pprof")
if profile {
if r, _ := cmd.Flags().GetInt("pprof-block-rate"); r > 0 {
runtime.SetBlockProfileRate(r)
}
// Catch at least 1% of mutex contention issues.
runtime.SetMutexProfileFraction(100)
profilePort, _ := cmd.Flags().GetInt("pprof-port")
go func() {
http.ListenAndServe(fmt.Sprintf("localhost:%d", profilePort), nil)
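
This hunk covers the pprof wiring: the --pprof, --pprof-block-rate and --pprof-port flags together with the runtime profiler calls and the localhost HTTP listener. As a minimal, self-contained sketch of the same pattern (flag values hard-coded here for illustration), the profiling endpoints are exposed by blank-importing net/http/pprof and serving the default mux:

```go
package main

import (
	"fmt"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof/* handlers on the default mux
	"runtime"
)

func main() {
	// Illustrative values; in the diff above these come from the
	// --pprof-block-rate and --pprof-port flags.
	blockRate, port := 1, 6060

	if blockRate > 0 {
		runtime.SetBlockProfileRate(blockRate)
	}
	// Sample roughly 1 in 100 mutex contention events.
	runtime.SetMutexProfileFraction(100)

	// Serving nil uses the default mux, which carries the pprof handlers
	// registered by the blank import above.
	_ = http.ListenAndServe(fmt.Sprintf("localhost:%d", port), nil)
}
```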

View File

@@ -89,11 +89,8 @@ type ApiConfiguration struct {
// servers.
DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
// The maximum size for files uploaded through the Panel in MB.
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
// A list of IP address of proxies that may send a X-Forwarded-For header to set the true clients IP
TrustedProxies []string `json:"trusted_proxies" yaml:"trusted_proxies"`
// The maximum size for files uploaded through the Panel in bytes.
UploadLimit int `default:"100" json:"upload_limit" yaml:"upload_limit"`
}
// RemoteQueryConfiguration defines the configuration settings for remote requests
@@ -135,10 +132,6 @@ type SystemConfiguration struct {
// Directory where local backups will be stored on the machine.
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
// should be created. This supports environments running docker-in-docker.
TmpDirectory string `default:"/tmp/pterodactyl" yaml:"tmp_directory"`
// The user that should own all of the server files, and be used for containers.
Username string `default:"pterodactyl" yaml:"username"`
@@ -166,15 +159,6 @@ type SystemConfiguration struct {
// disk usage is not a concern.
DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`
// ActivitySendInterval is the amount of time that should ellapse between aggregated server activity
// being sent to the Panel. By default this will send activity collected over the last minute. Keep
// in mind that only a fixed number of activity log entries, defined by ActivitySendCount, will be sent
// in each run.
ActivitySendInterval int `default:"60" yaml:"activity_send_interval"`
// ActivitySendCount is the number of activity events to send per batch.
ActivitySendCount int `default:"100" yaml:"activity_send_count"`
// If set to true, file permissions for a server will be checked when the process is
// booted. This can cause boot delays if the server has a large amount of files. In most
// cases disabling this should not have any major impact unless external processes are
@@ -222,15 +206,6 @@ type Backups struct {
//
// Defaults to 0 (unlimited)
WriteLimit int `default:"0" yaml:"write_limit"`
// CompressionLevel determines how much backups created by wings should be compressed.
//
// "none" -> no compression will be applied
// "best_speed" -> uses gzip level 1 for fast speed
// "best_compression" -> uses gzip level 9 for minimal disk space useage
//
// Defaults to "best_speed" (level 1)
CompressionLevel string `default:"best_speed" yaml:"compression_level"`
}
type Transfers struct {
@@ -305,11 +280,6 @@ type Configuration struct {
// is only required by users running Wings without SSL certificates and using internal IP
// addresses in order to connect. Most users should NOT enable this setting.
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
// Servers contains all of the settings that are used when configuring individual servers
// on the system. This is a global configuration for all server instances, not to be confused
// with the per-server configurations provided by the Panel API.
Servers Servers `json:"servers" yaml:"servers"`
}
// NewAtPath creates a new struct and set the path where it should be stored.
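
The TrustedProxies field above controls which proxies are allowed to set X-Forwarded-For when determining the true client IP. Below is a minimal sketch of how such a list is typically applied with gin, the HTTP framework pinned in go.mod; it illustrates the mechanism only and is not necessarily how Wings itself wires it up:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.New()

	// Illustrative list; conceptually this is what the api.trusted_proxies
	// configuration value shown in the diff above would feed in.
	trusted := []string{"10.0.0.0/8", "172.16.0.0/12"}
	if err := r.SetTrustedProxies(trusted); err != nil {
		panic(err)
	}

	r.GET("/ip", func(c *gin.Context) {
		// ClientIP only honours forwarding headers when the request
		// arrives from one of the trusted proxies configured above.
		c.String(http.StatusOK, "%s", c.ClientIP())
	})

	_ = r.Run("127.0.0.1:8080")
}
```

SetTrustedProxies accepts both single addresses and CIDR ranges; forwarding headers from any other source are ignored when resolving the client IP.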

View File

@@ -36,7 +36,6 @@ type DockerNetworkConfiguration struct {
Mode string `default:"pterodactyl_nw" yaml:"network_mode"`
IsInternal bool `default:"false" yaml:"is_internal"`
EnableICC bool `default:"true" yaml:"enable_icc"`
NetworkMTU int64 `default:"1500" yaml:"network_mtu"`
Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
}
@@ -78,14 +77,6 @@ type DockerConfiguration struct {
Overhead Overhead `json:"overhead" yaml:"overhead"`
UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
// Sets the user namespace mode for the container when user namespace remapping option is
// enabled.
//
// If the value is blank, the daemon's user namespace remapping configuration is used,
// if the value is "host", then the pterodactyl containers are started with user namespace
// remapping disabled
UsernsMode string `default:"" json:"userns_mode" yaml:"userns_mode"`
}
// RegistryConfiguration defines the authentication credentials for a given

View File

@@ -1,28 +0,0 @@
package config
type FSDriver string
const (
FSDriverLocal FSDriver = "local"
FSDriverVHD FSDriver = "vhd"
)
type Servers struct {
// Filesystem defines all of the filesystem specific settings used for servers.
Filesystem Filesystem `json:"filesystem" yaml:"filesystem"`
}
type Filesystem struct {
// Driver defines the underlying filesystem driver that is used when a server is
// created on the system. This currently supports either of the following drivers:
//
// local: the local driver is the default one used by Wings. This offloads all of the
// disk limit enforcement to Wings itself. This has a performance impact but is
// the most compatiable with all systems.
// vhd: the vhd driver uses "virtual" disks on the host system to enforce disk limits
// on the server. This is more performant since calculations do not need to be made
// by Wings itself when enforcing limits. It also avoids vulnerabilities that exist
// in the local driver which allow malicious processes to quickly create massive files
// before Wings is able to detect and stop them from being written.
Driver FSDriver `default:"local" json:"driver" yaml:"driver"`
}

View File

@@ -12,11 +12,6 @@ import (
// Defines the allocations available for a given server. When using the Docker environment
// driver these correspond to mappings for the container that allow external connections.
type Allocations struct {
// ForceOutgoingIP causes a dedicated bridge network to be created for the
// server with a special option, causing Docker to SNAT outgoing traffic to
// the DefaultMapping's IP. This is important to servers which rely on external
// services that check the IP of the server (Source Engine servers, for example).
ForceOutgoingIP bool `json:"force_outgoing_ip"`
// Defines the default allocation that should be used for this server. This is
// what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
// files or the startup arguments for a server.

View File

@@ -41,12 +41,12 @@ func ConfigureDocker(ctx context.Context) error {
nw := config.Get().Docker.Network
resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
if err != nil {
if !client.IsErrNotFound(err) {
return err
}
log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
if err := createDockerNetwork(ctx, cli); err != nil {
if client.IsErrNotFound(err) {
log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
if err := createDockerNetwork(ctx, cli); err != nil {
return err
}
} else {
return err
}
}
@@ -92,7 +92,7 @@ func createDockerNetwork(ctx context.Context, cli *client.Client) error {
"com.docker.network.bridge.enable_ip_masquerade": "true",
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
"com.docker.network.bridge.name": "pterodactyl0",
"com.docker.network.driver.mtu": strconv.FormatInt(nw.NetworkMTU, 10),
"com.docker.network.driver.mtu": "1500",
},
})
if err != nil {

View File

@@ -14,7 +14,6 @@ import (
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
)
@@ -74,9 +73,6 @@ func (e *Environment) ContainerInspect(ctx context.Context) (types.ContainerJSON
res, err := e.client.HTTPClient().Do(req)
if err != nil {
if res == nil {
return st, errdefs.Unknown(err)
}
return st, errdefs.FromStatusCode(err, res.StatusCode)
}

View File

@@ -147,12 +147,10 @@ func (e *Environment) InSituUpdate() error {
// currently available for it. If the container already exists it will be
// returned.
func (e *Environment) Create() error {
ctx := context.Background()
// If the container already exists don't hit the user with an error, just return
// the current information about it which is what we would do when creating the
// container anyways.
if _, err := e.ContainerInspect(ctx); err == nil {
if _, err := e.ContainerInspect(context.Background()); err == nil {
return nil
} else if !client.IsErrNotFound(err) {
return errors.Wrap(err, "environment/docker: failed to inspect container")
@@ -192,34 +190,7 @@ func (e *Environment) Create() error {
},
}
networkMode := container.NetworkMode(config.Get().Docker.Network.Mode)
if a.ForceOutgoingIP {
e.log().Debug("environment/docker: forcing outgoing IP address")
networkName := strings.ReplaceAll(e.Id, "-", "")
networkMode = container.NetworkMode(networkName)
if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
if !client.IsErrNotFound(err) {
return err
}
if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
Driver: "bridge",
EnableIPv6: false,
Internal: false,
Attachable: false,
Ingress: false,
ConfigOnly: false,
Options: map[string]string{
"encryption": "false",
"com.docker.network.bridge.default_bridge": "false",
"com.docker.network.host_ipv4": a.DefaultMapping.Ip,
},
}); err != nil {
return err
}
}
}
tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))
hostConf := &container.HostConfig{
PortBindings: a.DockerBindings(),
@@ -231,7 +202,7 @@ func (e *Environment) Create() error {
// Configure the /tmp folder mapping in containers. This is necessary for some
// games that need to make use of it for downloads and other installation processes.
Tmpfs: map[string]string{
"/tmp": "rw,exec,nosuid,size=" + strconv.Itoa(int(config.Get().Docker.TmpfsSize)) + "M",
"/tmp": "rw,exec,nosuid,size=" + tmpfsSize + "M",
},
// Define resource limits for the container based on the data passed through
@@ -260,11 +231,10 @@ func (e *Environment) Create() error {
"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
},
NetworkMode: networkMode,
UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
}
if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {
if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
return errors.Wrap(err, "environment/docker: failed to create container")
}
@@ -510,3 +480,21 @@ func (e *Environment) convertMounts() []mount.Mount {
return out
}
func (e *Environment) resources() container.Resources {
l := e.Configuration.Limits()
pids := l.ProcessLimit()
return container.Resources{
Memory: l.BoundedMemoryLimit(),
MemoryReservation: l.MemoryLimit * 1_000_000,
MemorySwap: l.ConvertedSwap(),
CPUQuota: l.ConvertedCpuLimit(),
CPUPeriod: 100_000,
CPUShares: 1024,
BlkioWeight: l.IoWeight,
OomKillDisable: &l.OOMDisabled,
CpusetCpus: l.Threads,
PidsLimit: &pids,
}
}

View File

@@ -10,7 +10,6 @@ import (
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote"
@@ -27,7 +26,7 @@ type Metadata struct {
var _ environment.ProcessEnvironment = (*Environment)(nil)
type Environment struct {
mu sync.RWMutex
mu sync.RWMutex
// The public identifier for this environment. In this case it is the Docker container
// name that will be used for all instances created under it.

View File

@@ -138,7 +138,9 @@ func (e *Environment) Start(ctx context.Context) error {
// You most likely want to be using WaitForStop() rather than this function,
// since this will return as soon as the command is sent, rather than waiting
// for the process to be completed stopped.
func (e *Environment) Stop(ctx context.Context) error {
//
// TODO: pass context through from the server instance.
func (e *Environment) Stop() error {
e.mu.RLock()
s := e.meta.Stop
e.mu.RUnlock()
@@ -162,7 +164,7 @@ func (e *Environment) Stop(ctx context.Context) error {
case "SIGTERM":
signal = syscall.SIGTERM
}
return e.Terminate(ctx, signal)
return e.Terminate(signal)
}
// If the process is already offline don't switch it back to stopping. Just leave it how
@@ -177,10 +179,8 @@ func (e *Environment) Stop(ctx context.Context) error {
return e.SendCommand(s.Value)
}
// Allow the stop action to run for however long it takes, similar to executing a command
// and using a different logic pathway to wait for the container to stop successfully.
t := time.Duration(-1)
if err := e.client.ContainerStop(ctx, e.Id, &t); err != nil {
t := time.Second * 30
if err := e.client.ContainerStop(context.Background(), e.Id, &t); err != nil {
// If the container does not exist just mark the process as stopped and return without
// an error.
if client.IsErrNotFound(err) {
@@ -198,66 +198,45 @@ func (e *Environment) Stop(ctx context.Context) error {
// command. If the server does not stop after seconds have passed, an error will
// be returned, or the instance will be terminated forcefully depending on the
// value of the second argument.
//
// Calls to Environment.Terminate() in this function use the context passed
// through since we don't want to prevent termination of the server instance
// just because the context.WithTimeout() has expired.
func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, terminate bool) error {
tctx, cancel := context.WithTimeout(context.Background(), duration)
defer cancel()
// If the parent context is canceled, abort the timed context for termination.
go func() {
select {
case <-ctx.Done():
cancel()
case <-tctx.Done():
// When the timed context is canceled, terminate this routine since we no longer
// need to worry about the parent routine being canceled.
break
}
}()
doTermination := func(s string) error {
e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
return e.Terminate(ctx, os.Kill)
}
// We pass through the timed context for this stop action so that if one of the
// internal docker calls fails to ever finish before we've exhausted the time limit
// the resources get cleaned up, and the exection is stopped.
if err := e.Stop(tctx); err != nil {
if terminate && errors.Is(err, context.DeadlineExceeded) {
return doTermination("stop")
}
func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
if err := e.Stop(); err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second)
defer cancel()
// Block the return of this function until the container as been marked as no
// longer running. If this wait does not end by the time seconds have passed,
// attempt to terminate the container, or return an error.
ok, errChan := e.client.ContainerWait(tctx, e.Id, container.WaitConditionNotRunning)
ok, errChan := e.client.ContainerWait(ctx, e.Id, container.WaitConditionNotRunning)
select {
case <-ctx.Done():
if err := ctx.Err(); err != nil {
if ctxErr := ctx.Err(); ctxErr != nil {
if terminate {
return doTermination("parent-context")
log.WithField("container_id", e.Id).Info("server did not stop in time, executing process termination")
return e.Terminate(os.Kill)
}
return err
return ctxErr
}
case err := <-errChan:
// If the error stems from the container not existing there is no point in wasting
// CPU time to then try and terminate it.
if err == nil || client.IsErrNotFound(err) {
return nil
}
if terminate {
if !errors.Is(err, context.DeadlineExceeded) {
e.log().WithField("error", err).Warn("error while waiting for container stop; terminating process")
if err != nil && !client.IsErrNotFound(err) {
if terminate {
l := log.WithField("container_id", e.Id)
if errors.Is(err, context.DeadlineExceeded) {
l.Warn("deadline exceeded for container stop; terminating process")
} else {
l.WithField("error", err).Warn("error while waiting for container stop; terminating process")
}
return e.Terminate(os.Kill)
}
return doTermination("wait")
return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
}
return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
case <-ok:
}
@@ -265,8 +244,8 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
}
// Terminate forcefully terminates the container using the signal provided.
func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
c, err := e.ContainerInspect(ctx)
func (e *Environment) Terminate(signal os.Signal) error {
c, err := e.ContainerInspect(context.Background())
if err != nil {
// Treat missing containers as an okay error state, means it is obviously
// already terminated at this point.
@@ -291,7 +270,7 @@ func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
// We set it to stopping than offline to prevent crash detection from being triggered.
e.SetState(environment.ProcessStoppingState)
sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
if err := e.client.ContainerKill(ctx, e.Id, sig); err != nil && !client.IsErrNotFound(err) {
if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil && !client.IsErrNotFound(err) {
return errors.WithStack(err)
}
e.SetState(environment.ProcessOfflineState)
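
This hunk swaps between a seconds-based WaitForStop and a context-aware variant that bounds the stop with its own timeout while still honouring cancellation of the caller's context. Below is a rough, self-contained sketch of that timed-context pattern, with the Docker stop/wait calls replaced by a stand-in sleep:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForStop mirrors the timed-context pattern from the hunk above: the stop
// work is bounded by its own timeout, while cancellation of the parent context
// also aborts it. The "work" here is only a stand-in sleep.
func waitForStop(ctx context.Context, duration time.Duration, terminate bool) error {
	tctx, cancel := context.WithTimeout(context.Background(), duration)
	defer cancel()

	// Abort the timed context early if the parent context is canceled.
	go func() {
		select {
		case <-ctx.Done():
			cancel()
		case <-tctx.Done():
		}
	}()

	// Stand-in for ContainerStop/ContainerWait: pretend the process needs
	// two seconds to shut down.
	select {
	case <-time.After(2 * time.Second):
		return nil
	case <-tctx.Done():
		if terminate && errors.Is(tctx.Err(), context.DeadlineExceeded) {
			fmt.Println("stop did not complete in time, terminating process...")
			return nil
		}
		return tctx.Err()
	}
}

func main() {
	err := waitForStop(context.Background(), time.Second, true)
	fmt.Println("result:", err)
}
```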

View File

@@ -3,7 +3,6 @@ package environment
import (
"context"
"os"
"time"
"github.com/pterodactyl/wings/events"
)
@@ -59,20 +58,18 @@ type ProcessEnvironment interface {
// can be started an error should be returned.
Start(ctx context.Context) error
// Stop stops a server instance. If the server is already stopped an error will
// not be returned, this function will act as a no-op.
Stop(ctx context.Context) error
// Stops a server instance. If the server is already stopped an error should
// not be returned.
Stop() error
// WaitForStop waits for a server instance to stop gracefully. If the server is
// still detected as running after "duration", an error will be returned, or the server
// will be terminated depending on the value of the second argument. If the context
// provided is canceled the underlying wait conditions will be stopped and the
// entire loop will be ended (potentially without stopping or terminating).
WaitForStop(ctx context.Context, duration time.Duration, terminate bool) error
// Waits for a server instance to stop gracefully. If the server is still detected
// as running after seconds, an error will be returned, or the server will be terminated
// depending on the value of the second argument.
WaitForStop(seconds uint, terminate bool) error
// Terminate stops a running server instance using the provided signal. This function
// is a no-op if the server is already stopped.
Terminate(ctx context.Context, signal os.Signal) error
// Terminates a running server instance using the provided signal. If the server
// is not running no error should be returned.
Terminate(signal os.Signal) error
// Destroys the environment removing any containers that were created (in Docker
// environments at least).

View File

@@ -99,36 +99,21 @@ func (l Limits) ProcessLimit() int64 {
return config.Get().Docker.ContainerPidLimit
}
// AsContainerResources returns the available resources for a container in a format
// that Docker understands.
func (l Limits) AsContainerResources() container.Resources {
pids := l.ProcessLimit()
resources := container.Resources{
return container.Resources{
Memory: l.BoundedMemoryLimit(),
MemoryReservation: l.MemoryLimit * 1_000_000,
MemorySwap: l.ConvertedSwap(),
CPUQuota: l.ConvertedCpuLimit(),
CPUPeriod: 100_000,
CPUShares: 1024,
BlkioWeight: l.IoWeight,
OomKillDisable: &l.OOMDisabled,
CpusetCpus: l.Threads,
PidsLimit: &pids,
}
// If the CPU Limit is not set, don't send any of these fields through. Providing
// them seems to break some Java services that try to read the available processors.
//
// @see https://github.com/pterodactyl/panel/issues/3988
if l.CpuLimit > 0 {
resources.CPUQuota = l.CpuLimit * 1_000
resources.CPUPeriod = 100_000
resources.CPUShares = 1024
}
// Similar to above, don't set the specific assigned CPUs if we didn't actually limit
// the server to any of them.
if l.Threads != "" {
resources.CpusetCpus = l.Threads
}
return resources
}
type Variables map[string]interface{}
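
The AsContainerResources change above only sends the CPU quota, period, and share fields to Docker when a CPU limit has actually been configured (and CpusetCpus only when specific threads are pinned), to avoid the processor-count problem referenced via pterodactyl/panel#3988. A small self-contained sketch of that conditional, where CpuLimit is a percentage and 100 equals one full core:

```go
package main

import "fmt"

// cpuFields mirrors the conditional from the hunk above: quota, period, and
// shares are only set when a CPU limit has been configured for the server.
func cpuFields(cpuLimit int64) (quota, period, shares int64) {
	if cpuLimit > 0 {
		return cpuLimit * 1_000, 100_000, 1024
	}
	// Zero values mean "field not set" to Docker, leaving the container
	// unconstrained rather than advertising a misleading processor count.
	return 0, 0, 0
}

func main() {
	for _, limit := range []int64{0, 50, 150} {
		q, p, _ := cpuFields(limit)
		if p == 0 {
			fmt.Printf("limit %3d%% -> no CPU fields set\n", limit)
			continue
		}
		fmt.Printf("limit %3d%% -> quota=%d period=%d (%.2f cores)\n", limit, q, p, float64(q)/float64(p))
	}
}
```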

View File

@@ -2,13 +2,11 @@ package events
import (
"strings"
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
"sync"
)
type Listener chan Event
// Event represents an Event sent over a Bus.
type Event struct {
Topic string
@@ -17,55 +15,137 @@ type Event struct {
// Bus represents an Event Bus.
type Bus struct {
*system.SinkPool
listenersMx sync.Mutex
listeners map[string][]Listener
}
// NewBus returns a new empty Bus. This is simply a nicer wrapper around the
// system.SinkPool implementation that allows for more simplistic usage within
// the codebase.
//
// All of the events emitted out of this bus are byte slices that can be decoded
// back into an events.Event interface.
// NewBus returns a new empty Event Bus.
func NewBus() *Bus {
return &Bus{
system.NewSinkPool(),
listeners: make(map[string][]Listener),
}
}
// Off unregisters a listener from the specified topics on the Bus.
func (b *Bus) Off(listener Listener, topics ...string) {
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
var closed bool
for _, topic := range topics {
ok := b.off(topic, listener)
if !closed && ok {
close(listener)
closed = true
}
}
}
func (b *Bus) off(topic string, listener Listener) bool {
listeners, ok := b.listeners[topic]
if !ok {
return false
}
for i, l := range listeners {
if l != listener {
continue
}
listeners = append(listeners[:i], listeners[i+1:]...)
b.listeners[topic] = listeners
return true
}
return false
}
// On registers a listener to the specified topics on the Bus.
func (b *Bus) On(listener Listener, topics ...string) {
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
for _, topic := range topics {
b.on(topic, listener)
}
}
func (b *Bus) on(topic string, listener Listener) {
listeners, ok := b.listeners[topic]
if !ok {
b.listeners[topic] = []Listener{listener}
} else {
b.listeners[topic] = append(listeners, listener)
}
}
// Publish publishes a message to the Bus.
func (b *Bus) Publish(topic string, data interface{}) {
// Some of our actions for the socket support passing a more specific namespace,
// Some of our topics for the socket support passing a more specific namespace,
// such as "backup completed:1234" to indicate which specific backup was completed.
//
// In these cases, we still need to send the event using the standard listener
// name of "backup completed".
if strings.Contains(topic, ":") {
parts := strings.SplitN(topic, ":", 2)
if len(parts) == 2 {
topic = parts[0]
}
}
enc, err := json.Marshal(Event{Topic: topic, Data: data})
if err != nil {
panic(errors.WithStack(err))
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
listeners, ok := b.listeners[topic]
if !ok {
return
}
b.Push(enc)
if len(listeners) < 1 {
return
}
var wg sync.WaitGroup
event := Event{Topic: topic, Data: data}
for _, listener := range listeners {
l := listener
wg.Add(1)
go func(l Listener, event Event) {
defer wg.Done()
l <- event
}(l, event)
}
wg.Wait()
}
// MustDecode decodes the event byte slice back into an events.Event struct or
// panics if an error is encountered during this process.
func MustDecode(data []byte) (e Event) {
if err := DecodeTo(data, &e); err != nil {
panic(err)
// Destroy destroys the Event Bus by unregistering and closing all listeners.
func (b *Bus) Destroy() {
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
// Track what listeners have already been closed. Because the same listener
// can be listening on multiple topics, we need a way to essentially
// "de-duplicate" all the listeners across all the topics.
var closed []Listener
for _, listeners := range b.listeners {
for _, listener := range listeners {
if contains(closed, listener) {
continue
}
close(listener)
closed = append(closed, listener)
}
}
return
b.listeners = make(map[string][]Listener)
}
// DecodeTo decodes a byte slice of event data into the given interface.
func DecodeTo(data []byte, v interface{}) error {
if err := json.Unmarshal(data, &v); err != nil {
return errors.Wrap(err, "events: failed to decode byte slice")
func contains(closed []Listener, listener Listener) bool {
for _, c := range closed {
if c == listener {
return true
}
}
return nil
return false
}
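
One side of this hunk is a listener-based bus in which each Listener is a channel of Event values and Publish fans an event out to every channel registered for its topic. Below is a short usage sketch of that calling pattern, written as a Go example test that assumes it lives in the same package as the Bus shown above; the topic name and payload are invented for illustration:

```go
package events

import (
	"fmt"
	"time"
)

// ExampleBus demonstrates the On/Publish/Off calling pattern of the
// listener-based bus shown in the hunk above.
func ExampleBus() {
	bus := NewBus()

	listener := make(Listener) // a Listener is a chan Event
	bus.On(listener, "console output")

	// Publish blocks until every registered listener has received the
	// event, so read from the listener concurrently.
	go bus.Publish("console output", "Server marked as running")

	select {
	case ev := <-listener:
		fmt.Println(ev.Topic, ev.Data)
	case <-time.After(time.Second):
		fmt.Println("timed out waiting for event")
	}

	bus.Off(listener, "console output")
	// Output: console output Server marked as running
}
```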

View File

@@ -9,90 +9,162 @@ import (
func TestNewBus(t *testing.T) {
g := Goblin(t)
bus := NewBus()
g.Describe("Events", func() {
var bus *Bus
g.BeforeEach(func() {
bus = NewBus()
})
g.Describe("NewBus", func() {
g.It("is not nil", func() {
g.Assert(bus).IsNotNil("Bus expected to not be nil")
})
})
g.Describe("Publish", func() {
const topic = "test"
const message = "this is a test message!"
g.It("publishes message", func() {
bus := NewBus()
listener := make(chan []byte)
bus.On(listener)
done := make(chan struct{}, 1)
go func() {
select {
case v := <-listener:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case <-time.After(1 * time.Second):
g.Fail("listener did not receive message in time")
}
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
// Cleanup
bus.Off(listener)
})
g.It("publishes message to all listeners", func() {
bus := NewBus()
listener := make(chan []byte)
listener2 := make(chan []byte)
listener3 := make(chan []byte)
bus.On(listener)
bus.On(listener2)
bus.On(listener3)
done := make(chan struct{}, 1)
go func() {
for i := 0; i < 3; i++ {
select {
case v := <-listener:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case v := <-listener2:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case v := <-listener3:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case <-time.After(1 * time.Second):
g.Fail("all listeners did not receive the message in time")
i = 3
}
}
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
// Cleanup
bus.Off(listener)
bus.Off(listener2)
bus.Off(listener3)
})
g.Describe("NewBus", func() {
g.It("is not nil", func() {
g.Assert(bus).IsNotNil("Bus expected to not be nil")
g.Assert(bus.listeners).IsNotNil("Bus#listeners expected to not be nil")
})
})
}
func TestBus_Off(t *testing.T) {
g := Goblin(t)
const topic = "test"
g.Describe("Off", func() {
g.It("unregisters listener", func() {
bus := NewBus()
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
bus.On(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
bus.Off(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(0, "Topic still has one or more listeners")
})
g.It("unregisters correct listener", func() {
bus := NewBus()
listener := make(chan Event)
listener2 := make(chan Event)
listener3 := make(chan Event)
bus.On(listener, topic)
bus.On(listener2, topic)
bus.On(listener3, topic)
g.Assert(len(bus.listeners[topic])).Equal(3, "Listeners were not registered")
bus.Off(listener, topic)
bus.Off(listener3, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Expected 1 listener to remain")
if bus.listeners[topic][0] != listener2 {
// A normal Assert does not properly compare channels.
g.Fail("wrong listener unregistered")
}
// Cleanup
bus.Off(listener2, topic)
})
})
}
func TestBus_On(t *testing.T) {
g := Goblin(t)
const topic = "test"
g.Describe("On", func() {
g.It("registers listener", func() {
bus := NewBus()
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
bus.On(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
if bus.listeners[topic][0] != listener {
// A normal Assert does not properly compare channels.
g.Fail("wrong listener registered")
}
// Cleanup
bus.Off(listener, topic)
})
})
}
func TestBus_Publish(t *testing.T) {
g := Goblin(t)
const topic = "test"
const message = "this is a test message!"
g.Describe("Publish", func() {
g.It("publishes message", func() {
bus := NewBus()
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
bus.On(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
done := make(chan struct{}, 1)
go func() {
select {
case m := <-listener:
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case <-time.After(1 * time.Second):
g.Fail("listener did not receive message in time")
}
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
// Cleanup
bus.Off(listener, topic)
})
g.It("publishes message to all listeners", func() {
bus := NewBus()
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
listener2 := make(chan Event)
listener3 := make(chan Event)
bus.On(listener, topic)
bus.On(listener2, topic)
bus.On(listener3, topic)
g.Assert(len(bus.listeners[topic])).Equal(3, "Listener was not registered")
done := make(chan struct{}, 1)
go func() {
for i := 0; i < 3; i++ {
select {
case m := <-listener:
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case m := <-listener2:
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case m := <-listener3:
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case <-time.After(1 * time.Second):
g.Fail("all listeners did not receive the message in time")
i = 3
}
}
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
// Cleanup
bus.Off(listener, topic)
bus.Off(listener2, topic)
bus.Off(listener3, topic)
})
})
}

go.mod (151 changed lines)
View File

@@ -1,125 +1,118 @@
module github.com/pterodactyl/wings
go 1.18
go 1.17
require (
emperror.dev/errors v0.8.1
github.com/AlecAivazis/survey/v2 v2.3.6
emperror.dev/errors v0.8.0
github.com/AlecAivazis/survey/v2 v2.2.15
github.com/Jeffail/gabs/v2 v2.6.1
github.com/NYTimes/logrotate v1.0.0
github.com/apex/log v1.9.0
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/beevik/etree v1.1.0
github.com/buger/jsonparser v1.1.1
github.com/cenkalti/backoff/v4 v4.1.3
github.com/cenkalti/backoff/v4 v4.1.1
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
github.com/creasty/defaults v1.6.0
github.com/docker/docker v20.10.18+incompatible
github.com/creasty/defaults v1.5.1
github.com/docker/docker v20.10.7+incompatible
github.com/docker/go-connections v0.4.0
github.com/fatih/color v1.13.0
github.com/fatih/color v1.12.0
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
github.com/gabriel-vasile/mimetype v1.4.1
github.com/gammazero/workerpool v1.1.3
github.com/gabriel-vasile/mimetype v1.3.1
github.com/gammazero/workerpool v1.1.2
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gin-gonic/gin v1.8.1
github.com/glebarez/sqlite v1.4.8
github.com/go-co-op/gocron v1.17.0
github.com/goccy/go-json v0.9.11
github.com/gin-gonic/gin v1.7.2
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.5.0
github.com/gorilla/websocket v1.4.2
github.com/iancoleman/strcase v0.2.0
github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
github.com/juju/ratelimit v1.0.2
github.com/karrick/godirwalk v1.17.0
github.com/klauspost/compress v1.15.11
github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
github.com/juju/ratelimit v1.0.1
github.com/karrick/godirwalk v1.16.1
github.com/klauspost/pgzip v1.2.5
github.com/magiconair/properties v1.8.6
github.com/mattn/go-colorable v0.1.13
github.com/mholt/archiver/v3 v3.5.1
github.com/magiconair/properties v1.8.5
github.com/mattn/go-colorable v0.1.8
github.com/mholt/archiver/v3 v3.5.0
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/sftp v1.13.5
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/spf13/afero v1.9.2
github.com/spf13/cobra v1.5.0
github.com/stretchr/testify v1.8.0
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
gopkg.in/ini.v1 v1.67.0
github.com/pkg/profile v1.6.0
github.com/pkg/sftp v1.13.2
github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f
github.com/spf13/cobra v1.2.1
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
gopkg.in/ini.v1 v1.62.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/gorm v1.23.10
)
require github.com/goccy/go-json v0.9.4
require golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 // indirect
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/Microsoft/hcsshim v0.8.20 // indirect
github.com/andybalholm/brotli v1.0.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/containerd/containerd v1.5.5 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/gammazero/deque v0.2.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/gammazero/deque v0.1.0 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/glebarez/go-sqlite v1.19.1 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.11.1 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/gorilla/mux v1.7.4 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.13.2 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/magefile/mage v1.14.0 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/magefile/mage v1.11.0 // indirect
github.com/mattn/go-isatty v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nwaples/rardecode v1.1.3 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/nwaples/rardecode v1.1.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.13.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/prometheus/procfs v0.7.1 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
golang.org/x/text v0.3.8 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
golang.org/x/tools v0.1.12 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.28.1 // indirect
modernc.org/libc v1.20.0 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect
modernc.org/sqlite v1.19.1 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20210729151513-df9385d47c1b // indirect
google.golang.org/grpc v1.39.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

532
go.sum

File diff suppressed because it is too large

View File

@@ -5,7 +5,6 @@ import (
"emperror.dev/errors"
"github.com/asaskevich/govalidator"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server"
)
@@ -38,7 +37,7 @@ func New(ctx context.Context, manager *server.Manager, details ServerDetails) (*
// Create a new server instance using the configuration we wrote to the disk
// so that everything gets instantiated correctly on the struct.
s, err := manager.InitServer(ctx, c)
s, err := manager.InitServer(c)
if err != nil {
return nil, errors.WrapIf(err, "installer: could not init server instance")
}

View File

@@ -1,59 +0,0 @@
package cron
import (
"context"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
)
type activityCron struct {
mu *system.AtomicBool
manager *server.Manager
max int
}
// Run executes the cronjob and ensures we fetch and send all of the stored activity to the
// Panel instance. Once activity is sent it is deleted from the local database instance. Any
// SFTP specific events are not handled in this cron; they're handled separately to account
// for de-duplication and event merging.
func (ac *activityCron) Run(ctx context.Context) error {
// Don't execute this cron if there is currently one running. Once this task is completed
// go ahead and mark it as no longer running.
if !ac.mu.SwapIf(true) {
return errors.WithStack(ErrCronRunning)
}
defer ac.mu.Store(false)
var activity []models.Activity
tx := database.Instance().WithContext(ctx).
Where("event NOT LIKE ?", "server:sftp.%").
Limit(ac.max).
Find(&activity)
if tx.Error != nil {
return errors.WithStack(tx.Error)
}
if len(activity) == 0 {
return nil
}
if err := ac.manager.Client().SendActivityLogs(ctx, activity); err != nil {
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
}
var ids []int
for _, v := range activity {
ids = append(ids, v.ID)
}
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
if tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}

View File

@@ -1,73 +0,0 @@
package cron
import (
"context"
"time"
"emperror.dev/errors"
log2 "github.com/apex/log"
"github.com/go-co-op/gocron"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
)
const ErrCronRunning = errors.Sentinel("cron: job already running")
var o system.AtomicBool
// Scheduler configures the internal cronjob system for Wings and returns the scheduler
// instance to the caller. This should only be called once per application lifecycle; additional
// calls will result in an error being returned.
func Scheduler(ctx context.Context, m *server.Manager) (*gocron.Scheduler, error) {
if !o.SwapIf(true) {
return nil, errors.New("cron: cannot call scheduler more than once in application lifecycle")
}
l, err := time.LoadLocation(config.Get().System.Timezone)
if err != nil {
return nil, errors.Wrap(err, "cron: failed to parse configured system timezone")
}
activity := activityCron{
mu: system.NewAtomicBool(false),
manager: m,
max: config.Get().System.ActivitySendCount,
}
sftp := sftpCron{
mu: system.NewAtomicBool(false),
manager: m,
max: config.Get().System.ActivitySendCount,
}
s := gocron.NewScheduler(l)
log := log2.WithField("subsystem", "cron")
interval := time.Duration(config.Get().System.ActivitySendInterval) * time.Second
log.WithField("interval", interval).Info("configuring system crons")
_, _ = s.Tag("activity").Every(interval).Do(func() {
log.WithField("cron", "activity").Debug("sending internal activity events to Panel")
if err := activity.Run(ctx); err != nil {
if errors.Is(err, ErrCronRunning) {
log.WithField("cron", "activity").Warn("activity process is already running, skipping...")
} else {
log.WithField("cron", "activity").WithField("error", err).Error("activity process failed to execute")
}
}
})
_, _ = s.Tag("sftp").Every(interval).Do(func() {
log.WithField("cron", "sftp").Debug("sending sftp events to Panel")
if err := sftp.Run(ctx); err != nil {
if errors.Is(err, ErrCronRunning) {
log.WithField("cron", "sftp").Warn("sftp events process already running, skipping...")
} else {
log.WithField("cron", "sftp").WithField("error", err).Error("sftp events process failed to execute")
}
}
})
return s, nil
}
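
The Scheduler function above only assembles the cron jobs; whoever calls it is responsible for actually starting the scheduler. As a point of reference, here is a minimal, standalone gocron sketch (the interval, tag, and job body are placeholders of mine, not Wings code) showing the same tag/every/do chaining plus the start and stop calls a caller would typically add:

```go
package main

import (
	"log"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// Same shape as the crons above: a tagged job executed on a fixed interval.
	if _, err := s.Tag("activity").Every(10 * time.Second).Do(func() {
		log.Println("sending activity events")
	}); err != nil {
		log.Fatal(err)
	}

	s.StartAsync()               // run jobs in the background
	time.Sleep(35 * time.Second) // let the job fire a few times
	s.Stop()                     // stop scheduling further runs
}
```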

View File

@@ -1,177 +0,0 @@
package cron
import (
"context"
"reflect"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
)
type sftpCron struct {
mu *system.AtomicBool
manager *server.Manager
max int
}
type mapKey struct {
User string
Server string
IP string
Event models.Event
Timestamp string
}
type eventMap struct {
max int
ids []int
m map[mapKey]*models.Activity
}
// Run executes the SFTP reconciliation cron. This job will pull all of the SFTP specific events
// and merge them together across user, server, ip, and event. This allows an SFTP event that deletes
// tens or hundreds of files to be tracked as a single "deletion" event so long as they all occur
// within the same one-minute period of time (starting at the first timestamp for the group). Without
// this we'd end up flooding the Panel event log with excessive data that is of no use to end users.
func (sc *sftpCron) Run(ctx context.Context) error {
if !sc.mu.SwapIf(true) {
return errors.WithStack(ErrCronRunning)
}
defer sc.mu.Store(false)
var o int
activity, err := sc.fetchRecords(ctx, o)
if err != nil {
return err
}
o += len(activity)
events := &eventMap{
m: map[mapKey]*models.Activity{},
ids: []int{},
max: sc.max,
}
for {
if len(activity) == 0 {
break
}
slen := len(events.ids)
for _, a := range activity {
events.Push(a)
}
if len(events.ids) > slen {
// Execute the query again; we found some events, so we want to continue
// with this. Start at the next offset.
activity, err = sc.fetchRecords(ctx, o)
if err != nil {
return errors.WithStack(err)
}
o += len(activity)
} else {
break
}
}
if len(events.m) == 0 {
return nil
}
if err := sc.manager.Client().SendActivityLogs(ctx, events.Elements()); err != nil {
return errors.Wrap(err, "failed to send sftp activity logs to Panel")
}
if tx := database.Instance().Where("id IN ?", events.ids).Delete(&models.Activity{}); tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}
// fetchRecords returns a group of activity events starting at the given offset. This is used
// since we might need to make multiple database queries to select enough events to properly
// fill up our request to the given maximum. This is due to the fact that this cron merges any
// activity records that line up across user, server, ip, and event into a single activity record when
// sending the data to the Panel.
func (sc *sftpCron) fetchRecords(ctx context.Context, offset int) (activity []models.Activity, err error) {
tx := database.Instance().WithContext(ctx).
Where("event LIKE ?", "server:sftp.%").
Order("event DESC").
Offset(offset).
Limit(sc.max).
Find(&activity)
if tx.Error != nil {
err = errors.WithStack(tx.Error)
}
return
}
// Push adds an activity to the event mapping, or de-duplicates it and merges the files metadata
// into the existing entity that exists.
func (em *eventMap) Push(a models.Activity) {
m := em.forActivity(a)
// If no activity entity is returned we've hit the cap for the number of events to
// send along to the Panel. Just skip over this record and we'll account for it in
// the next iteration.
if m == nil {
return
}
em.ids = append(em.ids, a.ID)
// Always reduce this to the first timestamp that was recorded for the set
// of events, and not the timestamp of the most recent event in the group.
if a.Timestamp.Before(m.Timestamp) {
m.Timestamp = a.Timestamp
}
list := m.Metadata["files"].([]interface{})
if s, ok := a.Metadata["files"]; ok {
v := reflect.ValueOf(s)
if v.Kind() != reflect.Slice || v.IsNil() {
return
}
for i := 0; i < v.Len(); i++ {
list = append(list, v.Index(i).Interface())
}
// You must set it again at the end of the process, otherwise you've only updated the file
// slice in this one loop since it isn't passed by reference. This is just shorter than having
// to explicitly keep casting it to the slice.
m.Metadata["files"] = list
}
}
// Elements returns the finalized activity models.
func (em *eventMap) Elements() (out []models.Activity) {
for _, v := range em.m {
out = append(out, *v)
}
return
}
// forActivity returns an event entity from our map which allows existing matches to be
// updated with additional files.
func (em *eventMap) forActivity(a models.Activity) *models.Activity {
key := mapKey{
User: a.User.String,
Server: a.Server,
IP: a.IP,
Event: a.Event,
// We group by the minute; we don't care about the seconds for this logic.
Timestamp: a.Timestamp.Format("2006-01-02_15:04"),
}
if v, ok := em.m[key]; ok {
return v
}
// Cap the size of the events map at the defined maximum events to send to the Panel. Just
// return nil and let the caller handle it.
if len(em.m) >= em.max {
return nil
}
// Doesn't exist in our map yet, create a copy of the activity passed into this
// function and then assign it into the map with an empty metadata value.
v := a
v.Metadata = models.ActivityMeta{
"files": make([]interface{}, 0),
}
em.m[key] = &v
return &v
}
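
The mapKey above is what makes the merging work: anything sharing user, server, IP, and event that lands inside the same minute collapses onto one key. A self-contained sketch of just that bucketing step (the key type, field values, and timestamps below are made up for illustration):

```go
package main

import (
	"fmt"
	"time"
)

// bucketKey mirrors the idea behind mapKey: identical fields plus a
// minute-precision timestamp collapse multiple events into one entry.
type bucketKey struct {
	User, Server, IP, Event string
	Minute                  string
}

func keyFor(user, server, ip, event string, ts time.Time) bucketKey {
	return bucketKey{
		User:   user,
		Server: server,
		IP:     ip,
		Event:  event,
		// Same layout string used by the cron: minute precision, seconds dropped.
		Minute: ts.Format("2006-01-02_15:04"),
	}
}

func main() {
	base := time.Date(2022, 1, 31, 1, 31, 5, 0, time.UTC)
	a := keyFor("user-1", "srv-1", "10.0.0.1", "server:sftp.delete", base)
	b := keyFor("user-1", "srv-1", "10.0.0.1", "server:sftp.delete", base.Add(40*time.Second))
	c := keyFor("user-1", "srv-1", "10.0.0.1", "server:sftp.delete", base.Add(2*time.Minute))

	fmt.Println(a == b) // true: both deletions happened within 01:31 and merge
	fmt.Println(a == c) // false: the third event falls in a different minute
}
```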

View File

@@ -1,61 +0,0 @@
package database
import (
"path/filepath"
"time"
"emperror.dev/errors"
"github.com/glebarez/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/system"
)
var (
o system.AtomicBool
db *gorm.DB
)
// Initialize configures the local SQLite database for Wings and ensures that the models have
// been fully migrated.
func Initialize() error {
if !o.SwapIf(true) {
panic("database: attempt to initialize more than once during application lifecycle")
}
p := filepath.Join(config.Get().System.RootDirectory, "wings.db")
instance, err := gorm.Open(sqlite.Open(p), &gorm.Config{
Logger: logger.Default.LogMode(logger.Silent),
})
if err != nil {
return errors.Wrap(err, "database: could not open database file")
}
db = instance
if sql, err := db.DB(); err != nil {
return errors.WithStack(err)
} else {
sql.SetMaxOpenConns(1)
sql.SetConnMaxLifetime(time.Hour)
}
if tx := db.Exec("PRAGMA synchronous = OFF"); tx.Error != nil {
return errors.WithStack(tx.Error)
}
if tx := db.Exec("PRAGMA journal_mode = MEMORY"); tx.Error != nil {
return errors.WithStack(tx.Error)
}
if err := db.AutoMigrate(&models.Activity{}); err != nil {
return errors.WithStack(err)
}
return nil
}
// Instance returns the gorm database instance that was configured when the application was
// booted.
func Instance() *gorm.DB {
if db == nil {
panic("database: attempt to access instance before initialized")
}
return db
}
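
For context, the pairing above (gorm with the pure-Go github.com/glebarez/sqlite driver) is what keeps the local activity database cgo-free. A minimal standalone sketch of the same pattern; the Note model and the in-memory DSN are illustrative only and assume the driver accepts ":memory:" like the underlying modernc SQLite does:

```go
package main

import (
	"fmt"
	"log"

	"github.com/glebarez/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

type Note struct {
	ID   int    `gorm:"primaryKey"`
	Text string `gorm:"not null"`
}

func main() {
	// ":memory:" keeps the example self-contained; Wings writes a wings.db file on disk.
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := db.AutoMigrate(&Note{}); err != nil {
		log.Fatal(err)
	}

	db.Create(&Note{Text: "hello"})

	var out []Note
	db.Where("text LIKE ?", "h%").Find(&out)
	fmt.Println(len(out), out[0].Text) // 1 hello
}
```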

View File

@@ -1,69 +0,0 @@
package models
import (
"time"
"gorm.io/gorm"
"github.com/pterodactyl/wings/system"
)
type Event string
type ActivityMeta map[string]interface{}
// Activity defines an activity log event for a server entity performed by a user. This is
// used for tracking commands, power actions, and SFTP events so that they can be reconciled
// and sent back to the Panel instance to be displayed to the user.
type Activity struct {
ID int `gorm:"primaryKey;not null" json:"-"`
// User is UUID of the user that triggered this event, or an empty string if the event
// cannot be tied to a specific user, in which case we will assume it was the system
// user.
User JsonNullString `gorm:"type:uuid" json:"user"`
// Server is the UUID of the server this event is associated with.
Server string `gorm:"type:uuid;not null" json:"server"`
// Event is a string that describes what occurred, and is used by the Panel instance to
// properly associate this event in the activity logs.
Event Event `gorm:"index;not null" json:"event"`
// Metadata is either a null value, string, or a JSON blob with additional event specific
// metadata that can be provided.
Metadata ActivityMeta `gorm:"serializer:json" json:"metadata"`
// IP is the IP address that triggered this event, or an empty string if it cannot be
// determined properly. This should be the connecting user's IP address, and not the
// internal system IP.
IP string `gorm:"not null" json:"ip"`
Timestamp time.Time `gorm:"not null" json:"timestamp"`
}
// SetUser sets the current user that performed the action. If an empty string is provided
// it is cast into a null value when stored.
func (a Activity) SetUser(u string) *Activity {
var ns JsonNullString
if u == "" {
if err := ns.Scan(nil); err != nil {
panic(err)
}
} else {
if err := ns.Scan(u); err != nil {
panic(err)
}
}
a.User = ns
return &a
}
// BeforeCreate executes before we create any activity entry to ensure the IP address
// is trimmed down to remove any extraneous data, and the timestamp is set to the current
// system time and then stored as UTC.
func (a *Activity) BeforeCreate(_ *gorm.DB) error {
a.IP = system.TrimIPSuffix(a.IP)
if a.Timestamp.IsZero() {
a.Timestamp = time.Now()
}
a.Timestamp = a.Timestamp.UTC()
if a.Metadata == nil {
a.Metadata = ActivityMeta{}
}
return nil
}

View File

@@ -1,32 +0,0 @@
package models
import (
"database/sql"
"emperror.dev/errors"
"github.com/goccy/go-json"
)
type JsonNullString struct {
sql.NullString
}
func (v JsonNullString) MarshalJSON() ([]byte, error) {
if v.Valid {
return json.Marshal(v.String)
} else {
return json.Marshal(nil)
}
}
func (v *JsonNullString) UnmarshalJSON(data []byte) error {
var s *string
if err := json.Unmarshal(data, &s); err != nil {
return errors.WithStack(err)
}
if s != nil {
v.String = *s
}
v.Valid = s != nil
return nil
}
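
A quick check of what the wrapper above produces on the wire: a NULL database value encodes as JSON null rather than an empty string. The sketch below reuses the MarshalJSON method as written, but swaps in the standard library encoder and invents a tiny record struct purely for demonstration:

```go
package main

import (
	"database/sql"
	"encoding/json"
	"fmt"
)

type JsonNullString struct {
	sql.NullString
}

func (v JsonNullString) MarshalJSON() ([]byte, error) {
	if v.Valid {
		return json.Marshal(v.String)
	}
	return json.Marshal(nil)
}

// record stands in for the Activity model's use of JsonNullString for the user field.
type record struct {
	User JsonNullString `json:"user"`
}

func main() {
	system := record{} // Valid is false, i.e. the "system" user
	human := record{User: JsonNullString{sql.NullString{String: "a1b2c3", Valid: true}}}

	a, _ := json.Marshal(system)
	b, _ := json.Marshal(human)
	fmt.Println(string(a)) // {"user":null}
	fmt.Println(string(b)) // {"user":"a1b2c3"}
}
```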

View File

@@ -1,330 +0,0 @@
package vhd
import (
"context"
"emperror.dev/errors"
"fmt"
"github.com/pterodactyl/wings/config"
"github.com/spf13/afero"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
)
var (
ErrInvalidDiskPathTarget = errors.Sentinel("vhd: disk path is a directory or symlink")
ErrMountPathNotDirectory = errors.Sentinel("vhd: mount point is not a directory")
ErrFilesystemMounted = errors.Sentinel("vhd: filesystem is already mounted")
ErrFilesystemNotMounted = errors.Sentinel("vhd: filesystem is not mounted")
ErrFilesystemExists = errors.Sentinel("vhd: filesystem already exists on disk")
)
var useDdAllocation bool
var setDdAllocator sync.Once
// hasExitCode allows this code to test the response error to see if there is
// an exit code available from the command call that can be used to determine if
// something went wrong.
type hasExitCode interface {
ExitCode() int
}
// Commander defines an interface that must be met for executing commands on the
// underlying OS. By default the vhd package will use Go's exec.Cmd type for
// execution. This interface allows stubbing out on tests, or potentially custom
// setups down the line.
type Commander interface {
Run() error
Output() ([]byte, error)
String() string
}
// CommanderProvider is a function that provides a struct meeting the Commander
// interface requirements.
type CommanderProvider func(ctx context.Context, name string, args ...string) Commander
// CfgOption is a configuration option callback for the Disk.
type CfgOption func(d *Disk) *Disk
// Disk represents the underlying virtual disk for the instance.
type Disk struct {
mu sync.RWMutex
// The total size of the disk allowed in bytes.
size int64
// The path where the disk image should be created.
diskPath string
// The point at which this disk should be made available on the system. This
// is where files can be read/written to.
mountAt string
fs afero.Fs
commander CommanderProvider
}
// DiskPath returns the underlying path that contains the virtual disk for the server
// identified by its UUID.
func DiskPath(uuid string) string {
return filepath.Join(config.Get().System.Data, ".vhd/", uuid+".img")
}
// Enabled returns true when VHD support is enabled on the instance.
func Enabled() bool {
return config.Get().Servers.Filesystem.Driver == config.FSDriverVHD
}
// New returns a new Disk instance. The "size" parameter should be provided in
// bytes of space allowed for the disk. An additional slice of option callbacks
// can be provided to programmatically swap out the underlying filesystem
// implementation or the underlying command execution engine.
func New(size int64, diskPath string, mountAt string, opts ...func(*Disk)) *Disk {
if diskPath == "" || mountAt == "" {
panic("vhd: cannot specify an empty disk or mount path")
}
d := Disk{
size: size,
diskPath: diskPath,
mountAt: mountAt,
fs: afero.NewOsFs(),
commander: func(ctx context.Context, name string, args ...string) Commander {
return exec.CommandContext(ctx, name, args...)
},
}
for _, opt := range opts {
opt(&d)
}
return &d
}
// WithFs allows for a different underlying filesystem to be provided to the
// virtual disk manager.
func WithFs(fs afero.Fs) func(*Disk) {
return func(d *Disk) {
d.fs = fs
}
}
// WithCommander allows a different Commander provider to be provided.
func WithCommander(c CommanderProvider) func(*Disk) {
return func(d *Disk) {
d.commander = c
}
}
func (d *Disk) Path() string {
return d.diskPath
}
func (d *Disk) MountPath() string {
return d.mountAt
}
// Exists reports whether the disk exists on the system yet. This only verifies
// the presence of the disk image, not the validity of it. An error is returned
// if the path exists but the destination is not a file or is a symlink.
func (d *Disk) Exists() (bool, error) {
d.mu.RLock()
defer d.mu.RUnlock()
st, err := d.fs.Stat(d.diskPath)
if err != nil && os.IsNotExist(err) {
return false, nil
} else if err != nil {
return false, errors.WithStack(err)
}
if !st.IsDir() && st.Mode()&os.ModeSymlink == 0 {
return true, nil
}
return false, errors.WithStack(ErrInvalidDiskPathTarget)
}
// IsMounted checks to see if the given disk is currently mounted.
func (d *Disk) IsMounted(ctx context.Context) (bool, error) {
find := d.mountAt + " ext4"
cmd := d.commander(ctx, "grep", "-qs", find, "/proc/mounts")
if err := cmd.Run(); err != nil {
if v, ok := err.(hasExitCode); ok {
if v.ExitCode() == 1 {
return false, nil
}
}
return false, errors.Wrap(err, "vhd: failed to execute grep for mount existence")
}
return true, nil
}
// Mount attempts to mount the disk as configured. If it does not exist or the
// mount command fails an error will be returned to the caller. This does not
// attempt to create the disk if it is missing from the filesystem.
//
// Attempting to mount a disk which does not exist will result in an error being
// returned to the caller. If the disk is already mounted an ErrFilesystemMounted
// error is returned to the caller.
func (d *Disk) Mount(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.mount(ctx)
}
// Unmount attempts to unmount the disk from the system. If the disk is not
// currently mounted this function is a no-op and ErrFilesystemNotMounted is
// returned to the caller.
func (d *Disk) Unmount(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.unmount(ctx)
}
// Allocate executes the "fallocate" command on the disk. This will first unmount
// the disk from the system before attempting to actually allocate the space. If
// this disk already exists on the machine it will be resized accordingly.
//
// DANGER! This will unmount the disk from the machine while performing this
// action, use caution when calling it during normal processes.
func (d *Disk) Allocate(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
if exists, err := d.Exists(); exists {
// If the disk currently exists attempt to unmount the mount point before
// allocating space.
if err := d.Unmount(ctx); err != nil {
return errors.WithStackIf(err)
}
} else if err != nil {
return errors.Wrap(err, "vhd: failed to check for existence of root disk")
}
trim := path.Base(d.diskPath)
if err := d.fs.MkdirAll(strings.TrimSuffix(d.diskPath, trim), 0700); err != nil {
return errors.Wrap(err, "vhd: failed to create base vhd disk directory")
}
cmd := d.allocationCmd(ctx)
if _, err := cmd.Output(); err != nil {
msg := "vhd: failed to execute space allocation command"
if v, ok := err.(*exec.ExitError); ok {
stderr := strings.Trim(string(v.Stderr), ".\n")
if !useDdAllocation && strings.HasSuffix(stderr, "not supported") {
// Try again: fallocate is not supported on some filesystems so we'll fall
// back to making use of dd for subsequent operations.
setDdAllocator.Do(func() {
useDdAllocation = true
})
return d.Allocate(ctx)
}
msg = msg + ": " + stderr
}
return errors.Wrap(err, msg)
}
return errors.WithStack(d.fs.Chmod(d.diskPath, 0600))
}
// Resize will change the internal disk size limit and then allocate the new
// space to the disk automatically.
func (d *Disk) Resize(ctx context.Context, size int64) error {
atomic.StoreInt64(&d.size, size)
return d.Allocate(ctx)
}
// Destroy removes the underlying allocated disk image and unmounts the disk.
func (d *Disk) Destroy(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
if err := d.unmount(ctx); err != nil {
return errors.WithStackIf(err)
}
return errors.WithStackIf(d.fs.RemoveAll(d.mountAt))
}
// MakeFilesystem will attempt to execute the "mkfs" command against the disk on
// the machine. If the disk has already been created this command will return an
// ErrFilesystemExists error to the caller. You should manually unmount the disk
// if it shouldn't be mounted at this point.
func (d *Disk) MakeFilesystem(ctx context.Context) error {
d.mu.Lock()
defer d.mu.Unlock()
// If no error is returned when mounting DO NOT execute this command as it will
// completely destroy the data stored at that location.
err := d.Mount(ctx)
if err == nil || errors.Is(err, ErrFilesystemMounted) {
// If it wasn't already mounted try to clean up at this point and unmount
// the disk. If this fails just ignore it for now.
if err != nil {
_ = d.Unmount(ctx)
}
return ErrFilesystemExists
}
if !strings.Contains(err.Error(), "can't find in /etc/fstab") && !strings.Contains(err.Error(), "exit status 32") {
return errors.WrapIf(err, "vhd: unexpected error from mount command")
}
// As long as we got an error back that was because we couldn't find the disk
// in the /etc/fstab file we're good. Otherwise it means the disk probably exists
// or something else went wrong.
//
// Because this is a destructive command and non-tty based execution of it implies
// "-F" (force), we need to only run it when we can guarantee it doesn't already
// exist. No vague "maybe that error is expected" allowed here.
cmd := d.commander(ctx, "mkfs", "-t", "ext4", d.diskPath)
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "vhd: failed to make filesystem for disk")
}
return nil
}
func (d *Disk) mount(ctx context.Context) error {
if isMounted, err := d.IsMounted(ctx); err != nil {
return errors.WithStackIf(err)
} else if isMounted {
return ErrFilesystemMounted
}
if st, err := d.fs.Stat(d.mountAt); err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "vhd: failed to stat mount path")
} else if os.IsNotExist(err) {
if err := d.fs.MkdirAll(d.mountAt, 0700); err != nil {
return errors.Wrap(err, "vhd: failed to create mount path")
}
} else if !st.IsDir() {
return errors.WithStack(ErrMountPathNotDirectory)
}
u := config.Get().System.User
if err := d.fs.Chown(d.mountAt, u.Uid, u.Gid); err != nil {
return errors.Wrap(err, "vhd: failed to chown mount point")
}
cmd := d.commander(ctx, "mount", "-t", "auto", "-o", "loop", d.diskPath, d.mountAt)
if _, err := cmd.Output(); err != nil {
msg := "vhd: failed to mount disk"
if v, ok := err.(*exec.ExitError); ok {
msg = msg + ": " + strings.Trim(string(v.Stderr), ".\n")
}
return errors.Wrap(err, msg)
}
return nil
}
func (d *Disk) unmount(ctx context.Context) error {
cmd := d.commander(ctx, "umount", d.mountAt)
if err := cmd.Run(); err != nil {
v, ok := err.(hasExitCode)
if ok && v.ExitCode() == 32 {
return ErrFilesystemNotMounted
}
return errors.Wrap(err, "vhd: failed to execute unmount command for disk")
}
return nil
}
// allocationCmd returns the command to allocate the disk image. This will attempt to
// use the fallocate command if available, otherwise it will fall back to dd if the
// fallocate command has previously failed.
//
// We use 1024 as the multiplier for all of the disk space logic within the application.
// Passing "K" (/1024) is the same as "KiB" for fallocate, but is different than "KB" (/1000).
func (d *Disk) allocationCmd(ctx context.Context) Commander {
s := atomic.LoadInt64(&d.size) / 1024
if useDdAllocation {
return d.commander(ctx, "dd", "if=/dev/zero", fmt.Sprintf("of=%s", d.diskPath), fmt.Sprintf("bs=%dk", s), "count=1")
}
return d.commander(ctx, "fallocate", "-l", fmt.Sprintf("%dK", s), d.diskPath)
}
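
To make the sizing comment above concrete: dividing the byte size by 1024 and appending "K" keeps fallocate working in KiB. The sketch below assembles the two command strings by hand for a 100 MiB image, mirroring the argument formats used by allocationCmd (the paths are placeholders):

```go
package main

import "fmt"

func main() {
	size := int64(100 * 1024 * 1024) // 100 MiB expressed in bytes
	s := size / 1024                 // 102400, the KiB value passed to the tools

	// fallocate treats the "K" suffix as KiB (x1024), matching the /1024 above.
	fallocate := fmt.Sprintf("fallocate -l %dK /srv/wings/.vhd/disk.img", s)
	// The dd fallback mirrors the bs=<n>k count=1 form used by allocationCmd.
	dd := fmt.Sprintf("dd if=/dev/zero of=/srv/wings/.vhd/disk.img bs=%dk count=1", s)

	fmt.Println(fallocate) // fallocate -l 102400K /srv/wings/.vhd/disk.img
	fmt.Println(dd)        // dd if=/dev/zero of=/srv/wings/.vhd/disk.img bs=102400k count=1
}
```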

View File

@@ -1,476 +0,0 @@
package vhd
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"testing"
"github.com/pterodactyl/wings/config"
"github.com/spf13/afero"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func init() {
config.Set(&config.Configuration{
AuthenticationToken: "token123",
System: config.SystemConfiguration{
User: struct {
Uid int
Gid int
}{Uid: 10, Gid: 10},
},
})
}
type mockCmd struct {
run func() error
output func() ([]byte, error)
string func() string
}
func (m *mockCmd) Run() error {
if m.run != nil {
return m.run()
}
return nil
}
func (m *mockCmd) Output() ([]byte, error) {
if m.output != nil {
return m.output()
}
return nil, nil
}
func (m *mockCmd) String() string {
if m.string != nil {
return m.string()
}
return ""
}
var _ Commander = (*mockCmd)(nil)
type mockedExitCode struct {
code int
}
func (m *mockedExitCode) ExitCode() int {
return m.code
}
func (m *mockedExitCode) Error() string {
return fmt.Sprintf("mocked exit code: code %d", m.code)
}
func newMockDisk(c CommanderProvider) *Disk {
commander := func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{}
}
w := commander
if c != nil {
w = c
}
return New(100 * 1024 * 1024, "/disk.img", "/mnt", WithFs(afero.NewMemMapFs()), WithCommander(w))
}
func Test_New(t *testing.T) {
t.Run("creates expected struct", func(t *testing.T) {
d := New(100 * 1024 * 1024, "/disk.img", "/mnt")
assert.NotNil(t, d)
assert.Equal(t, int64(100 * 1024 * 1024), d.size)
assert.Equal(t, "/disk.img", d.diskPath)
assert.Equal(t, "/mnt", d.mountAt)
// Ensure by default we get a commander interface returned and that it
// returns an *exec.Cmd.
o := d.commander(context.TODO(), "foo", "-bar")
assert.NotNil(t, o)
_, ok := o.(Commander)
assert.True(t, ok)
_, ok = o.(*exec.Cmd)
assert.True(t, ok)
})
t.Run("creates an instance with custom options", func(t *testing.T) {
fs := afero.NewMemMapFs()
cprov := struct {
Commander
}{}
c := func(ctx context.Context, name string, args ...string) Commander {
return &cprov
}
d := New(100, "/disk.img", "/mnt", WithFs(fs), WithCommander(c))
assert.NotNil(t, d)
assert.Same(t, fs, d.fs)
assert.Same(t, &cprov, d.commander(context.TODO(), ""))
})
t.Run("panics if either path is empty", func(t *testing.T) {
assert.Panics(t, func() {
_ = New(100, "", "/bar")
})
assert.Panics(t, func() {
_ = New(100, "/foo", "")
})
})
}
func TestDisk_Exists(t *testing.T) {
t.Run("it exists", func(t *testing.T) {
d := newMockDisk(nil)
f, err := d.fs.Create("/disk.img")
require.NoError(t, err)
_ = f.Close()
exists, err := d.Exists()
assert.NoError(t, err)
assert.True(t, exists)
})
t.Run("it does not exist", func(t *testing.T) {
d := newMockDisk(nil)
exists, err := d.Exists()
assert.NoError(t, err)
assert.False(t, exists)
})
t.Run("it reports errors", func(t *testing.T) {
d := newMockDisk(nil)
err := d.fs.Mkdir("/disk.img", 0600)
require.NoError(t, err)
exists, err := d.Exists()
assert.Error(t, err)
assert.False(t, exists)
assert.EqualError(t, err, ErrInvalidDiskPathTarget.Error())
})
}
func TestDisk_IsMounted(t *testing.T) {
t.Run("executes command and finds mounted disk", func(t *testing.T) {
is := assert.New(t)
var called bool
pctx := context.TODO()
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
is.Same(pctx, ctx)
is.Equal("grep", name)
is.Len(args, 3)
is.Equal([]string{"-qs", "/mnt ext4", "/proc/mounts"}, args)
return &mockCmd{}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(pctx)
is.NoError(err)
is.True(mnt)
is.True(called)
})
t.Run("handles exit code 1 gracefully", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(context.TODO())
assert.NoError(t, err)
assert.False(t, mnt)
assert.True(t, called)
})
t.Run("handles unexpected errors successfully", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 3}
},
}
}
disk := newMockDisk(cmd)
mnt, err := disk.IsMounted(context.TODO())
assert.Error(t, err)
assert.False(t, mnt)
})
}
func TestDisk_Mount(t *testing.T) {
failedCmd := func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{run: func() error {
return &mockedExitCode{code: 1}
}}
}
t.Run("error is returned if mount point is not a directory", func(t *testing.T) {
disk := newMockDisk(failedCmd)
_, err := disk.fs.Create("/mnt")
require.NoError(t, err)
err = disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrMountPathNotDirectory.Error())
})
t.Run("error is returned if mount point cannot be created", func(t *testing.T) {
disk := newMockDisk(failedCmd)
disk.fs = afero.NewReadOnlyFs(disk.fs)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to create mount path: operation not permitted")
})
t.Run("error is returned if already mounted", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrFilesystemMounted.Error())
})
t.Run("error is returned if mount command fails", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
output: func() ([]byte, error) {
called = true
assert.Equal(t, "mount", name)
assert.Equal(t, []string{"-t", "auto", "-o", "loop", "/disk.img", "/mnt"}, args)
return nil, &exec.ExitError{
ProcessState: &os.ProcessState{},
Stderr: []byte("foo bar.\n"),
}
},
}
}
disk := newMockDisk(cmd)
err := disk.Mount(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to mount disk: foo bar: exit status 0")
assert.True(t, called)
})
t.Run("disk can be mounted at existing path", func(t *testing.T) {
disk := newMockDisk(failedCmd)
require.NoError(t, disk.fs.Mkdir("/mnt", 0600))
err := disk.Mount(context.TODO())
assert.NoError(t, err)
})
t.Run("disk can be mounted at non-existing path", func(t *testing.T) {
disk := newMockDisk(failedCmd)
err := disk.Mount(context.TODO())
assert.NoError(t, err)
st, err := disk.fs.Stat("/mnt")
assert.NoError(t, err)
assert.True(t, st.IsDir())
})
}
func TestDisk_Unmount(t *testing.T) {
t.Run("can unmount a disk", func(t *testing.T) {
is := assert.New(t)
pctx := context.TODO()
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
called = true
is.Same(pctx, ctx)
is.Equal("umount", name)
is.Equal([]string{"/mnt"}, args)
return &mockCmd{}
}
disk := newMockDisk(cmd)
err := disk.Unmount(pctx)
is.NoError(err)
is.True(called)
})
t.Run("handles exit code 32 correctly", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 32}
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.NoError(t, err)
})
t.Run("non code 32 errors are returned as error", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return &mockedExitCode{code: 1}
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.Error(t, err)
})
t.Run("errors without ExitCode function are returned", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
return errors.New("foo bar")
},
}
}
disk := newMockDisk(cmd)
err := disk.Unmount(context.TODO())
assert.Error(t, err)
})
}
func TestDisk_Allocate(t *testing.T) {
t.Run("disk is unmounted before allocating space", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
output: func() ([]byte, error) {
called = true
assert.Equal(t, "fallocate", name)
assert.Equal(t, []string{"-l", "102400K", "/disk.img"}, args)
return nil, nil
},
}
}
disk := newMockDisk(cmd)
err := disk.fs.Mkdir("/mnt", 0600)
require.NoError(t, err)
err = disk.Allocate(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("disk space is allocated even when not exists", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.Allocate(context.TODO())
assert.NoError(t, err)
})
t.Run("error is returned if command fails", func(t *testing.T) {
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
output: func() ([]byte, error) {
return nil, &exec.ExitError{
ProcessState: &os.ProcessState{},
Stderr: []byte("foo bar.\n"),
}
},
}
}
disk := newMockDisk(cmd)
_, err := disk.fs.Create("/disk.img")
require.NoError(t, err)
err = disk.Allocate(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, "vhd: failed to execute fallocate command: foo bar: exit status 0")
})
}
func TestDisk_MakeFilesystem(t *testing.T) {
t.Run("filesystem is created if not found in /etc/fstab", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
// Expect the call from IsMounted here and just return what we need
// to indicate that nothing is currently mounted.
if name == "grep" {
return &mockedExitCode{code: 1}
}
called = true
assert.Equal(t, "mkfs", name)
assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
return nil
},
output: func() ([]byte, error) {
return nil, errors.New("error: can't find in /etc/fstab foo bar testing")
},
}
}
disk := newMockDisk(cmd)
err := disk.MakeFilesystem(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("filesystem is created if error is returned from mount command", func(t *testing.T) {
var called bool
var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
return &mockCmd{
run: func() error {
// Expect the call from IsMounted here and just return what we need
// to indicate that nothing is currently mounted.
if name == "grep" {
return &mockedExitCode{code: 1}
}
called = true
assert.Equal(t, "mkfs", name)
assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
return nil
},
output: func() ([]byte, error) {
if name == "mount" {
return nil, &exec.ExitError{
Stderr: []byte("foo bar: exit status 32\n"),
}
}
return nil, nil
},
}
}
disk := newMockDisk(cmd)
err := disk.MakeFilesystem(context.TODO())
assert.NoError(t, err)
assert.True(t, called)
})
t.Run("error is returned if currently mounted", func(t *testing.T) {
disk := newMockDisk(nil)
err := disk.MakeFilesystem(context.TODO())
assert.Error(t, err)
assert.EqualError(t, err, ErrFilesystemExists.Error())
})
}

View File

@@ -11,11 +11,11 @@ import (
"github.com/apex/log"
"github.com/beevik/etree"
"github.com/buger/jsonparser"
"github.com/goccy/go-json"
"github.com/icza/dyno"
"github.com/magiconair/properties"
"github.com/goccy/go-json"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
"github.com/pterodactyl/wings/config"
)

View File

@@ -10,14 +10,11 @@ import (
"strings"
"time"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/cenkalti/backoff/v4"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)
@@ -33,7 +30,6 @@ type Client interface {
SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
SetTransferStatus(ctx context.Context, uuid string, successful bool) error
ValidateSftpCredentials(ctx context.Context, request SftpAuthRequest) (SftpAuthResponse, error)
SendActivityLogs(ctx context.Context, activity []models.Activity) error
}
type client struct {
@@ -60,18 +56,6 @@ func New(base string, opts ...ClientOption) Client {
return &c
}
// NewFromConfig returns a new Client using the configuration passed through
// by the caller.
func NewFromConfig(cfg *config.Configuration, opts ...ClientOption) Client {
passOpts := []ClientOption{
WithCredentials(cfg.AuthenticationTokenId, cfg.AuthenticationToken),
WithHttpClient(&http.Client{
Timeout: time.Second * time.Duration(cfg.RemoteQuery.Timeout),
}),
}
return New(cfg.PanelLocation, append(passOpts, opts...)...)
}
// WithCredentials sets the credentials to use when making request to the remote
// API endpoint.
func WithCredentials(id, token string) ClientOption {
@@ -144,19 +128,10 @@ func (c *client) requestOnce(ctx context.Context, method, path string, body io.R
// and adds the required authentication headers to the request that is being
// created. Errors returned will be of the RequestError type if there was some
// type of response from the API that can be parsed.
func (c *client) request(ctx context.Context, method, path string, body *bytes.Buffer, opts ...func(r *http.Request)) (*Response, error) {
func (c *client) request(ctx context.Context, method, path string, body io.Reader, opts ...func(r *http.Request)) (*Response, error) {
var res *Response
err := backoff.Retry(func() error {
var b bytes.Buffer
if body != nil {
// We have to create a copy of the body, otherwise attempting this request again will
// send no data if there was initially a body since the "requestOnce" method will read
// the whole buffer, thus leaving it empty at the end.
if _, err := b.Write(body.Bytes()); err != nil {
return backoff.Permanent(errors.Wrap(err, "http: failed to copy body buffer"))
}
}
r, err := c.requestOnce(ctx, method, path, &b, opts...)
r, err := c.requestOnce(ctx, method, path, body, opts...)
if err != nil {
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
return backoff.Permanent(err)
@@ -167,10 +142,12 @@ func (c *client) request(ctx context.Context, method, path string, body *bytes.B
if r.HasError() {
// Close the request body after returning the error to free up resources.
defer r.Body.Close()
// Don't keep attempting to access this endpoint if the response is a 4XX
// level error which indicates a client mistake. Only retry when the error
// is due to a server issue (5XX error).
if r.StatusCode >= 400 && r.StatusCode < 500 {
// Don't keep spamming the endpoint if we've already made too many requests or
// if we're not even authenticated correctly. Retrying generally won't fix either
// of these issues.
if r.StatusCode == http.StatusForbidden ||
r.StatusCode == http.StatusTooManyRequests ||
r.StatusCode == http.StatusUnauthorized {
return backoff.Permanent(r.Error())
}
return r.Error()
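
The hunk above captures two subtleties of the client's retry behaviour: the request body must be rebuilt on every attempt because the first attempt drains the buffer, and 4xx responses abort the backoff instead of retrying. A rough standalone sketch of that pattern with cenkalti/backoff follows; the URL, payload, and helper name are placeholders, not the Wings client API:

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff/v4"
)

func postWithRetry(ctx context.Context, url string, payload []byte) error {
	op := func() error {
		// Rebuild the body reader on every attempt; a previous attempt will have
		// read it to the end, which would otherwise send an empty body on retry.
		req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload))
		if err != nil {
			return backoff.Permanent(err)
		}
		req.Header.Set("Content-Type", "application/json")

		res, err := http.DefaultClient.Do(req)
		if err != nil {
			return err // transport errors are retried
		}
		defer res.Body.Close()

		// Client mistakes (4xx) won't be fixed by retrying; stop immediately.
		if res.StatusCode >= 400 && res.StatusCode < 500 {
			return backoff.Permanent(fmt.Errorf("request failed: %s", res.Status))
		}
		if res.StatusCode >= 500 {
			return fmt.Errorf("server error: %s", res.Status) // retried with backoff
		}
		return nil
	}
	return backoff.Retry(op, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
}

func main() {
	_ = postWithRetry(context.Background(), "https://panel.example.com/api/remote/activity", []byte(`{"data":[]}`))
}
```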

View File

@@ -6,8 +6,6 @@ import (
"strconv"
"sync"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"golang.org/x/sync/errgroup"
@@ -180,16 +178,6 @@ func (c *client) SendRestorationStatus(ctx context.Context, backup string, succe
return nil
}
// SendActivityLogs sends activity logs back to the Panel for processing.
func (c *client) SendActivityLogs(ctx context.Context, activity []models.Activity) error {
resp, err := c.Post(ctx, "/activity", d{"data": activity})
if err != nil {
return errors.WithStackIf(err)
}
_ = resp.Body.Close()
return nil
}
// getServersPaged returns a subset of servers from the Panel API using the
// pagination query parameters.
func (c *client) getServersPaged(ctx context.Context, page, limit int) ([]RawServerData, Pagination, error) {

View File

@@ -11,11 +11,6 @@ import (
"github.com/pterodactyl/wings/parser"
)
const (
SftpAuthPassword = SftpAuthRequestType("password")
SftpAuthPublicKey = SftpAuthRequestType("public_key")
)
// A generic type allowing for easy binding use when making requests to API
// endpoints that only expect a singular argument or something that would not
// benefit from being a typed struct.
@@ -68,17 +63,14 @@ type RawServerData struct {
ProcessConfiguration json.RawMessage `json:"process_configuration"`
}
type SftpAuthRequestType string
// SftpAuthRequest defines the request details that are passed along to the Panel
// when determining if the credentials provided to Wings are valid.
type SftpAuthRequest struct {
Type SftpAuthRequestType `json:"type"`
User string `json:"username"`
Pass string `json:"password"`
IP string `json:"ip"`
SessionID []byte `json:"session_id"`
ClientVersion []byte `json:"client_version"`
User string `json:"username"`
Pass string `json:"password"`
IP string `json:"ip"`
SessionID []byte `json:"session_id"`
ClientVersion []byte `json:"client_version"`
}
// SftpAuthResponse is returned by the Panel when a pair of SFTP credentials
@@ -87,7 +79,7 @@ type SftpAuthRequest struct {
// user for the SFTP subsystem.
type SftpAuthResponse struct {
Server string `json:"server"`
User string `json:"user"`
Token string `json:"token"`
Permissions []string `json:"permissions"`
}
@@ -157,15 +149,9 @@ type BackupRemoteUploadResponse struct {
PartSize int64 `json:"part_size"`
}
type BackupPart struct {
ETag string `json:"etag"`
PartNumber int `json:"part_number"`
}
type BackupRequest struct {
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Successful bool `json:"successful"`
Parts []BackupPart `json:"parts"`
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Successful bool `json:"successful"`
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"mime"
"net"
"net/http"
"net/url"
@@ -14,8 +13,8 @@ import (
"time"
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/server"
)
@@ -78,13 +77,10 @@ func (c *Counter) Write(p []byte) (int, error) {
type DownloadRequest struct {
Directory string
URL *url.URL
FileName string
UseHeader bool
}
type Download struct {
Identifier string
path string
mu sync.RWMutex
req DownloadRequest
server *server.Server
@@ -176,28 +172,8 @@ func (dl *Download) Execute() error {
}
}
if dl.req.UseHeader {
if contentDisposition := res.Header.Get("Content-Disposition"); contentDisposition != "" {
_, params, err := mime.ParseMediaType(contentDisposition)
if err != nil {
return errors.WrapIf(err, "downloader: invalid \"Content-Disposition\" header")
}
if v, ok := params["filename"]; ok {
dl.path = v
}
}
}
if dl.path == "" {
if dl.req.FileName != "" {
dl.path = dl.req.FileName
} else {
parts := strings.Split(dl.req.URL.Path, "/")
dl.path = parts[len(parts)-1]
}
}
p := dl.Path()
fnameparts := strings.Split(dl.req.URL.Path, "/")
p := filepath.Join(dl.req.Directory, fnameparts[len(fnameparts)-1])
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
@@ -229,10 +205,6 @@ func (dl *Download) Progress() float64 {
return dl.progress
}
func (dl *Download) Path() string {
return filepath.Join(dl.req.Directory, dl.path)
}
// Handles a write event by updating the progress completed percentage and firing off
// events to the server websocket as needed.
func (dl *Download) counter(contentLength int64) *Counter {
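
The Content-Disposition handling referenced above relies on the standard library's mime package to pull a filename out of the response header. A small sketch of just that parsing step; the header value is a made-up example:

```go
package main

import (
	"fmt"
	"log"
	"mime"
)

func main() {
	header := `attachment; filename="world-backup.tar.gz"`

	// ParseMediaType splits the disposition type from its parameters.
	_, params, err := mime.ParseMediaType(header)
	if err != nil {
		log.Fatal(err)
	}
	if name, ok := params["filename"]; ok {
		fmt.Println(name) // world-backup.tar.gz
	}
}
```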

View File

@@ -4,7 +4,6 @@ import (
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/router/middleware"
wserver "github.com/pterodactyl/wings/server"
@@ -16,7 +15,6 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
router := gin.New()
router.Use(gin.Recovery())
router.SetTrustedProxies(config.Get().Api.TrustedProxies)
router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
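
The SetTrustedProxies call added above is what makes gin's ClientIP honour forwarded headers only when the request actually arrives from a proxy the operator trusts. A bare-bones sketch of the same call outside of Wings; the CIDR ranges and route are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	router := gin.New()
	router.Use(gin.Recovery())

	// Only requests arriving from these ranges may influence the client IP via
	// X-Forwarded-For; everything else falls back to the raw remote address.
	if err := router.SetTrustedProxies([]string{"10.0.0.0/8", "172.16.0.0/12"}); err != nil {
		log.Fatal(err)
	}

	router.GET("/ip", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"ip": c.ClientIP()})
	})

	_ = router.Run(":8080")
}
```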

View File

@@ -9,7 +9,6 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/router/downloader"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/tokens"
@@ -181,7 +180,7 @@ func postServerReinstall(c *gin.Context) {
c.Status(http.StatusAccepted)
}
// Deletes a server from the wings daemon and dissociate its objects.
// Deletes a server from the wings daemon and dissociate it's objects.
func deleteServer(c *gin.Context) {
s := middleware.ExtractServer(c)

View File

@@ -13,10 +13,6 @@ import (
"strconv"
"strings"
"github.com/pterodactyl/wings/internal/models"
"github.com/pterodactyl/wings/config"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gin-gonic/gin"
@@ -39,15 +35,6 @@ func getServerFileContents(c *gin.Context) {
return
}
defer f.Close()
// Don't allow a named pipe to be opened.
//
// @see https://github.com/pterodactyl/panel/issues/4059
if st.Mode()&os.ModeNamedPipe != 0 {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot open files of this type.",
})
return
}
c.Header("X-Mime-Type", st.Mimetype)
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
@@ -133,10 +120,6 @@ func putServerRenameFiles(c *gin.Context) {
// Return nil if the error is an is not exists.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
s.Log().WithField("error", err).
WithField("from_path", pf).
WithField("to_path", pt).
Warn("failed to rename: source or target does not exist")
return nil
}
return err
@@ -272,12 +255,9 @@ func postServerPullRemoteFile(c *gin.Context) {
s := ExtractServer(c)
var data struct {
// Deprecated
Directory string `binding:"required_without=RootPath,omitempty" json:"directory"`
RootPath string `binding:"required_without=Directory,omitempty" json:"root"`
URL string `binding:"required" json:"url"`
FileName string `json:"file_name"`
UseHeader bool `json:"use_header"`
Foreground bool `json:"foreground"`
Directory string `binding:"required_without=RootPath,omitempty" json:"directory"`
RootPath string `binding:"required_without=Directory,omitempty" json:"root"`
URL string `binding:"required" json:"url"`
}
if err := c.BindJSON(&data); err != nil {
return
@@ -315,41 +295,21 @@ func postServerPullRemoteFile(c *gin.Context) {
dl := downloader.New(s, downloader.DownloadRequest{
Directory: data.RootPath,
URL: u,
FileName: data.FileName,
UseHeader: data.UseHeader,
})
download := func() error {
// Execute this pull in a separate thread since it may take a long time to complete.
go func() {
s.Log().WithField("download_id", dl.Identifier).WithField("url", u.String()).Info("starting pull of remote file to disk")
if err := dl.Execute(); err != nil {
s.Log().WithField("download_id", dl.Identifier).WithField("error", err).Error("failed to pull remote file")
return err
} else {
s.Log().WithField("download_id", dl.Identifier).Info("completed pull of remote file")
}
return nil
}
if !data.Foreground {
go func() {
_ = download()
}()
c.JSON(http.StatusAccepted, gin.H{
"identifier": dl.Identifier,
})
return
}
}()
if err := download(); err != nil {
NewServerError(err, s).Abort(c)
return
}
st, err := s.Filesystem().Stat(dl.Path())
if err != nil {
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.JSON(http.StatusOK, &st)
c.JSON(http.StatusAccepted, gin.H{
"identifier": dl.Identifier,
})
}
// Stops a remote file download if it exists and belongs to this server.
@@ -577,16 +537,8 @@ func postServerUploadFiles(c *gin.Context) {
directory := c.Query("directory")
maxFileSize := config.Get().Api.UploadLimit
maxFileSizeBytes := maxFileSize * 1024 * 1024
var totalSize int64
for _, header := range headers {
if header.Size > maxFileSizeBytes {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "File " + header.Filename + " is larger than the maximum file upload size of " + strconv.FormatInt(maxFileSize, 10) + " MB.",
})
return
}
totalSize += header.Size
}
@@ -602,11 +554,6 @@ func postServerUploadFiles(c *gin.Context) {
if err := handleFileUpload(p, s, header); err != nil {
NewServerError(err, s).Abort(c)
return
} else {
s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.ClientIP()), server.ActivityFileUploaded, models.ActivityMeta{
"file": header.Filename,
"directory": filepath.Clean(directory),
})
}
}
}
@@ -624,5 +571,6 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
if err := s.Filesystem().Writefile(p, file); err != nil {
return err
}
return nil
}
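
The upload handler above converts the configured megabyte limit to bytes and rejects any single part that exceeds it before anything touches the disk. Below is a simplified sketch of that pre-check detached from gin and the Wings config; the limit and file names are invented for the example:

```go
package main

import (
	"fmt"
	"mime/multipart"
)

// checkUploadSizes enforces a per-file limit (given in MiB) across a set of multipart
// headers and reports the aggregate size, mirroring the shape of the handler's pre-check.
func checkUploadSizes(headers []*multipart.FileHeader, maxFileSizeMiB int64) (int64, error) {
	maxBytes := maxFileSizeMiB * 1024 * 1024
	var total int64
	for _, h := range headers {
		if h.Size > maxBytes {
			return 0, fmt.Errorf("file %s is larger than the maximum upload size of %d MB", h.Filename, maxFileSizeMiB)
		}
		total += h.Size
	}
	return total, nil
}

func main() {
	headers := []*multipart.FileHeader{
		{Filename: "server.properties", Size: 4 * 1024},
		{Filename: "world.zip", Size: 600 * 1024 * 1024},
	}
	if _, err := checkUploadSizes(headers, 100); err != nil {
		fmt.Println(err) // file world.zip is larger than the maximum upload size of 100 MB
	}
}
```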

View File

@@ -5,8 +5,8 @@ import (
"time"
"github.com/gin-gonic/gin"
"github.com/goccy/go-json"
ws "github.com/gorilla/websocket"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/websocket"
@@ -32,7 +32,7 @@ func getServerWebsocket(c *gin.Context) {
ctx, cancel := context.WithCancel(c.Request.Context())
defer cancel()
handler, err := websocket.GetHandler(s, c.Writer, c.Request, c)
handler, err := websocket.GetHandler(s, c.Writer, c.Request)
if err != nil {
NewServerError(err, s).Abort(c)
return

View File

@@ -12,6 +12,7 @@ import (
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"emperror.dev/errors"
@@ -29,9 +30,19 @@ import (
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/server/filesystem"
"github.com/pterodactyl/wings/system"
)
const progressWidth = 25
// Number of ticks in the progress bar
const ticks = 25
// 100% / number of ticks = percentage represented by each tick
const tickPercentage = 100 / ticks
type downloadProgress struct {
size int64
progress int64
}
// Data passed over to initiate a server transfer.
type serverTransferRequest struct {
@@ -84,7 +95,7 @@ func getServerArchive(c *gin.Context) {
return
}
// Compute sha256 checksum.
// Compute sha1 checksum.
h := sha256.New()
f, err := os.Open(archivePath)
if err != nil {
@@ -167,41 +178,17 @@ func postServerArchive(c *gin.Context) {
// Ensure the server is offline. Sometimes a "No such container" error gets through
// which means the server is already stopped. We can ignore that.
if err := s.Environment.WaitForStop(s.Context(), time.Minute, false); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
if err := s.Environment.WaitForStop(60, false); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
sendTransferLog("Failed to stop server, aborting transfer..")
l.WithField("error", err).Error("failed to stop server")
return
}
// Get the disk usage of the server (used to calculate the progress of the archive process)
rawSize, err := s.Filesystem().DiskUsage(true)
if err != nil {
sendTransferLog("Failed to get disk usage for server, aborting transfer..")
l.WithField("error", err).Error("failed to get disk usage for server")
return
}
// Create an archive of the entire server's data directory.
a := &filesystem.Archive{
BasePath: s.Filesystem().Path(),
Progress: filesystem.NewProgress(rawSize),
}
// Send the archive progress to the websocket every 3 seconds.
ctx, cancel := context.WithCancel(s.Context())
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
sendTransferLog("Archiving " + p.Progress(progressWidth))
}
}
}(ctx, a.Progress, time.NewTicker(5*time.Second))
// Attempt to get an archive of the server.
if err := a.Create(getArchivePath(s.ID())); err != nil {
sendTransferLog("An error occurred while archiving the server: " + err.Error())
@@ -209,12 +196,6 @@ func postServerArchive(c *gin.Context) {
return
}
// Cancel the progress ticker.
cancel()
// Show 100% completion.
sendTransferLog("Archiving " + a.Progress.Progress(progressWidth))
sendTransferLog("Successfully created archive, attempting to notify panel..")
l.Info("successfully created server transfer archive, notifying panel..")
@@ -242,6 +223,12 @@ func postServerArchive(c *gin.Context) {
c.Status(http.StatusAccepted)
}
func (w *downloadProgress) Write(v []byte) (int, error) {
n := len(v)
atomic.AddInt64(&w.progress, int64(n))
return n, nil
}
// Log helper function to attach all errors and info output to a consistently formatted
// log string for easier querying.
func (str serverTransferRequest) log() *log.Entry {
@@ -334,7 +321,7 @@ func postTransfer(c *gin.Context) {
manager := middleware.ExtractManager(c)
u, err := uuid.Parse(data.ServerID)
if err != nil {
_ = WithError(c, err)
WithError(c, err)
return
}
// Force the server ID to be a valid UUID string at this point. If it is not an error
@@ -344,12 +331,11 @@ func postTransfer(c *gin.Context) {
data.log().Info("handling incoming server transfer request")
go func(data *serverTransferRequest) {
ctx := context.Background()
hasError := true
// Create a new server installer. This will only configure the environment and not
// run the installer scripts.
i, err := installer.New(ctx, manager, data.Server)
i, err := installer.New(context.Background(), manager, data.Server)
if err != nil {
_ = data.sendTransferStatus(manager.Client(), false)
data.log().WithField("error", err).Error("failed to validate received server data")
@@ -421,22 +407,25 @@ func postTransfer(c *gin.Context) {
sendTransferLog("Writing archive to disk...")
data.log().Info("writing transfer archive to disk...")
progress := filesystem.NewProgress(size)
// Send the archive progress to the websocket every 3 seconds.
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
sendTransferLog("Downloading " + p.Progress(progressWidth))
}
// Copy the file.
progress := &downloadProgress{size: size}
ticker := time.NewTicker(3 * time.Second)
go func(progress *downloadProgress, t *time.Ticker) {
for range ticker.C {
// p = 100 (Downloaded)
// size = 1000 (Content-Length)
// p / size = 0.1
// * 100 = 10% (Multiply by 100 to get a percentage of the download)
// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
// 2.5 (Number of ticks as a float64)
// 2 (convert to an integer)
p := atomic.LoadInt64(&progress.progress)
// We have to cast these numbers to float in order to get a float result from the division.
width := ((float64(p) / float64(size)) * 100) / tickPercentage
bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
}
}(ctx, progress, time.NewTicker(5*time.Second))
}(progress, ticker)
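For a concrete sense of the arithmetic walked through in the comment above, here is a minimal, self-contained sketch (illustrative only; the 25-tick bar width is an assumption matching the value implied for the ticks constant):
// Illustration: bar-width arithmetic with concrete numbers.
package main

import (
	"fmt"
	"strings"
)

func main() {
	const ticks = 25
	p, size := int64(100), int64(1000) // 100 bytes received of a 1000-byte archive
	width := ((float64(p) / float64(size)) * 100) / (float64(100) / float64(ticks))
	bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
	fmt.Println("[" + bar + "]") // 10% of the download -> 2 of 25 ticks filled
}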
var reader io.Reader
downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
@@ -449,16 +438,18 @@ func postTransfer(c *gin.Context) {
buf := make([]byte, 1024*4)
if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
ticker.Stop()
_ = file.Close()
sendTransferLog("Failed while writing archive file to disk: " + err.Error())
data.log().WithField("error", err).Error("failed to copy archive file to disk")
return
}
cancel()
ticker.Stop()
// Show 100% completion.
sendTransferLog("Downloading " + progress.Progress(progressWidth))
humanSize := system.FormatBytes(progress.size)
sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)
if err := file.Close(); err != nil {
data.log().WithField("error", err).Error("unable to close archive file on local filesystem")

View File

@@ -8,7 +8,6 @@ type UploadPayload struct {
jwt.Payload
ServerUuid string `json:"server_uuid"`
UserUuid string `json:"user_uuid"`
UniqueId string `json:"unique_id"`
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/goccy/go-json"
)
// The time at which Wings was booted. No JWT's created before this time are allowed to
@@ -34,15 +35,15 @@ func DenyJTI(jti string) {
denylist.Store(jti, time.Now())
}
// WebsocketPayload defines the JWT payload for a websocket connection. This JWT is passed along to
// the websocket after it has been connected to by sending an "auth" event.
// A JWT payload for Websocket connections. This JWT is passed along to the Websocket after
// it has been connected to by sending an "auth" event.
type WebsocketPayload struct {
jwt.Payload
sync.RWMutex
UserUUID string `json:"user_uuid"`
ServerUUID string `json:"server_uuid"`
Permissions []string `json:"permissions"`
UserID json.Number `json:"user_id"`
ServerUUID string `json:"server_uuid"`
Permissions []string `json:"permissions"`
}
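As a hedged sketch of the handshake described above (the Message type is assumed from the websocket package shown elsewhere in this diff, the connection is a gorilla/websocket connection, and the helper name is hypothetical), the client authenticates by sending the signed JWT as the sole argument of an "auth" event:
// sendAuth performs the client side of the handshake over an established websocket connection.
func sendAuth(conn *websocket.Conn, signedJWT string) error {
	// The panel sends the signed token as the only argument of an "auth" event.
	return conn.WriteJSON(Message{Event: "auth", Args: []string{signedJWT}})
}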
// Returns the JWT payload.

View File

@@ -9,8 +9,6 @@ import (
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/server"
)
@@ -90,13 +88,12 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
eventChan := make(chan []byte)
eventChan := make(chan events.Event)
logOutput := make(chan []byte, 8)
installOutput := make(chan []byte, 4)
h.server.Events().On(eventChan) // TODO: make a sinky
h.server.Sink(system.LogSink).On(logOutput)
h.server.Sink(system.InstallSink).On(installOutput)
h.server.Events().On(eventChan, e...)
h.server.Sink(server.LogSink).On(logOutput)
h.server.Sink(server.InstallSink).On(installOutput)
onError := func(evt string, err2 error) {
h.Logger().WithField("event", evt).WithField("error", err2).Error("failed to send event over server websocket")
@@ -113,23 +110,19 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
select {
case <-ctx.Done():
break
case b := <-logOutput:
sendErr := h.SendJson(Message{Event: server.ConsoleOutputEvent, Args: []string{string(b)}})
case e := <-logOutput:
sendErr := h.SendJson(Message{Event: server.ConsoleOutputEvent, Args: []string{string(e)}})
if sendErr == nil {
continue
}
onError(server.ConsoleOutputEvent, sendErr)
case b := <-installOutput:
sendErr := h.SendJson(Message{Event: server.InstallOutputEvent, Args: []string{string(b)}})
case e := <-installOutput:
sendErr := h.SendJson(Message{Event: server.InstallOutputEvent, Args: []string{string(e)}})
if sendErr == nil {
continue
}
onError(server.InstallOutputEvent, sendErr)
case b := <-eventChan:
var e events.Event
if err := events.DecodeTo(b, &e); err != nil {
continue
}
case e := <-eventChan:
var sendErr error
message := Message{Event: e.Topic}
if str, ok := e.Data.(string); ok {
@@ -155,9 +148,9 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
}
// These functions will automatically close the channel if it hasn't been already.
h.server.Events().Off(eventChan)
h.server.Sink(system.LogSink).Off(logOutput)
h.server.Sink(system.InstallSink).Off(installOutput)
h.server.Events().Off(eventChan, e...)
h.server.Sink(server.LogSink).Off(logOutput)
h.server.Sink(server.InstallSink).Off(installOutput)
// If the internal context is stopped it is either because the parent context
// got canceled or because we ran into an error. If the "err" variable is nil

View File

@@ -8,17 +8,12 @@ import (
"sync"
"time"
"github.com/pterodactyl/wings/internal/models"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/gin-gonic/gin"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/pterodactyl/wings/system"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
@@ -44,7 +39,6 @@ type Handler struct {
Connection *websocket.Conn `json:"-"`
jwt *tokens.WebsocketPayload
server *server.Server
ra server.RequestActivity
uuid uuid.UUID
}
@@ -82,7 +76,7 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
}
// GetHandler returns a new websocket handler using the context provided.
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Handler, error) {
upgrader := websocket.Upgrader{
// Ensure that the websocket request is originating from the Panel itself,
// and not some other location.
@@ -114,7 +108,6 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin
Connection: conn,
jwt: nil,
server: s,
ra: s.NewRequestActivity("", c.ClientIP()),
uuid: u,
}, nil
}
@@ -270,7 +263,6 @@ func (h *Handler) GetJwt() *tokens.WebsocketPayload {
// setJwt sets the JWT for the websocket in a race-safe manner.
func (h *Handler) setJwt(token *tokens.WebsocketPayload) {
h.Lock()
h.ra = h.ra.SetUser(token.UserUUID)
h.jwt = token
h.Unlock()
}
@@ -361,7 +353,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
}
err := h.server.HandlePowerAction(action)
if errors.Is(err, system.ErrLockerLocked) {
if errors.Is(err, context.DeadlineExceeded) {
m, _ := h.GetErrorMessage("another power action is currently being processed for this server, please try again later")
_ = h.SendJson(Message{
@@ -372,10 +364,6 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
return nil
}
if err == nil {
h.server.SaveActivity(h.ra, models.Event(server.ActivityPowerPrefix+action), nil)
}
return err
}
case SendServerLogsEvent:
@@ -432,13 +420,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
}
}
if err := h.server.Environment.SendCommand(strings.Join(m.Args, "")); err != nil {
return err
}
h.server.SaveActivity(h.ra, server.ActivityConsoleCommand, models.ActivityMeta{
"command": strings.Join(m.Args, ""),
})
return nil
return h.server.Environment.SendCommand(strings.Join(m.Args, ""))
}
}

View File

@@ -1,5 +1,5 @@
Name: ptero-wings
Version: 1.7.0
Version: 1.5.3
Release: 1%{?dist}
Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
BuildArch: x86_64
@@ -91,9 +91,6 @@ rm -rf /var/log/pterodactyl
wings --version
%changelog
* Wed Sep 14 2022 Chance Callahan <ccallaha@redhat.com> - 1.7.0-1
- Updating specfile to match stable release.
* Wed Oct 27 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.3-1
- specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
- Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.3

View File

@@ -1,66 +0,0 @@
package server
import (
"context"
"time"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
)
const ActivityPowerPrefix = "server:power."
const (
ActivityConsoleCommand = models.Event("server:console.command")
ActivitySftpWrite = models.Event("server:sftp.write")
ActivitySftpCreate = models.Event("server:sftp.create")
ActivitySftpCreateDirectory = models.Event("server:sftp.create-directory")
ActivitySftpRename = models.Event("server:sftp.rename")
ActivitySftpDelete = models.Event("server:sftp.delete")
ActivityFileUploaded = models.Event("server:file.uploaded")
)
// RequestActivity is a wrapper around a LoggedEvent that is able to track additional request
// specific metadata including the specific user and IP address associated with all subsequent
// events. The internal logged event structure can be extracted by calling RequestEvent.Event().
type RequestActivity struct {
server string
user string
ip string
}
// Event returns the underlying logged event from the RequestEvent instance and sets the
// specific event and metadata on it.
func (ra RequestActivity) Event(event models.Event, metadata models.ActivityMeta) *models.Activity {
a := models.Activity{Server: ra.server, IP: ra.ip, Event: event, Metadata: metadata}
return a.SetUser(ra.user)
}
// SetUser clones the RequestActivity struct and sets a new user value on the copy
// before returning it.
func (ra RequestActivity) SetUser(u string) RequestActivity {
c := ra
c.user = u
return c
}
func (s *Server) NewRequestActivity(user string, ip string) RequestActivity {
return RequestActivity{server: s.ID(), user: user, ip: ip}
}
// SaveActivity saves an activity entry to the database in a background routine. If an error is
// encountered it is logged but not returned to the caller.
func (s *Server) SaveActivity(a RequestActivity, event models.Event, metadata models.ActivityMeta) {
ctx, cancel := context.WithTimeout(s.Context(), time.Second*3)
go func() {
defer cancel()
if tx := database.Instance().WithContext(ctx).Create(a.Event(event, metadata)); tx.Error != nil {
s.Log().WithField("error", errors.WithStack(tx.Error)).
WithField("event", event).
Error("activity: failed to save event")
}
}()
}

View File

@@ -142,7 +142,7 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
// instance, otherwise you'll likely hit all types of write errors due to the
// server being suspended.
if s.Environment.State() != environment.ProcessOfflineState {
if err = s.Environment.WaitForStop(s.Context(), time.Minute*2, false); err != nil {
if err = s.Environment.WaitForStop(120, false); err != nil {
if !client.IsErrNotFound(err) {
return errors.WrapIf(err, "server/backup: restore: failed to wait for container stop")
}

View File

@@ -32,7 +32,7 @@ type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mti
// noinspection GoNameStartsWithPackageName
type BackupInterface interface {
// SetClient sets the API request client on the backup interface.
SetClient(remote.Client)
SetClient(c remote.Client)
// Identifier returns the UUID of this backup as tracked by the panel
// instance.
Identifier() string
@@ -41,7 +41,7 @@ type BackupInterface interface {
WithLogContext(map[string]interface{})
// Generate creates a backup in whatever the configured source for the
// specific implementation is.
Generate(context.Context, string, string) (*ArchiveDetails, error)
Generate(ctx context.Context, basePath string, ignore string) (*ArchiveDetails, error)
// Ignored returns the ignored files for this backup instance.
Ignored() string
// Checksum returns a SHA1 checksum for the generated backup.
@@ -53,13 +53,13 @@ type BackupInterface interface {
// to store it until it is moved to the final spot.
Path() string
// Details returns details about the archive.
Details(context.Context, []remote.BackupPart) (*ArchiveDetails, error)
Details(ctx context.Context) (*ArchiveDetails, error)
// Remove removes a backup file.
Remove() error
// Restore is called when a backup is ready to be restored to the disk from
// the given source. Not every backup implementation will support this nor
// will every implementation require a reader be provided.
Restore(context.Context, io.Reader, RestoreCallback) error
Restore(ctx context.Context, reader io.Reader, callback RestoreCallback) error
}
type Backup struct {
@@ -119,8 +119,8 @@ func (b *Backup) Checksum() ([]byte, error) {
// Details returns both the checksum and size of the archive currently stored on
// the disk to the caller.
func (b *Backup) Details(ctx context.Context, parts []remote.BackupPart) (*ArchiveDetails, error) {
ad := ArchiveDetails{ChecksumType: "sha1", Parts: parts}
func (b *Backup) Details(ctx context.Context) (*ArchiveDetails, error) {
ad := ArchiveDetails{ChecksumType: "sha1"}
g, ctx := errgroup.WithContext(ctx)
g.Go(func() error {
@@ -162,10 +162,9 @@ func (b *Backup) log() *log.Entry {
}
type ArchiveDetails struct {
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
Parts []remote.BackupPart `json:"parts"`
Checksum string `json:"checksum"`
ChecksumType string `json:"checksum_type"`
Size int64 `json:"size"`
}
// ToRequest returns a request object.
@@ -175,6 +174,5 @@ func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
ChecksumType: ad.ChecksumType,
Size: ad.Size,
Successful: successful,
Parts: ad.Parts,
}
}

View File

@@ -69,7 +69,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
}
b.log().Info("created backup successfully")
ad, err := b.Details(ctx, nil)
ad, err := b.Details(ctx)
if err != nil {
return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
}

View File

@@ -71,11 +71,10 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*Arch
}
defer rc.Close()
parts, err := s.generateRemoteRequest(ctx, rc)
if err != nil {
if err := s.generateRemoteRequest(ctx, rc); err != nil {
return nil, err
}
ad, err := s.Details(ctx, parts)
ad, err := s.Details(ctx)
if err != nil {
return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
}
@@ -126,20 +125,20 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
}
// Generates the remote S3 request and begins the upload.
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) error {
defer rc.Close()
s.log().Debug("attempting to get size of backup...")
size, err := s.Backup.Size()
if err != nil {
return nil, err
return err
}
s.log().WithField("size", size).Debug("got size of backup")
s.log().Debug("attempting to get S3 upload urls from Panel...")
urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
if err != nil {
return nil, err
return err
}
s.log().Debug("got S3 upload urls from the Panel")
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
@@ -157,26 +156,22 @@ func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser)
}
// Attempt to upload the part.
etag, err := uploader.uploadPart(ctx, part, partSize)
if err != nil {
if _, err := uploader.uploadPart(ctx, part, partSize); err != nil {
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
return nil, err
return err
}
uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
ETag: etag,
PartNumber: i + 1,
})
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
}
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
return uploader.uploadedParts, nil
return nil
}
type s3FileUploader struct {
io.ReadCloser
client *http.Client
uploadedParts []remote.BackupPart
client *http.Client
}
// newS3FileUploader returns a new file uploader instance.

View File

@@ -6,14 +6,12 @@ import (
"github.com/gammazero/workerpool"
)
// UpdateConfigurationFiles updates all of the defined configuration files for
// a server automatically to ensure that they always use the specified values.
// Parent function that will update all of the defined configuration files for a server
// automatically to ensure that they always use the specified values.
func (s *Server) UpdateConfigurationFiles() {
pool := workerpool.New(runtime.NumCPU())
s.Log().Debug("acquiring process configuration files...")
files := s.ProcessConfiguration().ConfigurationFiles
s.Log().Debug("acquired process configuration files")
for _, cf := range files {
f := cf
@@ -28,8 +26,6 @@ func (s *Server) UpdateConfigurationFiles() {
if err := f.Parse(p, false); err != nil {
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
}
s.Log().WithField("path", f.FileName).Debug("finished processing server configuration file")
})
}

View File

@@ -16,11 +16,6 @@ type EggConfiguration struct {
FileDenylist []string `json:"file_denylist"`
}
type ConfigurationMeta struct {
Name string `json:"name"`
Description string `json:"description"`
}
type Configuration struct {
mu sync.RWMutex
@@ -29,8 +24,6 @@ type Configuration struct {
// docker containers as well as in log output.
Uuid string `json:"uuid"`
Meta ConfigurationMeta `json:"meta"`
// Whether or not the server is in a suspended state. Suspended servers cannot
// be started or modified except in certain scenarios by an admin user.
Suspended bool `json:"suspended"`

View File

@@ -6,7 +6,6 @@ import (
"time"
"github.com/mitchellh/colorstring"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)

View File

@@ -19,7 +19,7 @@ func TestName(t *testing.T) {
})
g.It("calls strike once per time period", func() {
t := newConsoleThrottle(1, time.Millisecond*20)
t := newConsoleThrottle(1, time.Millisecond * 20)
var times int
t.strike = func() {
@@ -53,10 +53,10 @@ func TestName(t *testing.T) {
}
func BenchmarkConsoleThrottle(b *testing.B) {
t := newConsoleThrottle(10, time.Millisecond*10)
t := newConsoleThrottle(10, time.Millisecond * 10)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
t.Allow()
}
for i := 0; i < b.N; i++ {
t.Allow()
}
}

View File

@@ -2,7 +2,6 @@ package server
import (
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
)
// Defines all of the possible output events for a server.
@@ -21,7 +20,7 @@ const (
TransferStatusEvent = "transfer status"
)
// Events returns the server's emitter instance.
// Returns the server's emitter instance.
func (s *Server) Events() *events.Bus {
s.emitterLock.Lock()
defer s.emitterLock.Unlock()
@@ -32,24 +31,3 @@ func (s *Server) Events() *events.Bus {
return s.emitter
}
// Sink returns the instantiated and named sink for a server. If the sink has
// not been configured yet this function will cause a panic condition.
func (s *Server) Sink(name system.SinkName) *system.SinkPool {
sink, ok := s.sinks[name]
if !ok {
s.Log().Fatalf("attempt to access nil sink: %s", name)
}
return sink
}
// DestroyAllSinks iterates over all of the sinks configured for the server and
// destroys their instances. Note that this will cause a panic if you attempt
// to call Server.Sink() again after. This function is only used when a server
// is being deleted from the system.
func (s *Server) DestroyAllSinks() {
s.Log().Info("destroying all registered sinks for server instance")
for _, sink := range s.sinks {
sink.Destroy()
}
}

View File

@@ -8,7 +8,6 @@ import (
"path/filepath"
"strings"
"sync"
"sync/atomic"
"emperror.dev/errors"
"github.com/apex/log"
@@ -18,7 +17,6 @@ import (
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
)
const memory = 4 * 1024
@@ -30,62 +28,6 @@ var pool = sync.Pool{
},
}
// Progress is used to track the progress of any I/O operations that are being
// performed.
type Progress struct {
// written is the total size of the files that have been written to the writer.
written int64
// Total is the total size of the archive in bytes.
total int64
// w is the underlying writer that, when set, receives everything written to the tracker.
w io.Writer
}
// NewProgress returns a new progress tracker for an operation with the given total size in bytes.
func NewProgress(total int64) *Progress {
return &Progress{total: total}
}
// Written returns the total number of bytes written.
// This function should be used when the progress is tracking data being written.
func (p *Progress) Written() int64 {
return atomic.LoadInt64(&p.written)
}
// Total returns the total size in bytes.
func (p *Progress) Total() int64 {
return atomic.LoadInt64(&p.total)
}
// Write totals the number of bytes that have been written to the writer.
func (p *Progress) Write(v []byte) (int, error) {
n := len(v)
atomic.AddInt64(&p.written, int64(n))
if p.w != nil {
return p.w.Write(v)
}
return n, nil
}
// Progress returns a formatted progress string for the current progress.
func (p *Progress) Progress(width int) string {
current := p.Written()
total := p.Total()
// current = 100 (bytes written so far)
// total = 1000 (total size in bytes)
// current / total = 0.1
// * 100 = 10% (multiply by 100 to get a percentage of the progress)
// 10% / tickPercentage = 10% / (100 / width) (divide by the per-tick percentage to get the number of ticks)
// 2.5 (number of ticks as a float64)
// 2 (converted to an integer)
// We have to cast these numbers to float in order to get a float result from the division.
ticks := ((float64(current) / float64(total)) * 100) / (float64(100) / float64(width))
bar := strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks))
return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
}
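As a rough usage sketch of this (since-removed) tracker, assuming the Progress type above and the standard io package are in scope, byte counts can be captured by teeing a reader through it; the helper name is illustrative:
// demoProgress copies r while counting the bytes that pass through, then renders a 25-wide bar.
func demoProgress(r io.Reader, total int64) (string, error) {
	p := NewProgress(total)
	if _, err := io.Copy(io.Discard, io.TeeReader(r, p)); err != nil {
		return "", err
	}
	return p.Progress(25), nil // a 25-character bar plus "written / total" in human-readable bytes
}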
type Archive struct {
// BasePath is the absolute path to create the archive from where Files and Ignore are
// relative to.
@@ -98,13 +40,10 @@ type Archive struct {
// Files specifies the files to archive, this takes priority over the Ignore option, if
// unspecified, all files in the BasePath will be archived unless Ignore is set.
Files []string
// Progress wraps the writer of the archive to pass through the progress tracker.
Progress *Progress
}
// Create creates an archive at dst with all the files defined in the
// included Files array.
// Create creates an archive at dst with all of the files defined in the
// included files struct.
func (a *Archive) Create(dst string) error {
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
@@ -123,34 +62,13 @@ func (a *Archive) Create(dst string) error {
writer = f
}
// Choose which compression level to use based on the compression_level configuration option
var compressionLevel int
switch config.Get().System.Backups.CompressionLevel {
case "none":
compressionLevel = pgzip.NoCompression
case "best_compression":
compressionLevel = pgzip.BestCompression
case "best_speed":
fallthrough
default:
compressionLevel = pgzip.BestSpeed
}
// Create a new gzip writer around the file.
gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
_ = gw.SetConcurrency(1<<20, 1)
defer gw.Close()
var pw io.Writer
if a.Progress != nil {
a.Progress.w = gw
pw = a.Progress
} else {
pw = gw
}
// Create a new tar writer around the gzip writer.
tw := tar.NewWriter(pw)
tw := tar.NewWriter(gw)
defer tw.Close()
// Configure godirwalk.
@@ -185,7 +103,7 @@ func (a *Archive) Create(dst string) error {
// being generated.
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
return func(path string, de *godirwalk.Dirent) error {
// Skip directories because we are walking them recursively.
// Skip directories because we walking them recursively.
if de.IsDir() {
return nil
}
@@ -212,7 +130,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
for _, f := range a.Files {
// If the given doesn't match, or doesn't have the same prefix continue
// to the next item in the loop.
if p != f && !strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", f) {
if p != f && !strings.HasPrefix(p, f) {
continue
}
@@ -230,7 +148,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
// Adds a given file path to the final archive being created.
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
// Lstat the file, this will give us the same information as Stat except that it will not
// follow a symlink to its target automatically. This is important to avoid including
// follow a symlink to it's target automatically. This is important to avoid including
// files that exist outside the server root unintentionally in the backup.
s, err := os.Lstat(p)
if err != nil {

View File

@@ -8,14 +8,10 @@ import (
"os"
"path"
"path/filepath"
"reflect"
"strings"
"sync/atomic"
"time"
gzip2 "github.com/klauspost/compress/gzip"
zip2 "github.com/klauspost/compress/zip"
"emperror.dev/errors"
"github.com/mholt/archiver/v3"
)
@@ -176,26 +172,13 @@ func ExtractNameFromArchive(f archiver.File) string {
return f.Name()
}
switch s := sys.(type) {
case *zip.FileHeader:
return s.Name
case *zip2.FileHeader:
return s.Name
case *tar.Header:
return s.Name
case *gzip.Header:
return s.Name
case *gzip2.Header:
case *zip.FileHeader:
return s.Name
default:
// At this point we cannot figure out what type of archive this might be so
// just try to find the name field in the struct. If it is found return it.
field := reflect.Indirect(reflect.ValueOf(sys)).FieldByName("Name")
if field.IsValid() {
return field.String()
}
// Fallback to the basename of the file at this point. There is nothing we can really
// do to try and figure out what the underlying directory of the file is supposed to
// be since it didn't implement a name field.
return f.Name()
}
}

View File

@@ -1,8 +1,6 @@
package filesystem
import (
"context"
"github.com/pterodactyl/wings/internal/vhd"
"sync"
"sync/atomic"
"syscall"
@@ -37,46 +35,18 @@ func (ult *usageLookupTime) Get() time.Time {
return ult.value
}
// MaxDisk returns the maximum amount of disk space that this Filesystem
// instance is allowed to use.
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
func (fs *Filesystem) MaxDisk() int64 {
return atomic.LoadInt64(&fs.diskLimit)
}
// SetDiskLimit sets the disk space limit for this Filesystem instance. This
// logic will also handle mounting or unmounting a virtual disk if it is being
// used currently.
func (fs *Filesystem) SetDiskLimit(ctx context.Context, i int64) error {
// Do nothing if this method is called but the limit is not changing.
if atomic.LoadInt64(&fs.diskLimit) == i {
return nil
}
if vhd.Enabled() {
if i == 0 && fs.IsVirtual() {
fs.log().Debug("disk limit changed to 0, destroying virtual disk")
// Remove the VHD if it is mounted so that we're just storing files directly on the system
// since we cannot have a virtual disk with a space limit enforced like that.
if err := fs.vhd.Destroy(ctx); err != nil {
return errors.WithStackIf(err)
}
fs.vhd = nil
}
// If we're setting a disk size go ahead and mount the VHD if it isn't already mounted,
// and then allocate the new space to the disk.
if i > 0 {
fs.log().Debug("disk limit updated, allocating new space to virtual disk")
if err := fs.ConfigureDisk(ctx, i); err != nil {
return errors.WithStackIf(err)
}
}
}
fs.log().WithField("limit", i).Debug("disk limit updated")
atomic.StoreInt64(&fs.diskLimit, i)
return nil
// Sets the disk space limit for this Filesystem instance.
func (fs *Filesystem) SetDiskLimit(i int64) {
atomic.SwapInt64(&fs.diskLimit, i)
}
// HasSpaceErr is the same concept as HasSpaceAvailable however this will return
// an error if there is no space, rather than a boolean value.
// The same concept as HasSpaceAvailable however this will return an error if there is
// no space, rather than a boolean value.
func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
if !fs.HasSpaceAvailable(allowStaleValue) {
return newFilesystemError(ErrCodeDiskSpace, nil)
@@ -84,77 +54,67 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
return nil
}
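A minimal sketch of how a write path might guard on this helper before doing any work (assumes fs is a *Filesystem from this package; the wrapper name is hypothetical):
// guardWrite fails fast with a friendly disk-space error before any data is streamed.
func guardWrite(fs *Filesystem) error {
	// Accept a possibly stale cached value here to avoid blocking on a full directory walk.
	return fs.HasSpaceErr(true)
}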
// HasSpaceAvailable determines if the directory a file is trying to be added to
// has enough space available for the file to be written to.
// Determines if the directory a file is trying to be added to has enough space available
// for the file to be written to.
//
// Because determining the amount of space being used by a server is a taxing
// operation we will load it all up into a cache and pull from that as long as
// the key is not expired. This operation will potentially block unless
// allowStaleValue is set to true. See the documentation on DiskUsage for how
// this affects the call.
// Because determining the amount of space being used by a server is a taxing operation we
// will load it all up into a cache and pull from that as long as the key is not expired.
//
// If the current size of the disk is larger than the maximum allowed size this
// function will return false, in all other cases it will return true. We do
// not check the existence of a virtual disk at this point since this logic is
// used to return friendly error messages to users, and also prevent us wasting
// time on more taxing operations when we know the result will end up failing due
// to space limits.
//
// If the servers disk limit is set to 0 it means there is no limit, however the
// DiskUsage method is still called to keep the cache warm. This function will
// always return true for a server with no limit set.
// This operation will potentially block unless allowStaleValue is set to true. See the
// documentation on DiskUsage for how this affects the call.
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
size, err := fs.DiskUsage(allowStaleValue)
if err != nil {
fs.log().WithField("error", err).Warn("failed to determine root fs directory size")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
}
return fs.MaxDisk() == 0 || size <= fs.MaxDisk()
// If space is -1 or 0 just return true, meaning the server is allowed unlimited disk space.
//
// Technically we could skip the disk space calculation because we don't need to check if the
// server exceeds its limit, but because this method caches the disk usage it would be best
// to calculate the disk usage and always return true.
if fs.MaxDisk() == 0 {
return true
}
return size <= fs.MaxDisk()
}
// CachedUsage returns the cached value for the amount of disk space used by the
// filesystem. Do not rely on this function for critical logical checks. It
// should only be used in areas where the actual disk usage does not need to be
// perfect, e.g. API responses for server resource usage.
// Returns the cached value for the amount of disk space used by the filesystem. Do not rely on this
// function for critical logical checks. It should only be used in areas where the actual disk usage
// does not need to be perfect, e.g. API responses for server resource usage.
func (fs *Filesystem) CachedUsage() int64 {
return atomic.LoadInt64(&fs.diskUsed)
}
// DiskUsage is an internal helper function to allow other parts of the codebase
// to check the total used disk space as needed without overly taxing the system.
// This will prioritize the value from the cache to avoid excessive IO usage. We
// will only walk the filesystem and determine the size of the directory if there
// Internal helper function to allow other parts of the codebase to check the total used disk space
// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
// is no longer a cached value.
//
// If "allowStaleValue" is set to true, a stale value MAY be returned to the
// caller if there is an expired cache value AND there is currently another
// lookup in progress. If there is no cached value but no other lookup is in
// progress, a fresh disk space response will be returned to the caller.
// If "allowStaleValue" is set to true, a stale value MAY be returned to the caller if there is an
// expired cache value AND there is currently another lookup in progress. If there is no cached value but
// no other lookup is in progress, a fresh disk space response will be returned to the caller.
//
// This is primarily to avoid a bunch of I/O operations from piling up on the
// server, especially on servers with a large amount of files.
// This is primarily to avoid a bunch of I/O operations from piling up on the server, especially on servers
// with a large amount of files.
func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
// A disk check interval of 0 means this functionality is completely disabled.
if fs.diskCheckInterval == 0 {
return 0, nil
}
since := time.Now().Add(time.Second * fs.diskCheckInterval * -1)
// If the last lookup time was before our calculated limit we will re-execute this
// checking logic. If the lookup time was after the oldest possible timestamp we will
// continue returning the cached value.
if fs.lastLookupTime.Get().Before(since) {
if !fs.lastLookupTime.Get().After(time.Now().Add(time.Second * fs.diskCheckInterval * -1)) {
// If we are now allowing a stale response go ahead and perform the lookup and return the fresh
// value. This is a blocking operation to the calling process.
if !allowStaleValue {
return fs.updateCachedDiskUsage()
}
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
// currently performing a lookup, just do the disk usage calculation in the background.
if !fs.lookupInProgress.Load() {
} else if !fs.lookupInProgress.Load() {
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
// currently performing a lookup, just do the disk usage calculation in the background.
go func(fs *Filesystem) {
if _, err := fs.updateCachedDiskUsage(); err != nil {
fs.log().WithField("error", err).Warn("failed to update fs disk usage from within routine")
log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
}
}(fs)
}
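For illustration, a hedged sketch of a caller that prefers a cached figure over blocking (assumes the Filesystem type from this package; the helper name is hypothetical):
// currentUsage returns a best-effort usage figure for things like API resource responses,
// tolerating a slightly stale value rather than forcing a filesystem walk on every call.
func currentUsage(fs *Filesystem) int64 {
	used, err := fs.DiskUsage(true)
	if err != nil {
		return fs.CachedUsage() // fall back to whatever was cached last
	}
	return used
}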
@@ -234,14 +194,11 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
}
// HasSpaceFor is a function to determine if a server has space available for a
// file of a given size. If space is available, no error will be returned,
// otherwise an ErrNotEnoughSpace error will be raised. If this filesystem is
// configured as a virtual disk this function is a no-op as we will fall through
// to the native implementation to throw back an error if there is not disk
// space available.
// Helper function to determine if a server has space available for a file of a given size.
// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
// will be raised.
func (fs *Filesystem) HasSpaceFor(size int64) error {
if fs.IsVirtual() || fs.MaxDisk() == 0 {
if fs.MaxDisk() == 0 {
return nil
}
s, err := fs.DiskUsage(true)
@@ -277,7 +234,3 @@ func (fs *Filesystem) addDisk(i int64) int64 {
return atomic.AddInt64(&fs.diskUsed, i)
}
func (fs *Filesystem) log() *log.Entry {
return log.WithField("server", fs.uuid).WithField("root", fs.root)
}

View File

@@ -20,7 +20,6 @@ import (
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/internal/vhd"
"github.com/pterodactyl/wings/system"
)
@@ -31,23 +30,19 @@ type Filesystem struct {
diskUsed int64
diskCheckInterval time.Duration
denylist *ignore.GitIgnore
vhd *vhd.Disk
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
diskLimit int64
// The root data directory path for this Filesystem instance.
root string
uuid string
isTest bool
}
// New creates a new Filesystem instance for a given server.
func New(uuid string, size int64, denylist []string) *Filesystem {
root := filepath.Join(config.Get().System.Data, uuid)
fs := Filesystem{
uuid: uuid,
func New(root string, size int64, denylist []string) *Filesystem {
return &Filesystem{
root: root,
diskLimit: size,
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
@@ -55,15 +50,6 @@ func New(uuid string, size int64, denylist []string) *Filesystem {
lookupInProgress: system.NewAtomicBool(false),
denylist: ignore.CompileIgnoreLines(denylist...),
}
// If VHD support is enabled but this server is configured with no disk size
// limit we cannot actually use a virtual disk. In that case fall back to using
// the default driver.
if vhd.Enabled() && size > 0 {
fs.vhd = vhd.New(size, vhd.DiskPath(uuid), fs.root)
}
return &fs
}
// Path returns the root path for the Filesystem instance.
@@ -91,9 +77,9 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
return f, st, nil
}
// Touch acts by creating the given file and path on the disk if it is not present
// already. If it is present, the file is opened using the defaults which will
// truncate the contents. The opened file is then returned to the caller.
// Acts by creating the given file and path on the disk if it is not present already. If
// it is present, the file is opened using the defaults which will truncate the contents.
// The opened file is then returned to the caller.
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
cleaned, err := fs.SafePath(p)
if err != nil {
@@ -129,6 +115,19 @@ func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
return f, nil
}
// Reads a file on the system and returns it as a byte representation in a file
// reader. This is not the most memory efficient usage since it will be reading the
// entirety of the file into memory.
func (fs *Filesystem) Readfile(p string, w io.Writer) error {
file, _, err := fs.File(p)
if err != nil {
return err
}
defer file.Close()
_, err = bufio.NewReader(file).WriteTo(w)
return err
}
// Writefile writes a file to the system. If the file does not already exist one
// will be created. This will also properly recalculate the disk space used by
// the server when writing new files or modifying existing ones.
@@ -169,12 +168,6 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
buf := make([]byte, 1024*4)
sz, err := io.CopyBuffer(file, r, buf)
if err != nil {
if strings.Contains(err.Error(), "no space left on device") {
return newFilesystemError(ErrCodeDiskSpace, err)
}
return errors.WrapIf(err, "filesystem: failed to copy buffer for file write")
}
// Adjust the disk usage to account for the old size and the new size of the file.
fs.addDisk(sz - currentSize)
@@ -191,16 +184,16 @@ func (fs *Filesystem) CreateDirectory(name string, p string) error {
return os.MkdirAll(cleaned, 0o755)
}
// Rename moves (or renames) a file or directory.
// Moves (or renames) a file or directory.
func (fs *Filesystem) Rename(from string, to string) error {
cleanedFrom, err := fs.SafePath(from)
if err != nil {
return errors.WithStack(err)
return err
}
cleanedTo, err := fs.SafePath(to)
if err != nil {
return errors.WithStack(err)
return err
}
// If the target file or directory already exists the rename function will fail, so just
@@ -222,10 +215,7 @@ func (fs *Filesystem) Rename(from string, to string) error {
}
}
if err := os.Rename(cleanedFrom, cleanedTo); err != nil {
return errors.WithStack(err)
}
return nil
return os.Rename(cleanedFrom, cleanedTo)
}
// Recursively iterates over a file or directory and sets the permissions on all of the
@@ -332,9 +322,8 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
return name + suffix + extension, nil
}
// Copy takes a given input file path and creates a copy of the file at the same
// location, appending a unique number to the end. For example, a copy of "test.txt"
// would create "test 2.txt" as the copy, then "test 3.txt" and so on.
// Copies a given file to the same location and appends a suffix to the file to indicate that
// it has been copied.
func (fs *Filesystem) Copy(p string) error {
cleaned, err := fs.SafePath(p)
if err != nil {
@@ -503,11 +492,7 @@ func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
cleanedp, _ = fs.SafePath(filepath.Join(cleaned, f.Name()))
}
// Don't try to detect the type on a pipe — this will just hang the application and
// you'll never get a response back.
//
// @see https://github.com/pterodactyl/panel/issues/4059
if cleanedp != "" && f.Mode()&os.ModeNamedPipe == 0 {
if cleanedp != "" {
m, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
} else {
// Just pass this for an unknown type because the file could not safely be resolved within

View File

@@ -1,7 +1,6 @@
package filesystem
import (
"bufio"
"bytes"
"errors"
"math/rand"
@@ -45,14 +44,6 @@ type rootFs struct {
root string
}
func getFileContent(file *os.File) string {
var w bytes.Buffer
if _, err := bufio.NewReader(file).WriteTo(&w); err != nil {
panic(err)
}
return w.String()
}
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
f, err := os.Create(filepath.Join(rfs.root, "/server", p))
@@ -84,6 +75,54 @@ func (rfs *rootFs) reset() {
}
}
func TestFilesystem_Readfile(t *testing.T) {
g := Goblin(t)
fs, rfs := NewFs()
g.Describe("Readfile", func() {
buf := &bytes.Buffer{}
g.It("opens a file if it exists on the system", func() {
err := rfs.CreateServerFileFromString("test.txt", "testing")
g.Assert(err).IsNil()
err = fs.Readfile("test.txt", buf)
g.Assert(err).IsNil()
g.Assert(buf.String()).Equal("testing")
})
g.It("returns an error if the file does not exist", func() {
err := fs.Readfile("test.txt", buf)
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
})
g.It("returns an error if the \"file\" is a directory", func() {
err := os.Mkdir(filepath.Join(rfs.root, "/server/test.txt"), 0o755)
g.Assert(err).IsNil()
err = fs.Readfile("test.txt", buf)
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodeIsDirectory)).IsTrue()
})
g.It("cannot open a file outside the root directory", func() {
err := rfs.CreateServerFileFromString("/../test.txt", "testing")
g.Assert(err).IsNil()
err = fs.Readfile("/../test.txt", buf)
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
g.AfterEach(func() {
buf.Truncate(0)
atomic.StoreInt64(&fs.diskUsed, 0)
rfs.reset()
})
})
}
func TestFilesystem_Writefile(t *testing.T) {
g := Goblin(t)
fs, rfs := NewFs()
@@ -101,10 +140,9 @@ func TestFilesystem_Writefile(t *testing.T) {
err := fs.Writefile("test.txt", r)
g.Assert(err).IsNil()
f, _, err := fs.File("test.txt")
err = fs.Readfile("test.txt", buf)
g.Assert(err).IsNil()
defer f.Close()
g.Assert(getFileContent(f)).Equal("test file content")
g.Assert(buf.String()).Equal("test file content")
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(r.Size())
})
@@ -114,10 +152,9 @@ func TestFilesystem_Writefile(t *testing.T) {
err := fs.Writefile("/some/nested/test.txt", r)
g.Assert(err).IsNil()
f, _, err := fs.File("/some/nested/test.txt")
err = fs.Readfile("/some/nested/test.txt", buf)
g.Assert(err).IsNil()
defer f.Close()
g.Assert(getFileContent(f)).Equal("test file content")
g.Assert(buf.String()).Equal("test file content")
})
g.It("can create a new file inside a nested directory without a trailing slash", func() {
@@ -126,10 +163,9 @@ func TestFilesystem_Writefile(t *testing.T) {
err := fs.Writefile("some/../foo/bar/test.txt", r)
g.Assert(err).IsNil()
f, _, err := fs.File("foo/bar/test.txt")
err = fs.Readfile("foo/bar/test.txt", buf)
g.Assert(err).IsNil()
defer f.Close()
g.Assert(getFileContent(f)).Equal("test file content")
g.Assert(buf.String()).Equal("test file content")
})
g.It("cannot create a file outside the root directory", func() {
@@ -154,6 +190,28 @@ func TestFilesystem_Writefile(t *testing.T) {
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
})
/*g.It("updates the total space used when a file is appended to", func() {
atomic.StoreInt64(&fs.diskUsed, 100)
b := make([]byte, 100)
_, _ = rand.Read(b)
r := bytes.NewReader(b)
err := fs.Writefile("test.txt", r)
g.Assert(err).IsNil()
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(200))
// If we write less data than already exists, we should expect the total
// disk used to be decremented.
b = make([]byte, 50)
_, _ = rand.Read(b)
r = bytes.NewReader(b)
err = fs.Writefile("test.txt", r)
g.Assert(err).IsNil()
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(150))
})*/
g.It("truncates the file when writing new contents", func() {
r := bytes.NewReader([]byte("original data"))
err := fs.Writefile("test.txt", r)
@@ -163,10 +221,9 @@ func TestFilesystem_Writefile(t *testing.T) {
err = fs.Writefile("test.txt", r)
g.Assert(err).IsNil()
f, _, err := fs.File("test.txt")
err = fs.Readfile("test.txt", buf)
g.Assert(err).IsNil()
defer f.Close()
g.Assert(getFileContent(f)).Equal("new data")
g.Assert(buf.String()).Equal("new data")
})
g.AfterEach(func() {

View File

@@ -119,6 +119,16 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
panic(err)
}
g.Describe("Readfile", func() {
g.It("cannot read a file symlinked outside the root", func() {
b := bytes.Buffer{}
err := fs.Readfile("symlinked.txt", &b)
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
})
})
g.Describe("Writefile", func() {
g.It("cannot write to a file symlinked outside the root", func() {
r := bytes.NewReader([]byte("testing"))

View File

@@ -1,42 +0,0 @@
package filesystem
import (
"context"
"emperror.dev/errors"
"github.com/pterodactyl/wings/internal/vhd"
)
// IsVirtual returns true if the filesystem is currently using a virtual disk.
func (fs *Filesystem) IsVirtual() bool {
return fs.vhd != nil
}
// ConfigureDisk will attempt to create a new VHD if there is not one already
// created for the filesystem. If there is this method will attempt to resize
// the underlying data volume. Passing a size of 0 or less will panic.
func (fs *Filesystem) ConfigureDisk(ctx context.Context, size int64) error {
if size <= 0 {
panic("filesystem: attempt to configure disk with empty size")
}
if fs.vhd == nil {
fs.vhd = vhd.New(size, vhd.DiskPath(fs.uuid), fs.root)
if err := fs.MountDisk(ctx); err != nil {
return errors.WithStackIf(err)
}
}
// Resize the disk now that it is for sure mounted and exists on the system.
if err := fs.vhd.Resize(ctx, size); err != nil {
return errors.WithStackIf(err)
}
return nil
}
// MountDisk will attempt to mount the underlying virtual disk for the server.
// If the disk is already mounted this is a no-op function.
func (fs *Filesystem) MountDisk(ctx context.Context) error {
err := fs.vhd.Mount(ctx)
if errors.Is(err, vhd.ErrFilesystemMounted) {
return nil
}
return errors.WrapIf(err, "filesystem: failed to mount VHD")
}

View File

@@ -10,7 +10,6 @@ import (
"path/filepath"
"strconv"
"strings"
"time"
"emperror.dev/errors"
"github.com/apex/log"
@@ -25,17 +24,16 @@ import (
"github.com/pterodactyl/wings/system"
)
// Install executes the installation stack for a server process. Bubbles any
// errors up to the calling function which should handle contacting the panel to
// notify it of the server state.
// Executes the installation stack for a server process. Bubbles any errors up to the calling
// function which should handle contacting the panel to notify it of the server state.
//
// Pass true as the first argument in order to execute a server sync before the
// process to ensure the latest information is used.
// Pass true as the first argument in order to execute a server sync before the process to
// ensure the latest information is used.
func (s *Server) Install(sync bool) error {
if sync {
s.Log().Info("syncing server state with remote source before executing installation process")
if err := s.Sync(); err != nil {
return errors.WrapIf(err, "install: failed to sync server state with Panel")
return err
}
}
@@ -59,7 +57,7 @@ func (s *Server) Install(sync bool) error {
// error to this log entry. Otherwise ignore it in this log since whatever is calling
// this function should handle the error and will end up logging the same one.
if err == nil {
l.WithField("error", err)
l.WithField("error", serr)
}
l.Warn("failed to notify panel of server install state")
@@ -73,7 +71,7 @@ func (s *Server) Install(sync bool) error {
// the install is completed.
s.Events().Publish(InstallCompletedEvent, "")
return errors.WithStackIf(err)
return err
}
// Reinstalls a server's software by utilizing the install script for the server egg. This
@@ -81,8 +79,8 @@ func (s *Server) Install(sync bool) error {
func (s *Server) Reinstall() error {
if s.Environment.State() != environment.ProcessOfflineState {
s.Log().Debug("waiting for server instance to enter a stopped state")
if err := s.Environment.WaitForStop(s.Context(), time.Second*10, true); err != nil {
return errors.WrapIf(err, "install: failed to stop running environment")
if err := s.Environment.WaitForStop(10, true); err != nil {
return err
}
}
@@ -112,7 +110,9 @@ func (s *Server) internalInstall() error {
type InstallationProcess struct {
Server *Server
Script *remote.InstallationScript
client *client.Client
client *client.Client
context context.Context
}
// Generates a new installation process struct that will be used to create containers,
@@ -127,6 +127,7 @@ func NewInstallationProcess(s *Server, script *remote.InstallationScript) (*Inst
return nil, err
} else {
proc.client = c
proc.context = s.Context()
}
return proc, nil
@@ -156,7 +157,7 @@ func (s *Server) SetRestoring(state bool) {
// Removes the installer container for the server.
func (ip *InstallationProcess) RemoveContainer() error {
err := ip.client.ContainerRemove(ip.Server.Context(), ip.Server.ID()+"_installer", types.ContainerRemoveOptions{
err := ip.client.ContainerRemove(ip.context, ip.Server.ID()+"_installer", types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
@@ -166,10 +167,11 @@ func (ip *InstallationProcess) RemoveContainer() error {
return nil
}
// Run runs the installation process, this is done as in a background thread.
// This will configure the required environment, and then spin up the
// installation container. Once the container finishes installing the results
// are stored in an installation log in the server's configuration directory.
// Runs the installation process, this is done as in a background thread. This will configure
// the required environment, and then spin up the installation container.
//
// Once the container finishes installing the results will be stored in an installation
// log in the server's configuration directory.
func (ip *InstallationProcess) Run() error {
ip.Server.Log().Debug("acquiring installation process lock")
if !ip.Server.installing.SwapIf(true) {
@@ -205,7 +207,7 @@ func (ip *InstallationProcess) Run() error {
// Returns the location of the temporary data for the installation process.
func (ip *InstallationProcess) tempDir() string {
return filepath.Join(config.Get().System.TmpDirectory, ip.Server.ID())
return filepath.Join(os.TempDir(), "pterodactyl/", ip.Server.ID())
}
// Writes the installation script to a temporary file on the host machine so that it
@@ -265,9 +267,9 @@ func (ip *InstallationProcess) pullInstallationImage() error {
imagePullOptions.RegistryAuth = b64
}
r, err := ip.client.ImagePull(ip.Server.Context(), ip.Script.ContainerImage, imagePullOptions)
r, err := ip.client.ImagePull(context.Background(), ip.Script.ContainerImage, imagePullOptions)
if err != nil {
images, ierr := ip.client.ImageList(ip.Server.Context(), types.ImageListOptions{})
images, ierr := ip.client.ImageList(context.Background(), types.ImageListOptions{})
if ierr != nil {
// Well damn, something has gone really wrong here, just go ahead and abort there
// isn't much anything we can do to try and self-recover from this.
@@ -310,10 +312,9 @@ func (ip *InstallationProcess) pullInstallationImage() error {
return nil
}
// BeforeExecute runs before the container is executed. This pulls down the
// required docker container image as well as writes the installation script to
// the disk. This process is executed in an async manner, if either one fails
// the error is returned.
// Runs before the container is executed. This pulls down the required docker container image
// as well as writes the installation script to the disk. This process is executed in an async
// manner, if either one fails the error is returned.
func (ip *InstallationProcess) BeforeExecute() error {
if err := ip.writeScriptToDisk(); err != nil {
return errors.WithMessage(err, "failed to write installation script to disk")
@@ -339,7 +340,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
defer ip.RemoveContainer()
ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
reader, err := ip.client.ContainerLogs(ip.Server.Context(), containerId, types.ContainerLogsOptions{
reader, err := ip.client.ContainerLogs(ip.context, containerId, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: false,
@@ -394,13 +395,12 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
return nil
}
// Execute executes the installation process inside a specially created docker
// container.
// Executes the installation process inside a specially created docker container.
func (ip *InstallationProcess) Execute() (string, error) {
// Create a child context that is canceled once this function is done running. This
// will also be canceled if the parent context (from the Server struct) is canceled
// which occurs if the server is deleted.
ctx, cancel := context.WithCancel(ip.Server.Context())
ctx, cancel := context.WithCancel(ip.context)
defer cancel()
conf := &container.Config{
@@ -450,7 +450,6 @@ func (ip *InstallationProcess) Execute() (string, error) {
},
Privileged: true,
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
}
// Ensure the root directory for the server exists properly before attempting
@@ -512,15 +511,18 @@ func (ip *InstallationProcess) Execute() (string, error) {
// the server configuration directory, as well as to a websocket listener so
// that the process can be viewed in the panel by administrators.
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true}
reader, err := ip.client.ContainerLogs(ctx, id, opts)
reader, err := ip.client.ContainerLogs(ctx, id, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
return err
}
defer reader.Close()
err = system.ScanReader(reader, ip.Server.Sink(system.InstallSink).Push)
if err != nil && !errors.Is(err, context.Canceled) {
err = system.ScanReader(reader, ip.Server.Sink(InstallSink).Push)
if err != nil {
ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines")
}
return nil

View File

@@ -5,14 +5,11 @@ import (
"regexp"
"strconv"
"sync"
"time"
"github.com/apex/log"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote"
)
@@ -47,7 +44,7 @@ func (dsl *diskSpaceLimiter) Reset() {
func (dsl *diskSpaceLimiter) Trigger() {
dsl.o.Do(func() {
dsl.server.PublishConsoleOutputFromDaemon("Server is exceeding the assigned disk space limit, stopping process now.")
if err := dsl.server.Environment.WaitForStop(dsl.server.Context(), time.Minute, true); err != nil {
if err := dsl.server.Environment.WaitForStop(60, true); err != nil {
dsl.server.Log().WithField("error", err).Error("failed to stop server after exceeding space limit!")
}
})
@@ -75,57 +72,47 @@ func (s *Server) processConsoleOutputEvent(v []byte) {
return
}
s.Sink(system.LogSink).Push(v)
s.Sink(LogSink).Push(v)
}
// StartEventListeners adds all the internal event listeners we want to use for
// a server. These listeners can only be removed by deleting the server as they
// should last for the duration of the process' lifetime.
func (s *Server) StartEventListeners() {
c := make(chan []byte, 8)
limit := newDiskLimiter(s)
s.Log().Debug("registering event listeners: console, state, resources...")
s.Environment.Events().On(c)
s.Environment.SetLogCallback(s.processConsoleOutputEvent)
state := make(chan events.Event)
stats := make(chan events.Event)
docker := make(chan events.Event)
go func() {
l := newDiskLimiter(s)
for {
select {
case v := <-c:
go func(v []byte, limit *diskSpaceLimiter) {
var e events.Event
if err := events.DecodeTo(v, &e); err != nil {
return
case e := <-state:
go func() {
// Reset the throttler when the process is started.
if e.Data == environment.ProcessStartingState {
l.Reset()
s.Throttler().Reset()
}
s.OnStateChange()
}()
case e := <-stats:
go func() {
s.resources.UpdateStats(e.Data.(environment.Stats))
// If there is no disk space available at this point, trigger the server
// disk limiter logic which will start to stop the running instance.
if !s.Filesystem().HasSpaceAvailable(true) {
l.Trigger()
}
s.Events().Publish(StatsEvent, s.Proc())
}()
case e := <-docker:
go func() {
switch e.Topic {
case environment.ResourceEvent:
{
var stats struct {
Topic string
Data environment.Stats
}
if err := events.DecodeTo(v, &stats); err != nil {
s.Log().WithField("error", err).Warn("failed to decode server resource event")
return
}
s.resources.UpdateStats(stats.Data)
// If there is no disk space available at this point, trigger the server
// disk limiter logic which will start to stop the running instance.
if !s.Filesystem().HasSpaceAvailable(true) {
limit.Trigger()
}
s.Events().Publish(StatsEvent, s.Proc())
}
case environment.StateChangeEvent:
{
// Reset the throttler when the process is started.
if e.Data == environment.ProcessStartingState {
limit.Reset()
s.Throttler().Reset()
}
s.OnStateChange()
}
case environment.DockerImagePullStatus:
s.Events().Publish(InstallOutputEvent, e.Data)
case environment.DockerImagePullStarted:
@@ -133,13 +120,18 @@ func (s *Server) StartEventListeners() {
case environment.DockerImagePullCompleted:
s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
default:
s.Log().WithField("topic", e.Topic).Error("unhandled docker event topic")
}
}(v, limit)
case <-s.Context().Done():
return
}()
}
}
}()
s.Log().Debug("registering event listeners: console, state, resources...")
s.Environment.SetLogCallback(s.processConsoleOutputEvent)
s.Environment.Events().On(state, environment.StateChangeEvent)
s.Environment.Events().On(stats, environment.ResourceEvent)
s.Environment.Events().On(docker, dockerEvents...)
}
var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))")
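Both versions of the listener shown above are built around the same shape: a select loop over event channels and the server context, with each event handled in its own goroutine. Below is a minimal, self-contained sketch of that pattern; the channel, payload format, and the consume/handle names are stand-ins for illustration and are not the wings API.

package main

import (
	"context"
	"fmt"
	"time"
)

// consume drains events from c until ctx is canceled. The handler runs in its
// own goroutine so a slow consumer never blocks the select loop.
func consume(ctx context.Context, c <-chan []byte, handle func([]byte)) {
	go func() {
		for {
			select {
			case v := <-c:
				go handle(v)
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	c := make(chan []byte, 8)
	consume(ctx, c, func(v []byte) { fmt.Printf("event: %s\n", v) })

	c <- []byte(`{"topic":"stats","data":{}}`)
	time.Sleep(50 * time.Millisecond) // give the handler goroutine time to print
}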

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"sync"
"time"
@@ -22,18 +23,16 @@ import (
)
type Manager struct {
mu sync.RWMutex
client remote.Client
skipVhdInitialization bool
servers []*Server
mu sync.RWMutex
client remote.Client
servers []*Server
}
// NewManager returns a new server manager instance. This will boot up all the
// servers that are currently present on the filesystem and set them into the
// manager.
func NewManager(ctx context.Context, client remote.Client, skipVhdInit bool) (*Manager, error) {
func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
m := NewEmptyManager(client)
m.skipVhdInitialization = skipVhdInit
if err := m.init(ctx); err != nil {
return nil, err
}
@@ -53,24 +52,6 @@ func (m *Manager) Client() remote.Client {
return m.client
}
// Len returns the count of servers stored in the manager instance.
func (m *Manager) Len() int {
m.mu.RLock()
defer m.mu.RUnlock()
return len(m.servers)
}
// Keys returns all of the server UUIDs stored in the manager set.
func (m *Manager) Keys() []string {
m.mu.RLock()
defer m.mu.RUnlock()
keys := make([]string, len(m.servers))
for i, s := range m.servers {
keys[i] = s.ID()
}
return keys
}
// Put replaces all the current values in the collection with the value that
// is passed through.
func (m *Manager) Put(s []*Server) {
@@ -185,7 +166,7 @@ func (m *Manager) ReadStates() (map[string]string, error) {
// InitServer initializes a server using a data byte array. This will be
// marshaled into the given struct using a YAML marshaler. This will also
// configure the given environment for a server.
func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfigurationResponse) (*Server, error) {
func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server, error) {
s, err := New(m.client)
if err != nil {
return nil, err
@@ -197,15 +178,7 @@ func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfiguratio
return nil, errors.WithStackIf(err)
}
s.fs = filesystem.New(s.ID(), s.DiskSpace(), s.Config().Egg.FileDenylist)
// If this is a virtual filesystem we need to go ahead and mount the disk
// so that everything is accessible.
if s.fs.IsVirtual() && !m.skipVhdInitialization {
log.WithField("server", s.ID()).Info("mounting virtual disk for server")
if err := s.fs.MountDisk(ctx); err != nil {
return nil, err
}
}
s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
// Right now we only support a Docker based environment, so I'm going to hard code
// this logic in. When we're ready to support other environment we'll need to make
@@ -267,7 +240,7 @@ func (m *Manager) init(ctx context.Context) error {
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
return
}
s, err := m.InitServer(ctx, d)
s, err := m.InitServer(d)
if err != nil {
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
return

View File

@@ -8,7 +8,6 @@ import (
"emperror.dev/errors"
"github.com/google/uuid"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
)
@@ -134,11 +133,11 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
return s.Environment.Start(s.Context())
case PowerActionStop:
fallthrough
// We're specifically waiting for the process to be stopped here, otherwise the lock is released
// too soon, and you can rack up all sorts of issues.
return s.Environment.WaitForStop(10*60, true)
case PowerActionRestart:
// We're specifically waiting for the process to be stopped here, otherwise the lock is
// released too soon, and you can rack up all sorts of issues.
if err := s.Environment.WaitForStop(s.Context(), time.Minute*10, true); err != nil {
if err := s.Environment.WaitForStop(10*60, true); err != nil {
// Even timeout errors should be bubbled back up the stack. If the process didn't stop
// nicely, but the terminate argument was passed then the server is stopped without an
// error being returned.
@@ -150,10 +149,6 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
return err
}
if action == PowerActionStop {
return nil
}
// Now actually try to start the process by executing the normal pre-boot logic.
if err := s.onBeforeStart(); err != nil {
return err
@@ -161,7 +156,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
return s.Environment.Start(s.Context())
case PowerActionTerminate:
return s.Environment.Terminate(s.Context(), os.Kill)
return s.Environment.Terminate(os.Kill)
}
return errors.New("attempting to handle unknown power action")
@@ -202,19 +197,15 @@ func (s *Server) onBeforeStart() error {
// we don't need to actively do anything about it at this point, worse comes to worst the
// server starts in a weird state and the user can manually adjust.
s.PublishConsoleOutputFromDaemon("Updating process configuration files...")
s.Log().Debug("updating server configuration files...")
s.UpdateConfigurationFiles()
s.Log().Debug("updated server configuration files")
if config.Get().System.CheckPermissionsOnBoot {
s.PublishConsoleOutputFromDaemon("Ensuring file permissions are set correctly, this could take a few seconds...")
// Ensure all the server file permissions are set correctly before booting the process.
s.Log().Debug("chowning server root directory...")
if err := s.Filesystem().Chown("/"); err != nil {
return errors.WithMessage(err, "failed to chown root server directory during pre-boot process")
}
}
s.Log().Info("completed server preflight, starting boot process...")
return nil
}
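The restart branch above waits for the process to fully stop before running the pre-boot logic and starting again. A rough sketch of that ordering follows, using stand-in types rather than the real Environment interface (whose WaitForStop signature differs between the two sides of this diff).

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// env is a stand-in environment; the real wings Environment interface differs.
type env struct{ running bool }

func (e *env) WaitForStop(ctx context.Context, d time.Duration, terminate bool) error {
	// Pretend the process exits promptly.
	e.running = false
	return nil
}

func (e *env) Start(ctx context.Context) error { e.running = true; return nil }

// restart mirrors the stop-then-start ordering from the hunk above: the stop
// must complete (or time out) before the pre-boot logic and Start are run.
func restart(ctx context.Context, e *env, preboot func() error) error {
	if err := e.WaitForStop(ctx, 10*time.Minute, true); err != nil {
		return err
	}
	if err := preboot(); err != nil {
		return errors.New("pre-boot failed: " + err.Error())
	}
	return e.Start(ctx)
}

func main() {
	e := &env{running: true}
	err := restart(context.Background(), e, func() error {
		fmt.Println("updating configuration files, checking permissions...")
		return nil
	})
	fmt.Println("restarted:", e.running, "err:", err)
}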

View File

@@ -4,7 +4,6 @@ import (
"testing"
. "github.com/franela/goblin"
"github.com/pterodactyl/wings/system"
)

View File

@@ -70,10 +70,10 @@ type Server struct {
wsBag *WebsocketBag
wsBagLocker sync.Mutex
sinks map[system.SinkName]*system.SinkPool
sinks map[SinkName]*sinkPool
logSink *system.SinkPool
installSink *system.SinkPool
logSink *sinkPool
installSink *sinkPool
}
// New returns a new server instance with a context and all of the default
@@ -88,9 +88,9 @@ func New(client remote.Client) (*Server, error) {
transferring: system.NewAtomicBool(false),
restoring: system.NewAtomicBool(false),
powerLock: system.NewLocker(),
sinks: map[system.SinkName]*system.SinkPool{
system.LogSink: system.NewSinkPool(),
system.InstallSink: system.NewSinkPool(),
sinks: map[SinkName]*sinkPool{
LogSink: newSinkPool(),
InstallSink: newSinkPool(),
},
}
if err := defaults.Set(&s); err != nil {
@@ -179,8 +179,6 @@ func (s *Server) Log() *log.Entry {
//
// This also means mass actions can be performed against servers on the Panel
// and they will automatically sync with Wings when the server is started.
//
// TODO: accept a context value rather than using the server's context.
func (s *Server) Sync() error {
cfg, err := s.client.GetServerConfiguration(s.Context(), s.ID())
if err != nil {
@@ -196,9 +194,7 @@ func (s *Server) Sync() error {
// Update the disk space limits for the server whenever the configuration for
// it changes.
if err := s.fs.SetDiskLimit(s.Context(), s.DiskSpace()); err != nil {
return errors.WrapIf(err, "server: failed to sync server configuration from API")
}
s.fs.SetDiskLimit(s.DiskSpace())
s.SyncWithEnvironment()

View File

@@ -1,4 +1,4 @@
package system
package server
import (
"sync"
@@ -16,20 +16,20 @@ const (
InstallSink SinkName = "install"
)
// SinkPool represents a pool with sinks.
type SinkPool struct {
// sinkPool represents a pool with sinks.
type sinkPool struct {
mu sync.RWMutex
sinks []chan []byte
}
// NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
// server instance for its full lifetime.
func NewSinkPool() *SinkPool {
return &SinkPool{}
// newSinkPool returns a new empty sinkPool. A sink pool generally lives with a
// server instance for its full lifetime.
func newSinkPool() *sinkPool {
return &sinkPool{}
}
// On adds a channel to the sink pool instance.
func (p *SinkPool) On(c chan []byte) {
func (p *sinkPool) On(c chan []byte) {
p.mu.Lock()
p.sinks = append(p.sinks, c)
p.mu.Unlock()
@@ -37,7 +37,7 @@ func (p *SinkPool) On(c chan []byte) {
// Off removes a given channel from the sink pool. If no matching sink is found
// this function is a no-op. If a matching channel is found, it will be removed.
func (p *SinkPool) Off(c chan []byte) {
func (p *sinkPool) Off(c chan []byte) {
p.mu.Lock()
defer p.mu.Unlock()
@@ -66,7 +66,7 @@ func (p *SinkPool) Off(c chan []byte) {
// Destroy destroys the pool by removing and closing all sinks and destroying
// all of the channels that are present.
func (p *SinkPool) Destroy() {
func (p *sinkPool) Destroy() {
p.mu.Lock()
defer p.mu.Unlock()
@@ -95,7 +95,7 @@ func (p *SinkPool) Destroy() {
// likely the best option anyways. This uses waitgroups to allow every channel
// to attempt its send concurrently thus making the total blocking time of this
// function "O(1)" instead of "O(n)".
func (p *SinkPool) Push(data []byte) {
func (p *sinkPool) Push(data []byte) {
p.mu.RLock()
defer p.mu.RUnlock()
var wg sync.WaitGroup
@@ -119,3 +119,24 @@ func (p *SinkPool) Push(data []byte) {
}
wg.Wait()
}
// Sink returns the instantiated and named sink for a server. If the sink has
// not been configured yet this function will cause a panic condition.
func (s *Server) Sink(name SinkName) *sinkPool {
sink, ok := s.sinks[name]
if !ok {
s.Log().Fatalf("attempt to access nil sink: %s", name)
}
return sink
}
// DestroyAllSinks iterates over all of the sinks configured for the server and
// destroys their instances. Note that this will cause a panic if you attempt
// to call Server.Sink() again after. This function is only used when a server
// is being deleted from the system.
func (s *Server) DestroyAllSinks() {
s.Log().Info("destroying all registered sinks for server instance")
for _, sink := range s.sinks {
sink.Destroy()
}
}
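For context, here is a cut-down usage sketch of the fan-out behaviour described above: subscribers register a channel, and Push delivers the payload to each one concurrently. The drop-on-full-channel behaviour in this sketch is an assumption made to keep it short, and the names are stand-ins rather than the server/system types from the diff.

package main

import (
	"fmt"
	"sync"
)

// minimalPool is a stripped-down stand-in for the sink pool above: subscribers
// register a channel with On and every Push fans the payload out to all of them.
type minimalPool struct {
	mu    sync.RWMutex
	sinks []chan []byte
}

func (p *minimalPool) On(c chan []byte) {
	p.mu.Lock()
	p.sinks = append(p.sinks, c)
	p.mu.Unlock()
}

func (p *minimalPool) Push(data []byte) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	var wg sync.WaitGroup
	for _, c := range p.sinks {
		wg.Add(1)
		go func(c chan []byte) {
			defer wg.Done()
			select {
			case c <- data: // deliver if the subscriber is keeping up
			default: // otherwise drop rather than block the publisher
			}
		}(c)
	}
	wg.Wait()
}

func main() {
	p := &minimalPool{}
	c := make(chan []byte, 4)
	p.On(c)
	p.Push([]byte("[install] pulling image..."))
	fmt.Println(string(<-c))
}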

View File

@@ -1,4 +1,4 @@
package system
package server
import (
"fmt"
@@ -23,7 +23,7 @@ func TestSink(t *testing.T) {
g.Describe("SinkPool#On", func() {
g.It("pushes additional channels to a sink", func() {
pool := &SinkPool{}
pool := &sinkPool{}
g.Assert(pool.sinks).IsZero()
@@ -36,9 +36,9 @@ func TestSink(t *testing.T) {
})
g.Describe("SinkPool#Off", func() {
var pool *SinkPool
var pool *sinkPool
g.BeforeEach(func() {
pool = &SinkPool{}
pool = &sinkPool{}
})
g.It("works when no sinks are registered", func() {
@@ -97,9 +97,9 @@ func TestSink(t *testing.T) {
})
g.Describe("SinkPool#Push", func() {
var pool *SinkPool
var pool *sinkPool
g.BeforeEach(func() {
pool = &SinkPool{}
pool = &sinkPool{}
})
g.It("works when no sinks are registered", func() {
@@ -190,9 +190,9 @@ func TestSink(t *testing.T) {
})
g.Describe("SinkPool#Destroy", func() {
var pool *SinkPool
var pool *sinkPool
g.BeforeEach(func() {
pool = &SinkPool{}
pool = &sinkPool{}
})
g.It("works if no sinks are registered", func() {

View File

@@ -1,8 +1,6 @@
package server
import (
"time"
"github.com/pterodactyl/wings/environment/docker"
"github.com/pterodactyl/wings/environment"
@@ -60,7 +58,7 @@ func (s *Server) SyncWithEnvironment() {
s.Log().Info("server suspended with running process state, terminating now")
go func(s *Server) {
if err := s.Environment.WaitForStop(s.Context(), time.Minute, true); err != nil {
if err := s.Environment.WaitForStop(60, true); err != nil {
s.Log().WithField("error", err).Warn("failed to terminate server environment after suspension")
}
}(s)

View File

@@ -1,59 +0,0 @@
package sftp
import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/pterodactyl/wings/internal/database"
"github.com/pterodactyl/wings/internal/models"
)
type eventHandler struct {
ip string
user string
server string
}
type FileAction struct {
// Entity is the targeted file or directory (depending on the event) that the action
// is being performed _against_, such as "/foo/test.txt". This will always be the full
// path to the element.
Entity string
// Target is an optional (often blank) field that only has a value in it when the event
// is specifically modifying the entity, such as a rename or move event. In that case
// the Target field will be the final value, such as "/bar/new.txt"
Target string
}
// Log parses a SFTP specific file activity event and then passes it off to be stored
// in the normal activity database.
func (eh *eventHandler) Log(e models.Event, fa FileAction) error {
metadata := map[string]interface{}{
"files": []string{fa.Entity},
}
if fa.Target != "" {
metadata["files"] = []map[string]string{
{"from": fa.Entity, "to": fa.Target},
}
}
a := models.Activity{
Server: eh.server,
Event: e,
Metadata: metadata,
IP: eh.ip,
}
if tx := database.Instance().Create(a.SetUser(eh.user)); tx.Error != nil {
return errors.WithStack(tx.Error)
}
return nil
}
// MustLog is a wrapper around log that will trigger a fatal error and exit the application
// if an error is encountered during the logging of the event.
func (eh *eventHandler) MustLog(e models.Event, fa FileAction) {
if err := eh.Log(e, fa); err != nil {
log.WithField("error", errors.WithStack(err)).WithField("event", e).Error("sftp: failed to log event")
}
}
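The metadata shape described in the FileAction comments above is easier to see in isolation. The sketch below rebuilds just the payload construction; the database write and the models/activity types are omitted, and the function name is invented for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

// buildMetadata mirrors how the event handler above shapes its activity
// payload: a bare file list for most events, and from/to pairs when the
// event carries a rename or move target.
func buildMetadata(entity, target string) map[string]interface{} {
	if target == "" {
		return map[string]interface{}{"files": []string{entity}}
	}
	return map[string]interface{}{
		"files": []map[string]string{{"from": entity, "to": target}},
	}
}

func main() {
	write, _ := json.Marshal(buildMetadata("/foo/test.txt", ""))
	rename, _ := json.Marshal(buildMetadata("/foo/test.txt", "/bar/new.txt"))
	fmt.Println(string(write))
	fmt.Println(string(rename))
}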

View File

@@ -27,40 +27,32 @@ const (
)
type Handler struct {
mu sync.Mutex
mu sync.Mutex
permissions []string
server *server.Server
fs *filesystem.Filesystem
events *eventHandler
permissions []string
logger *log.Entry
ro bool
}
// NewHandler returns a new connection handler for the SFTP server. This allows a given user
// Returns a new connection handler for the SFTP server. This allows a given user
// to access the underlying filesystem.
func NewHandler(sc *ssh.ServerConn, srv *server.Server) (*Handler, error) {
uuid, ok := sc.Permissions.Extensions["user"]
if !ok {
return nil, errors.New("sftp: mismatched Wings and Panel versions — Panel 1.10 is required for this version of Wings.")
}
events := eventHandler{
ip: sc.RemoteAddr().String(),
user: uuid,
server: srv.ID(),
}
func NewHandler(sc *ssh.ServerConn, srv *server.Server) *Handler {
return &Handler{
permissions: strings.Split(sc.Permissions.Extensions["permissions"], ","),
server: srv,
fs: srv.Filesystem(),
events: &events,
ro: config.Get().System.Sftp.ReadOnly,
logger: log.WithFields(log.Fields{"subsystem": "sftp", "user": uuid, "ip": sc.RemoteAddr()}),
}, nil
logger: log.WithFields(log.Fields{
"subsystem": "sftp",
"username": sc.User(),
"ip": sc.RemoteAddr(),
}),
}
}
// Handlers returns the sftp.Handlers for this struct.
// Returns the sftp.Handlers for this struct.
func (h *Handler) Handlers() sftp.Handlers {
return sftp.Handlers{
FileGet: h,
@@ -129,12 +121,7 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
}
// Chown may or may not have been called in the touch function, so always do
// it at this point to avoid the file being improperly owned.
_ = h.fs.Chown(request.Filepath)
event := server.ActivitySftpWrite
if permission == PermissionFileCreate {
event = server.ActivitySftpCreate
}
h.events.MustLog(event, FileAction{Entity: request.Filepath})
_ = h.server.Filesystem().Chown(request.Filepath)
return f, nil
}
@@ -185,7 +172,6 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
l.WithField("error", err).Error("failed to rename file")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpRename, FileAction{Entity: request.Filepath, Target: request.Target})
break
// Handle deletion of a directory. This will properly delete all of the files and
// folders within that directory if it is not already empty (unlike a lot of SFTP
@@ -194,12 +180,10 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
if !h.can(PermissionFileDelete) {
return sftp.ErrSSHFxPermissionDenied
}
p := filepath.Clean(request.Filepath)
if err := h.fs.Delete(p); err != nil {
if err := h.fs.Delete(request.Filepath); err != nil {
l.WithField("error", err).Error("failed to remove directory")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
return sftp.ErrSSHFxOk
// Handle requests to create a new Directory.
case "Mkdir":
@@ -207,12 +191,11 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
return sftp.ErrSSHFxPermissionDenied
}
name := strings.Split(filepath.Clean(request.Filepath), "/")
p := strings.Join(name[0:len(name)-1], "/")
if err := h.fs.CreateDirectory(name[len(name)-1], p); err != nil {
err := h.fs.CreateDirectory(name[len(name)-1], strings.Join(name[0:len(name)-1], "/"))
if err != nil {
l.WithField("error", err).Error("failed to create directory")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpCreateDirectory, FileAction{Entity: request.Filepath})
break
// Support creating symlinks between files. The source and target must resolve within
// the server home directory.
@@ -245,7 +228,6 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
l.WithField("error", err).Error("failed to remove a file")
return sftp.ErrSSHFxFailure
}
h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
return sftp.ErrSSHFxOk
default:
return sftp.ErrSSHFxOpUnsupported
@@ -305,10 +287,15 @@ func (h *Handler) can(permission string) bool {
if h.server.IsSuspended() {
return false
}
// Server owners and super admins have their permissions returned as '[*]' via the Panel
// API, so for the sake of speed do an initial check for that before iterating over the
// entire array of permissions.
if len(h.permissions) == 1 && h.permissions[0] == "*" {
return true
}
for _, p := range h.permissions {
// If we match the permission specifically, or the user has been granted the "*"
// permission because they're an admin, let them through.
if p == permission || p == "*" {
if p == permission {
return true
}
}

View File

@@ -68,14 +68,9 @@ func (c *SFTPServer) Run() error {
}
conf := &ssh.ServerConfig{
NoClientAuth: false,
MaxAuthTries: 6,
PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
return c.makeCredentialsRequest(conn, remote.SftpAuthPassword, string(password))
},
PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
return c.makeCredentialsRequest(conn, remote.SftpAuthPublicKey, string(ssh.MarshalAuthorizedKey(key)))
},
NoClientAuth: false,
MaxAuthTries: 6,
PasswordCallback: c.passwordCallback,
}
conf.AddHostKey(private)
@@ -91,21 +86,19 @@ func (c *SFTPServer) Run() error {
if conn, _ := listener.Accept(); conn != nil {
go func(conn net.Conn) {
defer conn.Close()
if err := c.AcceptInbound(conn, conf); err != nil {
log.WithField("error", err).Error("sftp: failed to accept inbound connection")
}
c.AcceptInbound(conn, conf)
}(conn)
}
}
}
// AcceptInbound handles an inbound connection to the instance and determines if we should
// serve the request or not.
func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) error {
// Handles an inbound connection to the instance and determines if we should serve the
// request or not.
func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
// Before beginning a handshake must be performed on the incoming net.Conn
sconn, chans, reqs, err := ssh.NewServerConn(conn, config)
if err != nil {
return errors.WithStack(err)
return
}
defer sconn.Close()
go ssh.DiscardRequests(reqs)
@@ -151,17 +144,11 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) erro
// Spin up a SFTP server instance for the authenticated user's server allowing
// them access to the underlying filesystem.
handler, err := NewHandler(sconn, srv)
if err != nil {
return errors.WithStackIf(err)
}
rs := sftp.NewRequestServer(channel, handler.Handlers())
if err := rs.Serve(); err == io.EOF {
_ = rs.Close()
handler := sftp.NewRequestServer(channel, NewHandler(sconn, srv).Handlers())
if err := handler.Serve(); err == io.EOF {
handler.Close()
}
}
return nil
}
// Generates a new ED25519 private key that is used for host authentication when
@@ -190,17 +177,17 @@ func (c *SFTPServer) generateED25519PrivateKey() error {
return nil
}
func (c *SFTPServer) makeCredentialsRequest(conn ssh.ConnMetadata, t remote.SftpAuthRequestType, p string) (*ssh.Permissions, error) {
// A function capable of validating user credentials with the Panel API.
func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
request := remote.SftpAuthRequest{
Type: t,
User: conn.User(),
Pass: p,
Pass: string(pass),
IP: conn.RemoteAddr().String(),
SessionID: conn.SessionID(),
ClientVersion: conn.ClientVersion(),
}
logger := log.WithFields(log.Fields{"subsystem": "sftp", "method": request.Type, "username": request.User, "ip": request.IP})
logger := log.WithFields(log.Fields{"subsystem": "sftp", "username": conn.User(), "ip": conn.RemoteAddr().String()})
logger.Debug("validating credentials for SFTP connection")
if !validUsernameRegexp.MatchString(request.User) {
@@ -219,16 +206,15 @@ func (c *SFTPServer) makeCredentialsRequest(conn ssh.ConnMetadata, t remote.Sftp
}
logger.WithField("server", resp.Server).Debug("credentials validated and matched to server instance")
permissions := ssh.Permissions{
sshPerm := &ssh.Permissions{
Extensions: map[string]string{
"ip": conn.RemoteAddr().String(),
"uuid": resp.Server,
"user": resp.User,
"user": conn.User(),
"permissions": strings.Join(resp.Permissions, ","),
},
}
return &permissions, nil
return sshPerm, nil
}
// PrivateKeyPath returns the path to the host private key for this server instance.

View File

@@ -1,3 +1,3 @@
package system
var Version = "develop"
var Version = "1.6.0"

View File

@@ -42,6 +42,7 @@ func (l *Locker) Acquire() error {
return nil
}
// TryAcquire will attempt to acquire a power-lock until the context provided
// is canceled.
func (l *Locker) TryAcquire(ctx context.Context) error {
@@ -50,9 +51,7 @@ func (l *Locker) TryAcquire(ctx context.Context) error {
return nil
case <-ctx.Done():
if err := ctx.Err(); err != nil {
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
return ErrLockerLocked
}
return err
}
return nil
}
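A minimal, self-contained sketch of the locking pattern above: a buffered channel of capacity one acts as the mutex, and TryAcquire gives up when the context expires while the slot is still held. Mapping deadline and cancellation errors to a dedicated "locked" error reflects one side of this diff; the type and names below are stand-ins, not the wings Locker.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errLocked = errors.New("locker: already locked")

type locker struct{ ch chan bool }

func newLocker() *locker { return &locker{ch: make(chan bool, 1)} }

// TryAcquire blocks until the slot is free or the context is done.
func (l *locker) TryAcquire(ctx context.Context) error {
	select {
	case l.ch <- true:
		return nil
	case <-ctx.Done():
		err := ctx.Err()
		if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
			return errLocked
		}
		return err
	}
}

// Release frees the slot if it is currently held.
func (l *locker) Release() {
	select {
	case <-l.ch:
	default:
	}
}

func main() {
	l := newLocker()
	_ = l.TryAcquire(context.Background()) // first caller gets the lock

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(l.TryAcquire(ctx)) // second caller times out: "locker: already locked"
	l.Release()
}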

View File

@@ -81,7 +81,7 @@ func TestPower(t *testing.T) {
err := l.TryAcquire(ctx)
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, ErrLockerLocked)).IsTrue()
g.Assert(errors.Is(err, context.DeadlineExceeded)).IsTrue()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(1)
g.Assert(l.IsLocked()).IsTrue()
@@ -95,7 +95,7 @@ func TestPower(t *testing.T) {
l.Acquire()
go func() {
time.AfterFunc(time.Millisecond*50, func() {
time.AfterFunc(time.Millisecond * 50, func() {
l.Release()
})
}()

View File

@@ -44,7 +44,7 @@ func (r *Rate) Try() bool {
// Reset resets the internal state of the rate limiter back to zero.
func (r *Rate) Reset() {
r.mu.Lock()
r.count = 0
r.last = time.Now()
r.count = 0
r.last = time.Now()
r.mu.Unlock()
}
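A short usage sketch of the limiter touched above. The window-reset behaviour inside Try is an assumption made so the example is self-contained; only the count/last bookkeeping under the mutex is taken from the hunk.

package main

import (
	"fmt"
	"sync"
	"time"
)

// rate allows up to "limit" calls per "window"; Reset clears the counter early.
type rate struct {
	mu     sync.Mutex
	limit  uint64
	window time.Duration
	count  uint64
	last   time.Time
}

func newRate(limit uint64, window time.Duration) *rate {
	return &rate{limit: limit, window: window, last: time.Now()}
}

func (r *rate) Try() bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if time.Since(r.last) > r.window {
		r.count = 0
		r.last = time.Now()
	}
	if r.count >= r.limit {
		return false
	}
	r.count++
	return true
}

func (r *rate) Reset() {
	r.mu.Lock()
	r.count = 0
	r.last = time.Now()
	r.mu.Unlock()
}

func main() {
	r := newRate(2, time.Second)
	fmt.Println(r.Try(), r.Try(), r.Try()) // true true false
	r.Reset()
	fmt.Println(r.Try()) // true again after the reset
}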

View File

@@ -47,7 +47,7 @@ func TestRate(t *testing.T) {
g.It("resets back to zero when called", func() {
r := NewRate(10, time.Second)
for i := 0; i < 100; i++ {
if i%10 == 0 {
if i % 10 == 0 {
r.Reset()
}
g.Assert(r.Try()).IsTrue()

View File

@@ -1,29 +0,0 @@
package system
import (
"math/rand"
"regexp"
"strings"
)
var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)
const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
// RandomString generates a random string of alpha-numeric characters using a
// pseudo-random number generator. The output of this function IS NOT cryptographically
// secure, it is used solely for generating random strings outside a security context.
func RandomString(n int) string {
var b strings.Builder
b.Grow(n)
for i := 0; i < n; i++ {
b.WriteByte(characters[rand.Intn(len(characters))])
}
return b.String()
}
// TrimIPSuffix removes the internal port value from an IP address to ensure we're only
// ever working directly with the IP address.
func TrimIPSuffix(s string) string {
return ipTrimRegex.ReplaceAllString(s, "")
}
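Since this file is shown in full, a small usage sketch may still help illustrate what the two helpers actually produce. The constants and regexp are copied from the hunk above; seeding of math/rand is left to the runtime (Go 1.20+ seeds it automatically).

package main

import (
	"fmt"
	"math/rand"
	"regexp"
	"strings"
)

var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)

const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"

// randomString reproduces the helper: pseudo-random output, not for secrets.
func randomString(n int) string {
	var b strings.Builder
	b.Grow(n)
	for i := 0; i < n; i++ {
		b.WriteByte(characters[rand.Intn(len(characters))])
	}
	return b.String()
}

func main() {
	fmt.Println(randomString(12))
	// The trailing ":port" is stripped so only the address remains.
	fmt.Println(ipTrimRegex.ReplaceAllString("192.168.1.10:51442", "")) // 192.168.1.10
}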

View File

@@ -3,10 +3,12 @@ package system
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"strconv"
"sync"
"time"
"emperror.dev/errors"
"github.com/goccy/go-json"
@@ -88,16 +90,16 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
} else {
buf.Write(line)
}
// If we encountered an error with something in ReadLine that was not an
// EOF just abort the entire process here.
if err != nil && err != io.EOF {
return err
}
// Finish this loop and begin outputting the line if there is no prefix
// (the line fit into the default buffer), or if we hit the end of the line.
if !isPrefix || err == io.EOF {
break
}
// If we encountered an error with something in ReadLine that was not an
// EOF just abort the entire process here.
if err != nil {
return err
}
}
// Send the full buffer length over to the event handler to be emitted in
@@ -120,6 +122,22 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
return nil
}
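The hunk above only moves the error check; the overall read loop is easier to follow as a stand-alone sketch. This is a simplified reconstruction (no per-line size cap, no JSON handling), not the exact wings implementation.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// scanLines reads a stream with bufio.Reader.ReadLine, reassembles over-long
// lines from their prefix chunks, and hands each complete line to the callback.
func scanLines(r io.Reader, callback func(line []byte)) error {
	br := bufio.NewReader(r)
	var buf bytes.Buffer
	for {
		buf.Reset()
		for {
			line, isPrefix, err := br.ReadLine()
			buf.Write(line)
			if err == io.EOF {
				if buf.Len() > 0 {
					callback(buf.Bytes())
				}
				return nil
			}
			if err != nil {
				return err
			}
			if !isPrefix {
				break
			}
		}
		callback(buf.Bytes())
	}
}

func main() {
	input := strings.NewReader("line one\nline two\n")
	_ = scanLines(input, func(line []byte) { fmt.Printf("> %s\n", line) })
}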
// Runs a given work function every "d" duration until the provided context is canceled.
func Every(ctx context.Context, d time.Duration, work func(t time.Time)) {
ticker := time.NewTicker(d)
go func() {
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case t := <-ticker.C:
work(t)
}
}
}()
}
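A runnable usage sketch of the ticker helper added above; the body is copied almost verbatim, and only the lower-case name and the demo in main are new.

package main

import (
	"context"
	"fmt"
	"time"
)

// every runs work on every tick of d until ctx is canceled, then stops the
// ticker and exits the goroutine.
func every(ctx context.Context, d time.Duration, work func(t time.Time)) {
	ticker := time.NewTicker(d)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case t := <-ticker.C:
				work(t)
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()

	every(ctx, 100*time.Millisecond, func(t time.Time) {
		fmt.Println("tick at", t.Format(time.StampMilli))
	})

	<-ctx.Done() // roughly three ticks fire before the context expires
}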
func FormatBytes(b int64) string {
if b < 1024 {
return fmt.Sprintf("%d B", b)

View File

@@ -1,18 +1,9 @@
package main
import (
"math/rand"
"time"
"github.com/pterodactyl/wings/cmd"
)
func main() {
// Since we make use of the math/rand package in the code, especially for generating
// non-cryptographically secure random strings we need to seed the RNG. Just make use
// of the current time for this.
rand.Seed(time.Now().UnixNano())
// Execute the main binary code.
cmd.Execute()
}