Compare commits
67 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b00d328107 | ||
|
|
daa0ab75b4 | ||
|
|
ff4b7655c8 | ||
|
|
99cb61a6ef | ||
|
|
0407f22147 | ||
|
|
a74ea6a9ff | ||
|
|
37c52dd439 | ||
|
|
f8a25cb040 | ||
|
|
a6a610fd82 | ||
|
|
dfe5a77e0a | ||
|
|
d8a7bf2dde | ||
|
|
265f8a6b39 | ||
|
|
7fed6a68cb | ||
|
|
b0f99e2328 | ||
|
|
957257ecc3 | ||
|
|
058f643e65 | ||
|
|
6c7065592d | ||
|
|
3f481e9540 | ||
|
|
984bd10cf2 | ||
|
|
f5a64a0d7f | ||
|
|
6fb61261b0 | ||
|
|
3edec80efa | ||
|
|
0637eebefe | ||
|
|
e98d249cf7 | ||
|
|
b20bf6deab | ||
|
|
1b268b5625 | ||
|
|
7245791214 | ||
|
|
02cbf2df5b | ||
|
|
b6edf3acf9 | ||
|
|
c686992e85 | ||
|
|
c736c24118 | ||
|
|
9dfc651a91 | ||
|
|
ad26022c30 | ||
|
|
83861a6dec | ||
|
|
231e24aa33 | ||
|
|
e3ab241d7f | ||
|
|
c18e844689 | ||
|
|
8cee18a92b | ||
|
|
f952efd9c7 | ||
|
|
21cf66b2b4 | ||
|
|
251f91a08e | ||
|
|
4634c93182 | ||
|
|
8a867ccc44 | ||
|
|
61baccb1a3 | ||
|
|
7bd11c1c28 | ||
|
|
e1e7916790 | ||
|
|
f28e06267c | ||
|
|
59fbd2bcea | ||
|
|
204a4375fc | ||
|
|
dda7d10d37 | ||
|
|
ed330fa6be | ||
|
|
9864a0fe34 | ||
|
|
214baf83fb | ||
|
|
41fc1973d1 | ||
|
|
a51ce6f4ac | ||
|
|
cec51f11f0 | ||
|
|
b1be2081eb | ||
|
|
203a2091a0 | ||
|
|
7fa7cc313f | ||
|
|
f390784973 | ||
|
|
5df1acd10e | ||
|
|
1927a59cd0 | ||
|
|
5bcf4164fb | ||
|
|
37e4d57cdf | ||
|
|
7ededdb9a2 | ||
|
|
1d197714df | ||
|
|
6c98a955e3 |
3
.github/FUNDING.yml
vendored
3
.github/FUNDING.yml
vendored
@@ -1,2 +1 @@
|
|||||||
github: [ DaneEveritt ]
|
github: [ matthewpi ]
|
||||||
custom: [ "https://paypal.me/PterodactylSoftware" ]
|
|
||||||
|
|||||||
3
.github/workflows/build-test.yml
vendored
3
.github/workflows/build-test.yml
vendored
@@ -12,7 +12,7 @@ jobs:
|
|||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ ubuntu-20.04 ]
|
os: [ ubuntu-20.04 ]
|
||||||
go: [ '^1.17' ]
|
go: [ '1.18.7' ]
|
||||||
goos: [ linux ]
|
goos: [ linux ]
|
||||||
goarch: [ amd64, arm64 ]
|
goarch: [ amd64, arm64 ]
|
||||||
runs-on: ${{ matrix.os }}
|
runs-on: ${{ matrix.os }}
|
||||||
@@ -58,7 +58,6 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH} wings.go
|
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH} wings.go
|
||||||
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH}_debug wings.go
|
go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH}_debug wings.go
|
||||||
upx build/wings_${GOOS}_${{ matrix.goarch }}
|
|
||||||
chmod +x build/*
|
chmod +x build/*
|
||||||
- name: Tests
|
- name: Tests
|
||||||
run: go test -race ./...
|
run: go test -race ./...
|
||||||
|
|||||||
6
.github/workflows/release.yml
vendored
6
.github/workflows/release.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
|||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
- uses: actions/setup-go@v2
|
- uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: '^1.17'
|
go-version: '1.18.7'
|
||||||
- name: Build
|
- name: Build
|
||||||
env:
|
env:
|
||||||
REF: ${{ github.ref }}
|
REF: ${{ github.ref }}
|
||||||
@@ -22,8 +22,8 @@ jobs:
|
|||||||
run: go test ./...
|
run: go test ./...
|
||||||
- name: Compress binary and make it executable
|
- name: Compress binary and make it executable
|
||||||
run: |
|
run: |
|
||||||
upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
|
chmod +x build/wings_linux_amd64
|
||||||
upx build/wings_linux_arm64 && chmod +x build/wings_linux_arm64
|
chmod +x build/wings_linux_arm64
|
||||||
- name: Extract changelog
|
- name: Extract changelog
|
||||||
env:
|
env:
|
||||||
REF: ${{ github.ref }}
|
REF: ${{ github.ref }}
|
||||||
|
|||||||
51
CHANGELOG.md
51
CHANGELOG.md
@@ -1,5 +1,56 @@
|
|||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## v1.7.2
|
||||||
|
### Fixed
|
||||||
|
* The S3 backup driver now supports Cloudflare R2
|
||||||
|
|
||||||
|
### Added
|
||||||
|
* During a server transfer, there is a new "Archiving" status that outputs the progress of creating the server transfer archive.
|
||||||
|
* Adds a configuration option to control the list of trusted proxies that can be used to determine the client IP address.
|
||||||
|
* Adds a configuration option to control the Docker username space setting when Wings creates containers.
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
* Releases are now built using `Go 1.18` — the minimum version required to build Wings is now `Go 1.18`.
|
||||||
|
|
||||||
|
## v1.7.1
|
||||||
|
### Fixed
|
||||||
|
* YAML parser has been updated to fix some strange issues
|
||||||
|
|
||||||
|
### Added
|
||||||
|
* Added `Force Outgoing IP` option for servers to ensure outgoing traffic uses the server's IP address
|
||||||
|
* Adds an option to control the level of gzip compression for backups
|
||||||
|
|
||||||
|
## v1.7.0
|
||||||
|
### Fixed
|
||||||
|
* Fixes multi-platform support for Wings' Docker image.
|
||||||
|
|
||||||
|
### Added
|
||||||
|
* Adds support for tracking of SFTP actions, power actions, server commands, and file uploads by utilizing a local SQLite database and processing events before sending them to the Panel.
|
||||||
|
* Adds support for configuring the MTU on the `pterodactyl0` network.
|
||||||
|
|
||||||
|
## v1.6.4
|
||||||
|
### Fixed
|
||||||
|
* Fixes a bug causing CPU limiting to not be properly applied to servers.
|
||||||
|
* Fixes a bug causing zip archives to decompress without taking into account nested folder structures.
|
||||||
|
|
||||||
|
## v1.6.3
|
||||||
|
### Fixed
|
||||||
|
* Fixes SFTP authentication failing for administrative users due to a permissions adjustment on the Panel.
|
||||||
|
|
||||||
|
## v1.6.2
|
||||||
|
### Fixed
|
||||||
|
* Fixes file upload size not being properly enforced.
|
||||||
|
* Fixes a bug that prevented listing a directory when it contained a named pipe. Also added a check to prevent attempting to read a named pipe directly.
|
||||||
|
* Fixes a bug with the archiver logic that would include folders that had the same name prefix. (for example, requesting only `map` would also include `map2` and `map3`)
|
||||||
|
* Requests to the Panel that return a client error (4xx response code) no longer trigger an exponential backoff, they immediately stop the request.
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
* CPU limit fields are only set on the Docker container if they have been specified for the server — otherwise they are left empty.
|
||||||
|
|
||||||
|
### Added
|
||||||
|
* Added the ability to define the location of the temporary folder used by Wings — defaults to `/tmp/pterodactyl`.
|
||||||
|
* Adds the ability to authenticate for SFTP using public keys (requires `Panel@1.8.0`).
|
||||||
|
|
||||||
## v1.6.1
|
## v1.6.1
|
||||||
### Fixed
|
### Fixed
|
||||||
* Fixes error that would sometimes occur when starting a server that would cause the temporary power action lock to never be released due to a blocked channel.
|
* Fixes error that would sometimes occur when starting a server that would cause the temporary power action lock to never be released due to a blocked channel.
|
||||||
|
|||||||
@@ -1,19 +1,18 @@
|
|||||||
# Stage 1 (Build)
|
# Stage 1 (Build)
|
||||||
FROM --platform=$BUILDPLATFORM golang:1.17-alpine AS builder
|
FROM golang:1.18-alpine AS builder
|
||||||
|
|
||||||
ARG VERSION
|
ARG VERSION
|
||||||
RUN apk add --update --no-cache git make upx
|
RUN apk add --update --no-cache git make
|
||||||
WORKDIR /app/
|
WORKDIR /app/
|
||||||
COPY go.mod go.sum /app/
|
COPY go.mod go.sum /app/
|
||||||
RUN go mod download
|
RUN go mod download
|
||||||
COPY . /app/
|
COPY . /app/
|
||||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
|
RUN CGO_ENABLED=0 go build \
|
||||||
-ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=$VERSION" \
|
-ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=$VERSION" \
|
||||||
-v \
|
-v \
|
||||||
-trimpath \
|
-trimpath \
|
||||||
-o wings \
|
-o wings \
|
||||||
wings.go
|
wings.go
|
||||||
RUN upx wings
|
|
||||||
RUN echo "ID=\"distroless\"" > /etc/os-release
|
RUN echo "ID=\"distroless\"" > /etc/os-release
|
||||||
|
|
||||||
# Stage 2 (Final)
|
# Stage 2 (Final)
|
||||||
|
|||||||
6
Makefile
6
Makefile
@@ -4,6 +4,9 @@ build:
|
|||||||
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_amd64 -v wings.go
|
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_amd64 -v wings.go
|
||||||
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go
|
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go
|
||||||
|
|
||||||
|
race:
|
||||||
|
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
|
||||||
|
|
||||||
debug:
|
debug:
|
||||||
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
|
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
|
||||||
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
|
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
|
||||||
@@ -14,9 +17,6 @@ rmdebug:
|
|||||||
go build -gcflags "all=-N -l" -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
|
go build -gcflags "all=-N -l" -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
|
||||||
sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml
|
sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml
|
||||||
|
|
||||||
compress:
|
|
||||||
upx --brute build/wings_*
|
|
||||||
|
|
||||||
cross-build: clean build compress
|
cross-build: clean build compress
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
|
|||||||
17
README.md
17
README.md
@@ -14,27 +14,22 @@ dependencies, and allowing users to authenticate with the same credentials they
|
|||||||
|
|
||||||
## Sponsors
|
## Sponsors
|
||||||
I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's developement.
|
I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's developement.
|
||||||
[Interested in becoming a sponsor?](https://github.com/sponsors/DaneEveritt)
|
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
|
||||||
|
|
||||||
| Company | About |
|
| Company | About |
|
||||||
| ------- | ----- |
|
| ------- | ----- |
|
||||||
| [**WISP**](https://wisp.gg) | Extra features. |
|
| [**WISP**](https://wisp.gg) | Extra features. |
|
||||||
| [**MixmlHosting**](https://mixmlhosting.com) | MixmlHosting provides high quality Virtual Private Servers along with game servers, all at a affordable price. |
|
|
||||||
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
|
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
|
||||||
|
| [**Fragnet**](https://fragnet.net) | Providing low latency, high-end game hosting solutions to gamers, game studios and eSports platforms. |
|
||||||
|
| [**Tempest**](https://tempest.net/) | Tempest Hosting is a subsidiary of Path Network, Inc. offering unmetered DDoS protected 10Gbps dedicated servers, starting at just $80/month. Full anycast, tons of filters. |
|
||||||
| [**Bloom.host**](https://bloom.host) | Bloom.host offers dedicated core VPS and Minecraft hosting with Ryzen 9 processors. With owned-hardware, we offer truly unbeatable prices on high-performance hosting. |
|
| [**Bloom.host**](https://bloom.host) | Bloom.host offers dedicated core VPS and Minecraft hosting with Ryzen 9 processors. With owned-hardware, we offer truly unbeatable prices on high-performance hosting. |
|
||||||
| [**MineStrator**](https://minestrator.com/) | Looking for a French highend hosting company for you minecraft server? More than 14,000 members on our discord, trust us. |
|
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
|
||||||
| [**DedicatedMC**](https://dedicatedmc.io/) | DedicatedMC provides Raw Power hosting at affordable pricing, making sure to never compromise on your performance and giving you the best performance money can buy. |
|
|
||||||
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
|
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
|
||||||
| [**XCORE**](https://xcore-server.de/) | XCORE offers High-End Servers for hosting and gaming since 2012. Fast, excellent and well-known for eSports Gaming. |
|
|
||||||
| [**RoyaleHosting**](https://royalehosting.net/) | Build your dreams and deploy them with RoyaleHosting’s reliable servers and network. Easy to use, provisioned in a couple of minutes. |
|
|
||||||
| [**Spill Hosting**](https://spillhosting.no/) | Spill Hosting is a Norwegian hosting service, which aims for inexpensive services on quality servers. Premium i9-9900K processors will run your game like a dream. |
|
|
||||||
| [**DeinServerHost**](https://deinserverhost.de/) | DeinServerHost offers Dedicated, vps and Gameservers for many popular Games like Minecraft and Rust in Germany since 2013. |
|
| [**DeinServerHost**](https://deinserverhost.de/) | DeinServerHost offers Dedicated, vps and Gameservers for many popular Games like Minecraft and Rust in Germany since 2013. |
|
||||||
| [**HostBend**](https://hostbend.com/) | HostBend offers a variety of solutions for developers, students, and others who have a tight budget but don't want to compromise quality and support. |
|
|
||||||
| [**Capitol Hosting Solutions**](https://chs.gg/) | CHS is *the* budget friendly hosting company for Australian and American gamers, offering a variety of plans from Web Hosting to Game Servers; Custom Solutions too! |
|
|
||||||
| [**ByteAnia**](https://byteania.com/?utm_source=pterodactyl) | ByteAnia offers the best performing and most affordable **Ryzen 5000 Series hosting** on the market for *unbeatable prices*! |
|
|
||||||
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
|
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
|
||||||
|
| [**HostEZ**](https://hostez.io) | Providing North America Valheim, Minecraft and other popular games with low latency, high uptime and maximum availability. EZ! |
|
||||||
| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa.|
|
| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa.|
|
||||||
| [**RocketNode**](https://rocketnode.net) | RocketNode is a VPS and Game Server provider that offers the best performing VPS and Game hosting Solutions at affordable prices! |
|
| [**Gamenodes**](https://gamenodes.nl) | Gamenodes love quality. For Minecraft, Discord Bots and other services, among others. With our own programmers, we provide just that little bit of extra service! |
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
* [Panel Documentation](https://pterodactyl.io/panel/1.0/getting_started.html)
|
* [Panel Documentation](https://pterodactyl.io/panel/1.0/getting_started.html)
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ func newDiagnosticsCommand() *cobra.Command {
|
|||||||
return command
|
return command
|
||||||
}
|
}
|
||||||
|
|
||||||
// diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
|
// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.
|
||||||
// We collect:
|
// We collect:
|
||||||
// - wings and docker versions
|
// - wings and docker versions
|
||||||
// - relevant parts of daemon configuration
|
// - relevant parts of daemon configuration
|
||||||
|
|||||||
127
cmd/migrate_vhd.go
Normal file
127
cmd/migrate_vhd.go
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
"github.com/apex/log"
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/pterodactyl/wings/internal/vhd"
|
||||||
|
"github.com/pterodactyl/wings/loggers/cli"
|
||||||
|
"github.com/pterodactyl/wings/remote"
|
||||||
|
"github.com/pterodactyl/wings/server"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MigrateVHDCommand struct {
|
||||||
|
manager *server.Manager
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMigrateVHDCommand() *cobra.Command {
|
||||||
|
return &cobra.Command{
|
||||||
|
Use: "migrate-vhd",
|
||||||
|
Short: "migrates existing data from a directory tree into virtual hard-disks",
|
||||||
|
PreRun: func(cmd *cobra.Command, args []string) {
|
||||||
|
log.SetLevel(log.DebugLevel)
|
||||||
|
log.SetHandler(cli.Default)
|
||||||
|
},
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
client := remote.NewFromConfig(config.Get())
|
||||||
|
manager, err := server.NewManager(cmd.Context(), client, true)
|
||||||
|
if err != nil {
|
||||||
|
log.WithField("error", err).Fatal("failed to create new server manager")
|
||||||
|
}
|
||||||
|
c := &MigrateVHDCommand{
|
||||||
|
manager: manager,
|
||||||
|
}
|
||||||
|
if err := c.Run(cmd.Context()); err != nil {
|
||||||
|
log.WithField("error", err).Fatal("failed to execute command")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes the migration command.
|
||||||
|
func (m *MigrateVHDCommand) Run(ctx context.Context) error {
|
||||||
|
if !vhd.Enabled() {
|
||||||
|
return errors.New("cannot migrate to vhd: the underlying driver must be set to \"vhd\"")
|
||||||
|
}
|
||||||
|
for _, s := range m.manager.All() {
|
||||||
|
s.Log().Debug("starting migration of server contents to virtual disk...")
|
||||||
|
|
||||||
|
v := vhd.New(s.DiskSpace(), vhd.DiskPath(s.ID()), s.Filesystem().Path())
|
||||||
|
s.Log().WithField("disk_image", v.Path()).Info("creating virtual disk for server")
|
||||||
|
if err := v.Allocate(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Log().Info("creating virtual filesystem for server")
|
||||||
|
if err := v.MakeFilesystem(ctx); err != nil {
|
||||||
|
// If the filesystem already exists no worries, just move on with our
|
||||||
|
// day here.
|
||||||
|
if !errors.Is(err, vhd.ErrFilesystemExists) {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bak := strings.TrimSuffix(s.Filesystem().Path(), "/") + "_bak"
|
||||||
|
mounted, err := v.IsMounted(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
} else if !mounted {
|
||||||
|
s.Log().WithField("backup_dir", bak).Debug("virtual disk is not yet mounted, creating backup directory")
|
||||||
|
// Create a backup directory of the server files if one does not already exist
|
||||||
|
// at that location. If one does exists we'll just assume it is good to go and
|
||||||
|
// rely on it to provide the files we'll need.
|
||||||
|
if _, err := os.Lstat(bak); os.IsNotExist(err) {
|
||||||
|
if err := os.Rename(s.Filesystem().Path(), bak); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to rename existing data directory for backup")
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
if err := os.RemoveAll(s.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
|
||||||
|
return errors.Wrap(err, "failed to remove base server files path")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
s.Log().Warn("server appears to already have existing mount, not creating data backup")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attempt to mount the disk at the expected path now that we've created
|
||||||
|
// a backup of the server files.
|
||||||
|
if err := v.Mount(ctx); err != nil && !errors.Is(err, vhd.ErrFilesystemMounted) {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy over the files from the backup for this server but only
|
||||||
|
// if we have a backup directory currently.
|
||||||
|
_, err = os.Lstat(bak)
|
||||||
|
if err != nil {
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
s.Log().WithField("error", err).Warn("failed to stat backup directory")
|
||||||
|
} else {
|
||||||
|
s.Log().Info("no backup data directory exists, not restoring files")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
cmd := exec.CommandContext(ctx, "cp", "-r", bak+"/.", s.Filesystem().Path())
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
return errors.Wrap(err, "migrate: failed to move old server files into new direcotry")
|
||||||
|
} else {
|
||||||
|
if err := os.RemoveAll(bak); err != nil {
|
||||||
|
s.Log().WithField("directory", bak).WithField("error", err).Warn("failed to remove backup directory")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Log().Info("updating server file ownership...")
|
||||||
|
if err := s.Filesystem().Chown("/"); err != nil {
|
||||||
|
s.Log().WithField("error", err).Warn("failed to update ownership of new server files")
|
||||||
|
}
|
||||||
|
|
||||||
|
s.Log().Info("finished migration to virtual disk...")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
57
cmd/root.go
57
cmd/root.go
@@ -9,15 +9,16 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
_ "net/http/pprof"
|
_ "net/http/pprof"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/cron"
|
||||||
|
"github.com/pterodactyl/wings/internal/database"
|
||||||
|
|
||||||
"github.com/NYTimes/logrotate"
|
"github.com/NYTimes/logrotate"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/apex/log/handlers/multi"
|
"github.com/apex/log/handlers/multi"
|
||||||
@@ -30,7 +31,6 @@ import (
|
|||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
"github.com/pterodactyl/wings/internal/notify"
|
|
||||||
"github.com/pterodactyl/wings/loggers/cli"
|
"github.com/pterodactyl/wings/loggers/cli"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
"github.com/pterodactyl/wings/router"
|
"github.com/pterodactyl/wings/router"
|
||||||
@@ -47,8 +47,16 @@ var (
|
|||||||
var rootCommand = &cobra.Command{
|
var rootCommand = &cobra.Command{
|
||||||
Use: "wings",
|
Use: "wings",
|
||||||
Short: "Runs the API server allowing programmatic control of game servers for Pterodactyl Panel.",
|
Short: "Runs the API server allowing programmatic control of game servers for Pterodactyl Panel.",
|
||||||
PreRun: func(cmd *cobra.Command, args []string) {
|
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||||
initConfig()
|
initConfig()
|
||||||
|
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
|
||||||
|
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
|
||||||
|
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
PreRun: func(cmd *cobra.Command, args []string) {
|
||||||
initLogging()
|
initLogging()
|
||||||
if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
|
if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
|
||||||
if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
|
if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
|
||||||
@@ -77,18 +85,19 @@ func Execute() {
|
|||||||
func init() {
|
func init() {
|
||||||
rootCommand.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
|
rootCommand.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
|
||||||
rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
|
rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
|
||||||
|
rootCommand.PersistentFlags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
|
||||||
|
|
||||||
// Flags specifically used when running the API.
|
// Flags specifically used when running the API.
|
||||||
rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
|
rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
|
||||||
rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
|
rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
|
||||||
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
|
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
|
||||||
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
|
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
|
||||||
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
|
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
|
||||||
rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
|
|
||||||
|
|
||||||
rootCommand.AddCommand(versionCommand)
|
rootCommand.AddCommand(versionCommand)
|
||||||
rootCommand.AddCommand(configureCmd)
|
rootCommand.AddCommand(configureCmd)
|
||||||
rootCommand.AddCommand(newDiagnosticsCommand())
|
rootCommand.AddCommand(newDiagnosticsCommand())
|
||||||
|
rootCommand.AddCommand(newMigrateVHDCommand())
|
||||||
}
|
}
|
||||||
|
|
||||||
func rootCmdRun(cmd *cobra.Command, _ []string) {
|
func rootCmdRun(cmd *cobra.Command, _ []string) {
|
||||||
@@ -96,13 +105,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
log.Debug("running in debug mode")
|
log.Debug("running in debug mode")
|
||||||
log.WithField("config_file", configPath).Info("loading configuration from file")
|
log.WithField("config_file", configPath).Info("loading configuration from file")
|
||||||
|
|
||||||
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
|
|
||||||
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
|
|
||||||
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
|
|
||||||
InsecureSkipVerify: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := config.ConfigureTimezone(); err != nil {
|
if err := config.ConfigureTimezone(); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
|
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
|
||||||
}
|
}
|
||||||
@@ -133,7 +135,11 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
}),
|
}),
|
||||||
)
|
)
|
||||||
|
|
||||||
manager, err := server.NewManager(cmd.Context(), pclient)
|
if err := database.Initialize(); err != nil {
|
||||||
|
log.WithField("error", err).Fatal("failed to initialize database")
|
||||||
|
}
|
||||||
|
|
||||||
|
manager, err := server.NewManager(cmd.Context(), pclient, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to load server configurations")
|
log.WithField("error", err).Fatal("failed to load server configurations")
|
||||||
}
|
}
|
||||||
@@ -159,7 +165,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
ticker := time.NewTicker(time.Minute)
|
ticker := time.NewTicker(time.Minute)
|
||||||
// Every minute, write the current server states to the disk to allow for a more
|
// Every minute, write the current server states to the disk to allow for a more
|
||||||
// seamless hard-reboot process in which wings will re-sync server states based
|
// seamless hard-reboot process in which wings will re-sync server states based
|
||||||
// on it's last tracked state.
|
// on its last tracked state.
|
||||||
go func() {
|
go func() {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@@ -262,6 +268,13 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
if s, err := cron.Scheduler(cmd.Context(), manager); err != nil {
|
||||||
|
log.WithField("error", err).Fatal("failed to initialize cron system")
|
||||||
|
} else {
|
||||||
|
log.WithField("subsystem", "cron").Info("starting cron processes")
|
||||||
|
s.StartAsync()
|
||||||
|
}
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
// Run the SFTP server.
|
// Run the SFTP server.
|
||||||
if err := sftp.New(manager).Run(); err != nil {
|
if err := sftp.New(manager).Run(); err != nil {
|
||||||
@@ -327,7 +340,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check if the server should run with TLS but using autocert.
|
// Check if the server should run with TLS but using autocert.
|
||||||
go func(s *http.Server, api config.ApiConfiguration, sys config.SystemConfiguration, autotls bool, tlshostname string) {
|
|
||||||
if autotls {
|
if autotls {
|
||||||
m := autocert.Manager{
|
m := autocert.Manager{
|
||||||
Prompt: autocert.AcceptTOS,
|
Prompt: autocert.AcceptTOS,
|
||||||
@@ -366,19 +378,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
|||||||
if err := s.ListenAndServe(); err != nil {
|
if err := s.ListenAndServe(); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to configure HTTP server")
|
log.WithField("error", err).Fatal("failed to configure HTTP server")
|
||||||
}
|
}
|
||||||
}(s, api, sys, autotls, tlshostname)
|
|
||||||
|
|
||||||
if err := notify.Readiness(); err != nil {
|
|
||||||
log.WithField("error", err).Error("failed to notify systemd of readiness state")
|
|
||||||
}
|
|
||||||
|
|
||||||
c := make(chan os.Signal, 1)
|
|
||||||
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
|
|
||||||
<-c
|
|
||||||
|
|
||||||
if err := notify.Stopping(); err != nil {
|
|
||||||
log.WithField("error", err).Error("failed to notify systemd of stopping state")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reads the configuration from the disk and then sets up the global singleton
|
// Reads the configuration from the disk and then sets up the global singleton
|
||||||
|
|||||||
@@ -91,6 +91,9 @@ type ApiConfiguration struct {
|
|||||||
|
|
||||||
// The maximum size for files uploaded through the Panel in MB.
|
// The maximum size for files uploaded through the Panel in MB.
|
||||||
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
||||||
|
|
||||||
|
// A list of IP address of proxies that may send a X-Forwarded-For header to set the true clients IP
|
||||||
|
TrustedProxies []string `json:"trusted_proxies" yaml:"trusted_proxies"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoteQueryConfiguration defines the configuration settings for remote requests
|
// RemoteQueryConfiguration defines the configuration settings for remote requests
|
||||||
@@ -163,6 +166,15 @@ type SystemConfiguration struct {
|
|||||||
// disk usage is not a concern.
|
// disk usage is not a concern.
|
||||||
DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`
|
DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`
|
||||||
|
|
||||||
|
// ActivitySendInterval is the amount of time that should ellapse between aggregated server activity
|
||||||
|
// being sent to the Panel. By default this will send activity collected over the last minute. Keep
|
||||||
|
// in mind that only a fixed number of activity log entries, defined by ActivitySendCount, will be sent
|
||||||
|
// in each run.
|
||||||
|
ActivitySendInterval int `default:"60" yaml:"activity_send_interval"`
|
||||||
|
|
||||||
|
// ActivitySendCount is the number of activity events to send per batch.
|
||||||
|
ActivitySendCount int `default:"100" yaml:"activity_send_count"`
|
||||||
|
|
||||||
// If set to true, file permissions for a server will be checked when the process is
|
// If set to true, file permissions for a server will be checked when the process is
|
||||||
// booted. This can cause boot delays if the server has a large amount of files. In most
|
// booted. This can cause boot delays if the server has a large amount of files. In most
|
||||||
// cases disabling this should not have any major impact unless external processes are
|
// cases disabling this should not have any major impact unless external processes are
|
||||||
@@ -210,6 +222,15 @@ type Backups struct {
|
|||||||
//
|
//
|
||||||
// Defaults to 0 (unlimited)
|
// Defaults to 0 (unlimited)
|
||||||
WriteLimit int `default:"0" yaml:"write_limit"`
|
WriteLimit int `default:"0" yaml:"write_limit"`
|
||||||
|
|
||||||
|
// CompressionLevel determines how much backups created by wings should be compressed.
|
||||||
|
//
|
||||||
|
// "none" -> no compression will be applied
|
||||||
|
// "best_speed" -> uses gzip level 1 for fast speed
|
||||||
|
// "best_compression" -> uses gzip level 9 for minimal disk space useage
|
||||||
|
//
|
||||||
|
// Defaults to "best_speed" (level 1)
|
||||||
|
CompressionLevel string `default:"best_speed" yaml:"compression_level"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type Transfers struct {
|
type Transfers struct {
|
||||||
@@ -284,6 +305,11 @@ type Configuration struct {
|
|||||||
// is only required by users running Wings without SSL certificates and using internal IP
|
// is only required by users running Wings without SSL certificates and using internal IP
|
||||||
// addresses in order to connect. Most users should NOT enable this setting.
|
// addresses in order to connect. Most users should NOT enable this setting.
|
||||||
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
|
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
|
||||||
|
|
||||||
|
// Servers contains all of the settings that are used when configuring individual servers
|
||||||
|
// on the system. This is a global configuration for all server instances, not to be confused
|
||||||
|
// with the per-server configurations provided by the Panel API.
|
||||||
|
Servers Servers `json:"servers" yaml:"servers"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAtPath creates a new struct and set the path where it should be stored.
|
// NewAtPath creates a new struct and set the path where it should be stored.
|
||||||
|
|||||||
@@ -36,6 +36,7 @@ type DockerNetworkConfiguration struct {
|
|||||||
Mode string `default:"pterodactyl_nw" yaml:"network_mode"`
|
Mode string `default:"pterodactyl_nw" yaml:"network_mode"`
|
||||||
IsInternal bool `default:"false" yaml:"is_internal"`
|
IsInternal bool `default:"false" yaml:"is_internal"`
|
||||||
EnableICC bool `default:"true" yaml:"enable_icc"`
|
EnableICC bool `default:"true" yaml:"enable_icc"`
|
||||||
|
NetworkMTU int64 `default:"1500" yaml:"network_mtu"`
|
||||||
Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
|
Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,6 +78,14 @@ type DockerConfiguration struct {
|
|||||||
Overhead Overhead `json:"overhead" yaml:"overhead"`
|
Overhead Overhead `json:"overhead" yaml:"overhead"`
|
||||||
|
|
||||||
UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
|
UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
|
||||||
|
|
||||||
|
// Sets the user namespace mode for the container when user namespace remapping option is
|
||||||
|
// enabled.
|
||||||
|
//
|
||||||
|
// If the value is blank, the daemon's user namespace remapping configuration is used,
|
||||||
|
// if the value is "host", then the pterodactyl containers are started with user namespace
|
||||||
|
// remapping disabled
|
||||||
|
UsernsMode string `default:"" json:"userns_mode" yaml:"userns_mode"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegistryConfiguration defines the authentication credentials for a given
|
// RegistryConfiguration defines the authentication credentials for a given
|
||||||
|
|||||||
28
config/config_servers.go
Normal file
28
config/config_servers.go
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
package config
|
||||||
|
|
||||||
|
type FSDriver string
|
||||||
|
|
||||||
|
const (
|
||||||
|
FSDriverLocal FSDriver = "local"
|
||||||
|
FSDriverVHD FSDriver = "vhd"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Servers struct {
|
||||||
|
// Filesystem defines all of the filesystem specific settings used for servers.
|
||||||
|
Filesystem Filesystem `json:"filesystem" yaml:"filesystem"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Filesystem struct {
|
||||||
|
// Driver defines the underlying filesystem driver that is used when a server is
|
||||||
|
// created on the system. This currently supports either of the following drivers:
|
||||||
|
//
|
||||||
|
// local: the local driver is the default one used by Wings. This offloads all of the
|
||||||
|
// disk limit enforcement to Wings itself. This has a performance impact but is
|
||||||
|
// the most compatiable with all systems.
|
||||||
|
// vhd: the vhd driver uses "virtual" disks on the host system to enforce disk limits
|
||||||
|
// on the server. This is more performant since calculations do not need to be made
|
||||||
|
// by Wings itself when enforcing limits. It also avoids vulnerabilities that exist
|
||||||
|
// in the local driver which allow malicious processes to quickly create massive files
|
||||||
|
// before Wings is able to detect and stop them from being written.
|
||||||
|
Driver FSDriver `default:"local" json:"driver" yaml:"driver"`
|
||||||
|
}
|
||||||
@@ -12,6 +12,11 @@ import (
|
|||||||
// Defines the allocations available for a given server. When using the Docker environment
|
// Defines the allocations available for a given server. When using the Docker environment
|
||||||
// driver these correspond to mappings for the container that allow external connections.
|
// driver these correspond to mappings for the container that allow external connections.
|
||||||
type Allocations struct {
|
type Allocations struct {
|
||||||
|
// ForceOutgoingIP causes a dedicated bridge network to be created for the
|
||||||
|
// server with a special option, causing Docker to SNAT outgoing traffic to
|
||||||
|
// the DefaultMapping's IP. This is important to servers which rely on external
|
||||||
|
// services that check the IP of the server (Source Engine servers, for example).
|
||||||
|
ForceOutgoingIP bool `json:"force_outgoing_ip"`
|
||||||
// Defines the default allocation that should be used for this server. This is
|
// Defines the default allocation that should be used for this server. This is
|
||||||
// what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
|
// what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
|
||||||
// files or the startup arguments for a server.
|
// files or the startup arguments for a server.
|
||||||
|
|||||||
@@ -41,12 +41,12 @@ func ConfigureDocker(ctx context.Context) error {
|
|||||||
nw := config.Get().Docker.Network
|
nw := config.Get().Docker.Network
|
||||||
resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
|
resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if client.IsErrNotFound(err) {
|
if !client.IsErrNotFound(err) {
|
||||||
log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
|
|
||||||
if err := createDockerNetwork(ctx, cli); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
|
log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
|
||||||
|
if err := createDockerNetwork(ctx, cli); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -92,7 +92,7 @@ func createDockerNetwork(ctx context.Context, cli *client.Client) error {
|
|||||||
"com.docker.network.bridge.enable_ip_masquerade": "true",
|
"com.docker.network.bridge.enable_ip_masquerade": "true",
|
||||||
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
|
"com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
|
||||||
"com.docker.network.bridge.name": "pterodactyl0",
|
"com.docker.network.bridge.name": "pterodactyl0",
|
||||||
"com.docker.network.driver.mtu": "1500",
|
"com.docker.network.driver.mtu": strconv.FormatInt(nw.NetworkMTU, 10),
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
"github.com/docker/docker/errdefs"
|
"github.com/docker/docker/errdefs"
|
||||||
"github.com/goccy/go-json"
|
"github.com/goccy/go-json"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -147,10 +147,12 @@ func (e *Environment) InSituUpdate() error {
|
|||||||
// currently available for it. If the container already exists it will be
|
// currently available for it. If the container already exists it will be
|
||||||
// returned.
|
// returned.
|
||||||
func (e *Environment) Create() error {
|
func (e *Environment) Create() error {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
// If the container already exists don't hit the user with an error, just return
|
// If the container already exists don't hit the user with an error, just return
|
||||||
// the current information about it which is what we would do when creating the
|
// the current information about it which is what we would do when creating the
|
||||||
// container anyways.
|
// container anyways.
|
||||||
if _, err := e.ContainerInspect(context.Background()); err == nil {
|
if _, err := e.ContainerInspect(ctx); err == nil {
|
||||||
return nil
|
return nil
|
||||||
} else if !client.IsErrNotFound(err) {
|
} else if !client.IsErrNotFound(err) {
|
||||||
return errors.Wrap(err, "environment/docker: failed to inspect container")
|
return errors.Wrap(err, "environment/docker: failed to inspect container")
|
||||||
@@ -190,7 +192,34 @@ func (e *Environment) Create() error {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))
|
networkMode := container.NetworkMode(config.Get().Docker.Network.Mode)
|
||||||
|
if a.ForceOutgoingIP {
|
||||||
|
e.log().Debug("environment/docker: forcing outgoing IP address")
|
||||||
|
networkName := strings.ReplaceAll(e.Id, "-", "")
|
||||||
|
networkMode = container.NetworkMode(networkName)
|
||||||
|
|
||||||
|
if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
|
||||||
|
if !client.IsErrNotFound(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
|
||||||
|
Driver: "bridge",
|
||||||
|
EnableIPv6: false,
|
||||||
|
Internal: false,
|
||||||
|
Attachable: false,
|
||||||
|
Ingress: false,
|
||||||
|
ConfigOnly: false,
|
||||||
|
Options: map[string]string{
|
||||||
|
"encryption": "false",
|
||||||
|
"com.docker.network.bridge.default_bridge": "false",
|
||||||
|
"com.docker.network.host_ipv4": a.DefaultMapping.Ip,
|
||||||
|
},
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
hostConf := &container.HostConfig{
|
hostConf := &container.HostConfig{
|
||||||
PortBindings: a.DockerBindings(),
|
PortBindings: a.DockerBindings(),
|
||||||
@@ -202,7 +231,7 @@ func (e *Environment) Create() error {
|
|||||||
// Configure the /tmp folder mapping in containers. This is necessary for some
|
// Configure the /tmp folder mapping in containers. This is necessary for some
|
||||||
// games that need to make use of it for downloads and other installation processes.
|
// games that need to make use of it for downloads and other installation processes.
|
||||||
Tmpfs: map[string]string{
|
Tmpfs: map[string]string{
|
||||||
"/tmp": "rw,exec,nosuid,size=" + tmpfsSize + "M",
|
"/tmp": "rw,exec,nosuid,size=" + strconv.Itoa(int(config.Get().Docker.TmpfsSize)) + "M",
|
||||||
},
|
},
|
||||||
|
|
||||||
// Define resource limits for the container based on the data passed through
|
// Define resource limits for the container based on the data passed through
|
||||||
@@ -231,10 +260,11 @@ func (e *Environment) Create() error {
|
|||||||
"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
|
"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
|
||||||
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
|
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
|
||||||
},
|
},
|
||||||
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
|
NetworkMode: networkMode,
|
||||||
|
UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
|
if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {
|
||||||
return errors.Wrap(err, "environment/docker: failed to create container")
|
return errors.Wrap(err, "environment/docker: failed to create container")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -480,21 +510,3 @@ func (e *Environment) convertMounts() []mount.Mount {
|
|||||||
|
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Environment) resources() container.Resources {
|
|
||||||
l := e.Configuration.Limits()
|
|
||||||
pids := l.ProcessLimit()
|
|
||||||
|
|
||||||
return container.Resources{
|
|
||||||
Memory: l.BoundedMemoryLimit(),
|
|
||||||
MemoryReservation: l.MemoryLimit * 1_000_000,
|
|
||||||
MemorySwap: l.ConvertedSwap(),
|
|
||||||
CPUQuota: l.ConvertedCpuLimit(),
|
|
||||||
CPUPeriod: 100_000,
|
|
||||||
CPUShares: 1024,
|
|
||||||
BlkioWeight: l.IoWeight,
|
|
||||||
OomKillDisable: &l.OOMDisabled,
|
|
||||||
CpusetCpus: l.Threads,
|
|
||||||
PidsLimit: &pids,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ import (
|
|||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
"github.com/pterodactyl/wings/events"
|
"github.com/pterodactyl/wings/events"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
|
|||||||
@@ -99,21 +99,36 @@ func (l Limits) ProcessLimit() int64 {
|
|||||||
return config.Get().Docker.ContainerPidLimit
|
return config.Get().Docker.ContainerPidLimit
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AsContainerResources returns the available resources for a container in a format
|
||||||
|
// that Docker understands.
|
||||||
func (l Limits) AsContainerResources() container.Resources {
|
func (l Limits) AsContainerResources() container.Resources {
|
||||||
pids := l.ProcessLimit()
|
pids := l.ProcessLimit()
|
||||||
|
resources := container.Resources{
|
||||||
return container.Resources{
|
|
||||||
Memory: l.BoundedMemoryLimit(),
|
Memory: l.BoundedMemoryLimit(),
|
||||||
MemoryReservation: l.MemoryLimit * 1_000_000,
|
MemoryReservation: l.MemoryLimit * 1_000_000,
|
||||||
MemorySwap: l.ConvertedSwap(),
|
MemorySwap: l.ConvertedSwap(),
|
||||||
CPUQuota: l.ConvertedCpuLimit(),
|
|
||||||
CPUPeriod: 100_000,
|
|
||||||
CPUShares: 1024,
|
|
||||||
BlkioWeight: l.IoWeight,
|
BlkioWeight: l.IoWeight,
|
||||||
OomKillDisable: &l.OOMDisabled,
|
OomKillDisable: &l.OOMDisabled,
|
||||||
CpusetCpus: l.Threads,
|
|
||||||
PidsLimit: &pids,
|
PidsLimit: &pids,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the CPU Limit is not set, don't send any of these fields through. Providing
|
||||||
|
// them seems to break some Java services that try to read the available processors.
|
||||||
|
//
|
||||||
|
// @see https://github.com/pterodactyl/panel/issues/3988
|
||||||
|
if l.CpuLimit > 0 {
|
||||||
|
resources.CPUQuota = l.CpuLimit * 1_000
|
||||||
|
resources.CPUPeriod = 100_000
|
||||||
|
resources.CPUShares = 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
// Similar to above, don't set the specific assigned CPUs if we didn't actually limit
|
||||||
|
// the server to any of them.
|
||||||
|
if l.Threads != "" {
|
||||||
|
resources.CpusetCpus = l.Threads
|
||||||
|
}
|
||||||
|
|
||||||
|
return resources
|
||||||
}
|
}
|
||||||
|
|
||||||
type Variables map[string]interface{}
|
type Variables map[string]interface{}
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/goccy/go-json"
|
"github.com/goccy/go-json"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
114
go.mod
114
go.mod
@@ -1,115 +1,125 @@
|
|||||||
module github.com/pterodactyl/wings
|
module github.com/pterodactyl/wings
|
||||||
|
|
||||||
go 1.17
|
go 1.18
|
||||||
|
|
||||||
require (
|
require (
|
||||||
emperror.dev/errors v0.8.1
|
emperror.dev/errors v0.8.1
|
||||||
github.com/AlecAivazis/survey/v2 v2.3.4
|
github.com/AlecAivazis/survey/v2 v2.3.6
|
||||||
github.com/Jeffail/gabs/v2 v2.6.1
|
github.com/Jeffail/gabs/v2 v2.6.1
|
||||||
github.com/NYTimes/logrotate v1.0.0
|
github.com/NYTimes/logrotate v1.0.0
|
||||||
github.com/apex/log v1.9.0
|
github.com/apex/log v1.9.0
|
||||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
|
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
|
||||||
github.com/beevik/etree v1.1.0
|
github.com/beevik/etree v1.1.0
|
||||||
github.com/buger/jsonparser v1.1.1
|
github.com/buger/jsonparser v1.1.1
|
||||||
github.com/cenkalti/backoff/v4 v4.1.2
|
github.com/cenkalti/backoff/v4 v4.1.3
|
||||||
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
|
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
|
||||||
github.com/creasty/defaults v1.5.2
|
github.com/creasty/defaults v1.6.0
|
||||||
github.com/docker/docker v20.10.14+incompatible
|
github.com/docker/docker v20.10.18+incompatible
|
||||||
github.com/docker/go-connections v0.4.0
|
github.com/docker/go-connections v0.4.0
|
||||||
github.com/fatih/color v1.13.0
|
github.com/fatih/color v1.13.0
|
||||||
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
|
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
|
||||||
github.com/gabriel-vasile/mimetype v1.4.0
|
github.com/gabriel-vasile/mimetype v1.4.1
|
||||||
github.com/gammazero/workerpool v1.1.2
|
github.com/gammazero/workerpool v1.1.3
|
||||||
github.com/gbrlsnchs/jwt/v3 v3.0.1
|
github.com/gbrlsnchs/jwt/v3 v3.0.1
|
||||||
github.com/gin-gonic/gin v1.7.7
|
github.com/gin-gonic/gin v1.8.1
|
||||||
|
github.com/glebarez/sqlite v1.4.8
|
||||||
|
github.com/go-co-op/gocron v1.17.0
|
||||||
|
github.com/goccy/go-json v0.9.11
|
||||||
github.com/google/uuid v1.3.0
|
github.com/google/uuid v1.3.0
|
||||||
github.com/gorilla/websocket v1.5.0
|
github.com/gorilla/websocket v1.5.0
|
||||||
github.com/iancoleman/strcase v0.2.0
|
github.com/iancoleman/strcase v0.2.0
|
||||||
github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
|
github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
|
||||||
github.com/juju/ratelimit v1.0.1
|
github.com/juju/ratelimit v1.0.2
|
||||||
github.com/karrick/godirwalk v1.16.1
|
github.com/karrick/godirwalk v1.17.0
|
||||||
|
github.com/klauspost/compress v1.15.11
|
||||||
github.com/klauspost/pgzip v1.2.5
|
github.com/klauspost/pgzip v1.2.5
|
||||||
github.com/magiconair/properties v1.8.6
|
github.com/magiconair/properties v1.8.6
|
||||||
github.com/mattn/go-colorable v0.1.12
|
github.com/mattn/go-colorable v0.1.13
|
||||||
github.com/mholt/archiver/v3 v3.5.1
|
github.com/mholt/archiver/v3 v3.5.1
|
||||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||||
github.com/pkg/sftp v1.13.4
|
github.com/pkg/sftp v1.13.5
|
||||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
||||||
github.com/spf13/cobra v1.4.0
|
github.com/spf13/afero v1.9.2
|
||||||
github.com/stretchr/testify v1.7.0
|
github.com/spf13/cobra v1.5.0
|
||||||
golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
|
github.com/stretchr/testify v1.8.0
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
|
||||||
gopkg.in/ini.v1 v1.66.4
|
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
|
||||||
|
gopkg.in/ini.v1 v1.67.0
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
|
gorm.io/gorm v1.23.10
|
||||||
)
|
)
|
||||||
|
|
||||||
require github.com/goccy/go-json v0.9.6
|
|
||||||
|
|
||||||
require golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect
|
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||||
github.com/Microsoft/hcsshim v0.9.2 // indirect
|
github.com/Microsoft/hcsshim v0.9.4 // indirect
|
||||||
github.com/andybalholm/brotli v1.0.4 // indirect
|
github.com/andybalholm/brotli v1.0.4 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||||
github.com/containerd/containerd v1.6.2 // indirect
|
|
||||||
github.com/containerd/fifo v1.0.0 // indirect
|
github.com/containerd/fifo v1.0.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||||
github.com/docker/go-metrics v0.0.1 // indirect
|
github.com/docker/go-metrics v0.0.1 // indirect
|
||||||
github.com/docker/go-units v0.4.0 // indirect
|
github.com/docker/go-units v0.5.0 // indirect
|
||||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||||
github.com/gammazero/deque v0.1.1 // indirect
|
github.com/gammazero/deque v0.2.0 // indirect
|
||||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||||
|
github.com/glebarez/go-sqlite v1.19.1 // indirect
|
||||||
github.com/go-playground/locales v0.14.0 // indirect
|
github.com/go-playground/locales v0.14.0 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||||
github.com/go-playground/validator/v10 v10.10.1 // indirect
|
github.com/go-playground/validator/v10 v10.11.1 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
github.com/golang/protobuf v1.5.2 // indirect
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
github.com/gorilla/mux v1.7.4 // indirect
|
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||||
|
github.com/jinzhu/now v1.1.5 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||||
github.com/klauspost/compress v1.15.1 // indirect
|
|
||||||
github.com/kr/fs v0.1.0 // indirect
|
github.com/kr/fs v0.1.0 // indirect
|
||||||
github.com/leodido/go-urn v1.2.1 // indirect
|
github.com/leodido/go-urn v1.2.1 // indirect
|
||||||
github.com/magefile/mage v1.13.0 // indirect
|
github.com/magefile/mage v1.14.0 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
|
||||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/morikuni/aec v1.0.0 // indirect
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
github.com/nwaples/rardecode v1.1.3 // indirect
|
github.com/nwaples/rardecode v1.1.3 // indirect
|
||||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
|
||||||
github.com/pierrec/lz4/v4 v4.1.14 // indirect
|
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.17 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/prometheus/client_golang v1.12.1 // indirect
|
github.com/prometheus/client_golang v1.13.0 // indirect
|
||||||
github.com/prometheus/client_model v0.2.0 // indirect
|
github.com/prometheus/client_model v0.2.0 // indirect
|
||||||
github.com/prometheus/common v0.32.1 // indirect
|
github.com/prometheus/common v0.37.0 // indirect
|
||||||
github.com/prometheus/procfs v0.7.3 // indirect
|
github.com/prometheus/procfs v0.8.0 // indirect
|
||||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
|
||||||
|
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||||
|
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/ugorji/go/codec v1.2.7 // indirect
|
github.com/ugorji/go/codec v1.2.7 // indirect
|
||||||
github.com/ulikunitz/xz v0.5.10 // indirect
|
github.com/ulikunitz/xz v0.5.10 // indirect
|
||||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
||||||
go.uber.org/atomic v1.9.0 // indirect
|
go.uber.org/atomic v1.10.0 // indirect
|
||||||
go.uber.org/multierr v1.8.0 // indirect
|
go.uber.org/multierr v1.8.0 // indirect
|
||||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
|
||||||
golang.org/x/text v0.3.7 // indirect
|
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
|
||||||
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
|
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
golang.org/x/text v0.3.8 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
|
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
|
||||||
google.golang.org/grpc v1.45.0 // indirect
|
golang.org/x/tools v0.1.12 // indirect
|
||||||
google.golang.org/protobuf v1.28.0 // indirect
|
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
google.golang.org/protobuf v1.28.1 // indirect
|
||||||
|
modernc.org/libc v1.20.0 // indirect
|
||||||
|
modernc.org/mathutil v1.5.0 // indirect
|
||||||
|
modernc.org/memory v1.4.0 // indirect
|
||||||
|
modernc.org/sqlite v1.19.1 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/asaskevich/govalidator"
|
"github.com/asaskevich/govalidator"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
"github.com/pterodactyl/wings/server"
|
"github.com/pterodactyl/wings/server"
|
||||||
)
|
)
|
||||||
@@ -37,7 +38,7 @@ func New(ctx context.Context, manager *server.Manager, details ServerDetails) (*
|
|||||||
|
|
||||||
// Create a new server instance using the configuration we wrote to the disk
|
// Create a new server instance using the configuration we wrote to the disk
|
||||||
// so that everything gets instantiated correctly on the struct.
|
// so that everything gets instantiated correctly on the struct.
|
||||||
s, err := manager.InitServer(c)
|
s, err := manager.InitServer(ctx, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.WrapIf(err, "installer: could not init server instance")
|
return nil, errors.WrapIf(err, "installer: could not init server instance")
|
||||||
}
|
}
|
||||||
|
|||||||
59
internal/cron/activity_cron.go
Normal file
59
internal/cron/activity_cron.go
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/database"
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
"github.com/pterodactyl/wings/server"
|
||||||
|
"github.com/pterodactyl/wings/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
type activityCron struct {
|
||||||
|
mu *system.AtomicBool
|
||||||
|
manager *server.Manager
|
||||||
|
max int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes the cronjob and ensures we fetch and send all of the stored activity to the
|
||||||
|
// Panel instance. Once activity is sent it is deleted from the local database instance. Any
|
||||||
|
// SFTP specific events are not handled in this cron, they're handled seperately to account
|
||||||
|
// for de-duplication and event merging.
|
||||||
|
func (ac *activityCron) Run(ctx context.Context) error {
|
||||||
|
// Don't execute this cron if there is currently one running. Once this task is completed
|
||||||
|
// go ahead and mark it as no longer running.
|
||||||
|
if !ac.mu.SwapIf(true) {
|
||||||
|
return errors.WithStack(ErrCronRunning)
|
||||||
|
}
|
||||||
|
defer ac.mu.Store(false)
|
||||||
|
|
||||||
|
var activity []models.Activity
|
||||||
|
tx := database.Instance().WithContext(ctx).
|
||||||
|
Where("event NOT LIKE ?", "server:sftp.%").
|
||||||
|
Limit(ac.max).
|
||||||
|
Find(&activity)
|
||||||
|
|
||||||
|
if tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
if len(activity) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ac.manager.Client().SendActivityLogs(ctx, activity); err != nil {
|
||||||
|
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
|
||||||
|
}
|
||||||
|
|
||||||
|
var ids []int
|
||||||
|
for _, v := range activity {
|
||||||
|
ids = append(ids, v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
|
||||||
|
if tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
73
internal/cron/cron.go
Normal file
73
internal/cron/cron.go
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
log2 "github.com/apex/log"
|
||||||
|
"github.com/go-co-op/gocron"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/pterodactyl/wings/server"
|
||||||
|
"github.com/pterodactyl/wings/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
const ErrCronRunning = errors.Sentinel("cron: job already running")
|
||||||
|
|
||||||
|
var o system.AtomicBool
|
||||||
|
|
||||||
|
// Scheduler configures the internal cronjob system for Wings and returns the scheduler
|
||||||
|
// instance to the caller. This should only be called once per application lifecycle, additional
|
||||||
|
// calls will result in an error being returned.
|
||||||
|
func Scheduler(ctx context.Context, m *server.Manager) (*gocron.Scheduler, error) {
|
||||||
|
if !o.SwapIf(true) {
|
||||||
|
return nil, errors.New("cron: cannot call scheduler more than once in application lifecycle")
|
||||||
|
}
|
||||||
|
l, err := time.LoadLocation(config.Get().System.Timezone)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "cron: failed to parse configured system timezone")
|
||||||
|
}
|
||||||
|
|
||||||
|
activity := activityCron{
|
||||||
|
mu: system.NewAtomicBool(false),
|
||||||
|
manager: m,
|
||||||
|
max: config.Get().System.ActivitySendCount,
|
||||||
|
}
|
||||||
|
|
||||||
|
sftp := sftpCron{
|
||||||
|
mu: system.NewAtomicBool(false),
|
||||||
|
manager: m,
|
||||||
|
max: config.Get().System.ActivitySendCount,
|
||||||
|
}
|
||||||
|
|
||||||
|
s := gocron.NewScheduler(l)
|
||||||
|
log := log2.WithField("subsystem", "cron")
|
||||||
|
|
||||||
|
interval := time.Duration(config.Get().System.ActivitySendInterval) * time.Second
|
||||||
|
log.WithField("interval", interval).Info("configuring system crons")
|
||||||
|
|
||||||
|
_, _ = s.Tag("activity").Every(interval).Do(func() {
|
||||||
|
log.WithField("cron", "activity").Debug("sending internal activity events to Panel")
|
||||||
|
if err := activity.Run(ctx); err != nil {
|
||||||
|
if errors.Is(err, ErrCronRunning) {
|
||||||
|
log.WithField("cron", "activity").Warn("activity process is already running, skipping...")
|
||||||
|
} else {
|
||||||
|
log.WithField("cron", "activity").WithField("error", err).Error("activity process failed to execute")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
_, _ = s.Tag("sftp").Every(interval).Do(func() {
|
||||||
|
log.WithField("cron", "sftp").Debug("sending sftp events to Panel")
|
||||||
|
if err := sftp.Run(ctx); err != nil {
|
||||||
|
if errors.Is(err, ErrCronRunning) {
|
||||||
|
log.WithField("cron", "sftp").Warn("sftp events process already running, skipping...")
|
||||||
|
} else {
|
||||||
|
log.WithField("cron", "sftp").WithField("error", err).Error("sftp events process failed to execute")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
177
internal/cron/sftp_cron.go
Normal file
177
internal/cron/sftp_cron.go
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
package cron
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/database"
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
"github.com/pterodactyl/wings/server"
|
||||||
|
"github.com/pterodactyl/wings/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
type sftpCron struct {
|
||||||
|
mu *system.AtomicBool
|
||||||
|
manager *server.Manager
|
||||||
|
max int
|
||||||
|
}
|
||||||
|
|
||||||
|
type mapKey struct {
|
||||||
|
User string
|
||||||
|
Server string
|
||||||
|
IP string
|
||||||
|
Event models.Event
|
||||||
|
Timestamp string
|
||||||
|
}
|
||||||
|
|
||||||
|
type eventMap struct {
|
||||||
|
max int
|
||||||
|
ids []int
|
||||||
|
m map[mapKey]*models.Activity
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes the SFTP reconciliation cron. This job will pull all of the SFTP specific events
|
||||||
|
// and merge them together across user, server, ip, and event. This allows a SFTP event that deletes
|
||||||
|
// tens or hundreds of files to be tracked as a single "deletion" event so long as they all occur
|
||||||
|
// within the same one minute period of time (starting at the first timestamp for the group). Without
|
||||||
|
// this we'd end up flooding the Panel event log with excessive data that is of no use to end users.
|
||||||
|
func (sc *sftpCron) Run(ctx context.Context) error {
|
||||||
|
if !sc.mu.SwapIf(true) {
|
||||||
|
return errors.WithStack(ErrCronRunning)
|
||||||
|
}
|
||||||
|
defer sc.mu.Store(false)
|
||||||
|
|
||||||
|
var o int
|
||||||
|
activity, err := sc.fetchRecords(ctx, o)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
o += len(activity)
|
||||||
|
|
||||||
|
events := &eventMap{
|
||||||
|
m: map[mapKey]*models.Activity{},
|
||||||
|
ids: []int{},
|
||||||
|
max: sc.max,
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
if len(activity) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
slen := len(events.ids)
|
||||||
|
for _, a := range activity {
|
||||||
|
events.Push(a)
|
||||||
|
}
|
||||||
|
if len(events.ids) > slen {
|
||||||
|
// Execute the query again, we found some events so we want to continue
|
||||||
|
// with this. Start at the next offset.
|
||||||
|
activity, err = sc.fetchRecords(ctx, o)
|
||||||
|
if err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
o += len(activity)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(events.m) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err := sc.manager.Client().SendActivityLogs(ctx, events.Elements()); err != nil {
|
||||||
|
return errors.Wrap(err, "failed to send sftp activity logs to Panel")
|
||||||
|
}
|
||||||
|
if tx := database.Instance().Where("id IN ?", events.ids).Delete(&models.Activity{}); tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetchRecords returns a group of activity events starting at the given offset. This is used
|
||||||
|
// since we might need to make multiple database queries to select enough events to properly
|
||||||
|
// fill up our request to the given maximum. This is due to the fact that this cron merges any
|
||||||
|
// activity that line up across user, server, ip, and event into a single activity record when
|
||||||
|
// sending the data to the Panel.
|
||||||
|
func (sc *sftpCron) fetchRecords(ctx context.Context, offset int) (activity []models.Activity, err error) {
|
||||||
|
tx := database.Instance().WithContext(ctx).
|
||||||
|
Where("event LIKE ?", "server:sftp.%").
|
||||||
|
Order("event DESC").
|
||||||
|
Offset(offset).
|
||||||
|
Limit(sc.max).
|
||||||
|
Find(&activity)
|
||||||
|
if tx.Error != nil {
|
||||||
|
err = errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push adds an activity to the event mapping, or de-duplicates it and merges the files metadata
|
||||||
|
// into the existing entity that exists.
|
||||||
|
func (em *eventMap) Push(a models.Activity) {
|
||||||
|
m := em.forActivity(a)
|
||||||
|
// If no activity entity is returned we've hit the cap for the number of events to
|
||||||
|
// send along to the Panel. Just skip over this record and we'll account for it in
|
||||||
|
// the next iteration.
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
em.ids = append(em.ids, a.ID)
|
||||||
|
// Always reduce this to the first timestamp that was recorded for the set
|
||||||
|
// of events, and not
|
||||||
|
if a.Timestamp.Before(m.Timestamp) {
|
||||||
|
m.Timestamp = a.Timestamp
|
||||||
|
}
|
||||||
|
list := m.Metadata["files"].([]interface{})
|
||||||
|
if s, ok := a.Metadata["files"]; ok {
|
||||||
|
v := reflect.ValueOf(s)
|
||||||
|
if v.Kind() != reflect.Slice || v.IsNil() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
list = append(list, v.Index(i).Interface())
|
||||||
|
}
|
||||||
|
// You must set it again at the end of the process, otherwise you've only updated the file
|
||||||
|
// slice in this one loop since it isn't passed by reference. This is just shorter than having
|
||||||
|
// to explicitly keep casting it to the slice.
|
||||||
|
m.Metadata["files"] = list
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Elements returns the finalized activity models.
|
||||||
|
func (em *eventMap) Elements() (out []models.Activity) {
|
||||||
|
for _, v := range em.m {
|
||||||
|
out = append(out, *v)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// forActivity returns an event entity from our map which allows existing matches to be
|
||||||
|
// updated with additional files.
|
||||||
|
func (em *eventMap) forActivity(a models.Activity) *models.Activity {
|
||||||
|
key := mapKey{
|
||||||
|
User: a.User.String,
|
||||||
|
Server: a.Server,
|
||||||
|
IP: a.IP,
|
||||||
|
Event: a.Event,
|
||||||
|
// We group by the minute, don't care about the seconds for this logic.
|
||||||
|
Timestamp: a.Timestamp.Format("2006-01-02_15:04"),
|
||||||
|
}
|
||||||
|
if v, ok := em.m[key]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
// Cap the size of the events map at the defined maximum events to send to the Panel. Just
|
||||||
|
// return nil and let the caller handle it.
|
||||||
|
if len(em.m) >= em.max {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// Doesn't exist in our map yet, create a copy of the activity passed into this
|
||||||
|
// function and then assign it into the map with an empty metadata value.
|
||||||
|
v := a
|
||||||
|
v.Metadata = models.ActivityMeta{
|
||||||
|
"files": make([]interface{}, 0),
|
||||||
|
}
|
||||||
|
em.m[key] = &v
|
||||||
|
return &v
|
||||||
|
}
|
||||||
61
internal/database/database.go
Normal file
61
internal/database/database.go
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
"github.com/glebarez/sqlite"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
"gorm.io/gorm/logger"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
"github.com/pterodactyl/wings/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
o system.AtomicBool
|
||||||
|
db *gorm.DB
|
||||||
|
)
|
||||||
|
|
||||||
|
// Initialize configures the local SQLite database for Wings and ensures that the models have
|
||||||
|
// been fully migrated.
|
||||||
|
func Initialize() error {
|
||||||
|
if !o.SwapIf(true) {
|
||||||
|
panic("database: attempt to initialize more than once during application lifecycle")
|
||||||
|
}
|
||||||
|
p := filepath.Join(config.Get().System.RootDirectory, "wings.db")
|
||||||
|
instance, err := gorm.Open(sqlite.Open(p), &gorm.Config{
|
||||||
|
Logger: logger.Default.LogMode(logger.Silent),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "database: could not open database file")
|
||||||
|
}
|
||||||
|
db = instance
|
||||||
|
if sql, err := db.DB(); err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
} else {
|
||||||
|
sql.SetMaxOpenConns(1)
|
||||||
|
sql.SetConnMaxLifetime(time.Hour)
|
||||||
|
}
|
||||||
|
if tx := db.Exec("PRAGMA synchronous = OFF"); tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
if tx := db.Exec("PRAGMA journal_mode = MEMORY"); tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
if err := db.AutoMigrate(&models.Activity{}); err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instance returns the gorm database instance that was configured when the application was
|
||||||
|
// booted.
|
||||||
|
func Instance() *gorm.DB {
|
||||||
|
if db == nil {
|
||||||
|
panic("database: attempt to access instance before initialized")
|
||||||
|
}
|
||||||
|
return db
|
||||||
|
}
|
||||||
69
internal/models/activity.go
Normal file
69
internal/models/activity.go
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"gorm.io/gorm"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Event string
|
||||||
|
|
||||||
|
type ActivityMeta map[string]interface{}
|
||||||
|
|
||||||
|
// Activity defines an activity log event for a server entity performed by a user. This is
|
||||||
|
// used for tracking commands, power actions, and SFTP events so that they can be reconciled
|
||||||
|
// and sent back to the Panel instance to be displayed to the user.
|
||||||
|
type Activity struct {
|
||||||
|
ID int `gorm:"primaryKey;not null" json:"-"`
|
||||||
|
// User is UUID of the user that triggered this event, or an empty string if the event
|
||||||
|
// cannot be tied to a specific user, in which case we will assume it was the system
|
||||||
|
// user.
|
||||||
|
User JsonNullString `gorm:"type:uuid" json:"user"`
|
||||||
|
// Server is the UUID of the server this event is associated with.
|
||||||
|
Server string `gorm:"type:uuid;not null" json:"server"`
|
||||||
|
// Event is a string that describes what occurred, and is used by the Panel instance to
|
||||||
|
// properly associate this event in the activity logs.
|
||||||
|
Event Event `gorm:"index;not null" json:"event"`
|
||||||
|
// Metadata is either a null value, string, or a JSON blob with additional event specific
|
||||||
|
// metadata that can be provided.
|
||||||
|
Metadata ActivityMeta `gorm:"serializer:json" json:"metadata"`
|
||||||
|
// IP is the IP address that triggered this event, or an empty string if it cannot be
|
||||||
|
// determined properly. This should be the connecting user's IP address, and not the
|
||||||
|
// internal system IP.
|
||||||
|
IP string `gorm:"not null" json:"ip"`
|
||||||
|
Timestamp time.Time `gorm:"not null" json:"timestamp"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the current user that performed the action. If an empty string is provided
|
||||||
|
// it is cast into a null value when stored.
|
||||||
|
func (a Activity) SetUser(u string) *Activity {
|
||||||
|
var ns JsonNullString
|
||||||
|
if u == "" {
|
||||||
|
if err := ns.Scan(nil); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := ns.Scan(u); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
a.User = ns
|
||||||
|
return &a
|
||||||
|
}
|
||||||
|
|
||||||
|
// BeforeCreate executes before we create any activity entry to ensure the IP address
|
||||||
|
// is trimmed down to remove any extraneous data, and the timestamp is set to the current
|
||||||
|
// system time and then stored as UTC.
|
||||||
|
func (a *Activity) BeforeCreate(_ *gorm.DB) error {
|
||||||
|
a.IP = system.TrimIPSuffix(a.IP)
|
||||||
|
if a.Timestamp.IsZero() {
|
||||||
|
a.Timestamp = time.Now()
|
||||||
|
}
|
||||||
|
a.Timestamp = a.Timestamp.UTC()
|
||||||
|
if a.Metadata == nil {
|
||||||
|
a.Metadata = ActivityMeta{}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
32
internal/models/models.go
Normal file
32
internal/models/models.go
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
package models
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
"github.com/goccy/go-json"
|
||||||
|
)
|
||||||
|
|
||||||
|
type JsonNullString struct {
|
||||||
|
sql.NullString
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v JsonNullString) MarshalJSON() ([]byte, error) {
|
||||||
|
if v.Valid {
|
||||||
|
return json.Marshal(v.String)
|
||||||
|
} else {
|
||||||
|
return json.Marshal(nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *JsonNullString) UnmarshalJSON(data []byte) error {
|
||||||
|
var s *string
|
||||||
|
if err := json.Unmarshal(data, &s); err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
if s != nil {
|
||||||
|
v.String = *s
|
||||||
|
}
|
||||||
|
v.Valid = s != nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
// Package notify handles notifying the operating system of the program's state.
|
|
||||||
//
|
|
||||||
// For linux based operating systems, this is done through the systemd socket
|
|
||||||
// set by "NOTIFY_SOCKET" environment variable.
|
|
||||||
//
|
|
||||||
// Currently, no other operating systems are supported.
|
|
||||||
package notify
|
|
||||||
|
|
||||||
func Readiness() error {
|
|
||||||
return readiness()
|
|
||||||
}
|
|
||||||
|
|
||||||
func Reloading() error {
|
|
||||||
return reloading()
|
|
||||||
}
|
|
||||||
|
|
||||||
func Stopping() error {
|
|
||||||
return stopping()
|
|
||||||
}
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
package notify
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func notify(path string, r io.Reader) error {
|
|
||||||
s := &net.UnixAddr{
|
|
||||||
Name: path,
|
|
||||||
Net: "unixgram",
|
|
||||||
}
|
|
||||||
c, err := net.DialUnix(s.Net, nil, s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer c.Close()
|
|
||||||
|
|
||||||
if _, err := io.Copy(c, r); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func socketNotify(payload string) error {
|
|
||||||
v, ok := os.LookupEnv("NOTIFY_SOCKET")
|
|
||||||
if !ok || v == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if err := notify(v, strings.NewReader(payload)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readiness() error {
|
|
||||||
return socketNotify("READY=1")
|
|
||||||
}
|
|
||||||
|
|
||||||
func reloading() error {
|
|
||||||
return socketNotify("RELOADING=1")
|
|
||||||
}
|
|
||||||
|
|
||||||
func stopping() error {
|
|
||||||
return socketNotify("STOPPING=1")
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
//go:build !linux
|
|
||||||
// +build !linux
|
|
||||||
|
|
||||||
package notify
|
|
||||||
|
|
||||||
func readiness() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func reloading() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func stopping() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
330
internal/vhd/vhd.go
Normal file
330
internal/vhd/vhd.go
Normal file
@@ -0,0 +1,330 @@
|
|||||||
|
package vhd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"emperror.dev/errors"
|
||||||
|
"fmt"
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/spf13/afero"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidDiskPathTarget = errors.Sentinel("vhd: disk path is a directory or symlink")
|
||||||
|
ErrMountPathNotDirectory = errors.Sentinel("vhd: mount point is not a directory")
|
||||||
|
ErrFilesystemMounted = errors.Sentinel("vhd: filesystem is already mounted")
|
||||||
|
ErrFilesystemNotMounted = errors.Sentinel("vhd: filesystem is not mounted")
|
||||||
|
ErrFilesystemExists = errors.Sentinel("vhd: filesystem already exists on disk")
|
||||||
|
)
|
||||||
|
|
||||||
|
var useDdAllocation bool
|
||||||
|
var setDdAllocator sync.Once
|
||||||
|
|
||||||
|
// hasExitCode allows this code to test the response error to see if there is
|
||||||
|
// an exit code available from the command call that can be used to determine if
|
||||||
|
// something went wrong.
|
||||||
|
type hasExitCode interface {
|
||||||
|
ExitCode() int
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commander defines an interface that must be met for executing commands on the
|
||||||
|
// underlying OS. By default the vhd package will use Go's exec.Cmd type for
|
||||||
|
// execution. This interface allows stubbing out on tests, or potentially custom
|
||||||
|
// setups down the line.
|
||||||
|
type Commander interface {
|
||||||
|
Run() error
|
||||||
|
Output() ([]byte, error)
|
||||||
|
String() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// CommanderProvider is a function that provides a struct meeting the Commander
|
||||||
|
// interface requirements.
|
||||||
|
type CommanderProvider func(ctx context.Context, name string, args ...string) Commander
|
||||||
|
|
||||||
|
// CfgOption is a configuration option callback for the Disk.
|
||||||
|
type CfgOption func(d *Disk) *Disk
|
||||||
|
|
||||||
|
// Disk represents the underlying virtual disk for the instance.
|
||||||
|
type Disk struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
// The total size of the disk allowed in bytes.
|
||||||
|
size int64
|
||||||
|
// The path where the disk image should be created.
|
||||||
|
diskPath string
|
||||||
|
// The point at which this disk should be made available on the system. This
|
||||||
|
// is where files can be read/written to.
|
||||||
|
mountAt string
|
||||||
|
fs afero.Fs
|
||||||
|
commander CommanderProvider
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiskPath returns the underlying path that contains the virtual disk for the server
|
||||||
|
// identified by its UUID.
|
||||||
|
func DiskPath(uuid string) string {
|
||||||
|
return filepath.Join(config.Get().System.Data, ".vhd/", uuid+".img")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enabled returns true when VHD support is enabled on the instance.
|
||||||
|
func Enabled() bool {
|
||||||
|
return config.Get().Servers.Filesystem.Driver == config.FSDriverVHD
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a new Disk instance. The "size" parameter should be provided in
|
||||||
|
// bytes of space allowed for the disk. An additional slice of option callbacks
|
||||||
|
// can be provided to programatically swap out the underlying filesystem
|
||||||
|
// implementation or the underlying command exection engine.
|
||||||
|
func New(size int64, diskPath string, mountAt string, opts ...func(*Disk)) *Disk {
|
||||||
|
if diskPath == "" || mountAt == "" {
|
||||||
|
panic("vhd: cannot specify an empty disk or mount path")
|
||||||
|
}
|
||||||
|
d := Disk{
|
||||||
|
size: size,
|
||||||
|
diskPath: diskPath,
|
||||||
|
mountAt: mountAt,
|
||||||
|
fs: afero.NewOsFs(),
|
||||||
|
commander: func(ctx context.Context, name string, args ...string) Commander {
|
||||||
|
return exec.CommandContext(ctx, name, args...)
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(&d)
|
||||||
|
}
|
||||||
|
return &d
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithFs allows for a different underlying filesystem to be provided to the
|
||||||
|
// virtual disk manager.
|
||||||
|
func WithFs(fs afero.Fs) func(*Disk) {
|
||||||
|
return func(d *Disk) {
|
||||||
|
d.fs = fs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithCommander allows a different Commander provider to be provided.
|
||||||
|
func WithCommander(c CommanderProvider) func(*Disk) {
|
||||||
|
return func(d *Disk) {
|
||||||
|
d.commander = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Disk) Path() string {
|
||||||
|
return d.diskPath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Disk) MountPath() string {
|
||||||
|
return d.mountAt
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exists reports if the disk exists on the system yet or not. This only verifies
|
||||||
|
// the presence of the disk image, not the validity of it. An error is returned
|
||||||
|
// if the path exists but the destination is not a file or is a symlink.
|
||||||
|
func (d *Disk) Exists() (bool, error) {
|
||||||
|
d.mu.RLock()
|
||||||
|
defer d.mu.RUnlock()
|
||||||
|
st, err := d.fs.Stat(d.diskPath)
|
||||||
|
if err != nil && os.IsNotExist(err) {
|
||||||
|
return false, nil
|
||||||
|
} else if err != nil {
|
||||||
|
return false, errors.WithStack(err)
|
||||||
|
}
|
||||||
|
if !st.IsDir() && st.Mode()&os.ModeSymlink == 0 {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
return false, errors.WithStack(ErrInvalidDiskPathTarget)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsMounted checks to see if the given disk is currently mounted.
|
||||||
|
func (d *Disk) IsMounted(ctx context.Context) (bool, error) {
|
||||||
|
find := d.mountAt + " ext4"
|
||||||
|
cmd := d.commander(ctx, "grep", "-qs", find, "/proc/mounts")
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
if v, ok := err.(hasExitCode); ok {
|
||||||
|
if v.ExitCode() == 1 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, errors.Wrap(err, "vhd: failed to execute grep for mount existence")
|
||||||
|
}
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mount attempts to mount the disk as configured. If it does not exist or the
|
||||||
|
// mount command fails an error will be returned to the caller. This does not
|
||||||
|
// attempt to create the disk if it is missing from the filesystem.
|
||||||
|
//
|
||||||
|
// Attempting to mount a disk which does not exist will result in an error being
|
||||||
|
// returned to the caller. If the disk is already mounted an ErrFilesystemMounted
|
||||||
|
// error is returned to the caller.
|
||||||
|
func (d *Disk) Mount(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
return d.mount(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmount attempts to unmount the disk from the system. If the disk is not
|
||||||
|
// currently mounted this function is a no-op and ErrFilesystemNotMounted is
|
||||||
|
// returned to the caller.
|
||||||
|
func (d *Disk) Unmount(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
return d.unmount(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allocate executes the "fallocate" command on the disk. This will first unmount
|
||||||
|
// the disk from the system before attempting to actually allocate the space. If
|
||||||
|
// this disk already exists on the machine it will be resized accordingly.
|
||||||
|
//
|
||||||
|
// DANGER! This will unmount the disk from the machine while performing this
|
||||||
|
// action, use caution when calling it during normal processes.
|
||||||
|
func (d *Disk) Allocate(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
if exists, err := d.Exists(); exists {
|
||||||
|
// If the disk currently exists attempt to unmount the mount point before
|
||||||
|
// allocating space.
|
||||||
|
if err := d.Unmount(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
return errors.Wrap(err, "vhd: failed to check for existence of root disk")
|
||||||
|
}
|
||||||
|
trim := path.Base(d.diskPath)
|
||||||
|
if err := d.fs.MkdirAll(strings.TrimSuffix(d.diskPath, trim), 0700); err != nil {
|
||||||
|
return errors.Wrap(err, "vhd: failed to create base vhd disk directory")
|
||||||
|
}
|
||||||
|
cmd := d.allocationCmd(ctx)
|
||||||
|
if _, err := cmd.Output(); err != nil {
|
||||||
|
msg := "vhd: failed to execute space allocation command"
|
||||||
|
if v, ok := err.(*exec.ExitError); ok {
|
||||||
|
stderr := strings.Trim(string(v.Stderr), ".\n")
|
||||||
|
if !useDdAllocation && strings.HasSuffix(stderr, "not supported") {
|
||||||
|
// Try again: fallocate is not supported on some filesystems so we'll fall
|
||||||
|
// back to making use of dd for subsequent operations.
|
||||||
|
setDdAllocator.Do(func() {
|
||||||
|
useDdAllocation = true
|
||||||
|
})
|
||||||
|
return d.Allocate(ctx)
|
||||||
|
}
|
||||||
|
msg = msg + ": " + stderr
|
||||||
|
}
|
||||||
|
return errors.Wrap(err, msg)
|
||||||
|
}
|
||||||
|
return errors.WithStack(d.fs.Chmod(d.diskPath, 0600))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resize will change the internal disk size limit and then allocate the new
|
||||||
|
// space to the disk automatically.
|
||||||
|
func (d *Disk) Resize(ctx context.Context, size int64) error {
|
||||||
|
atomic.StoreInt64(&d.size, size)
|
||||||
|
return d.Allocate(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Destroy removes the underlying allocated disk image and unmounts the disk.
|
||||||
|
func (d *Disk) Destroy(ctx context.Context) error {
|
||||||
|
d.mu.Lock()
|
||||||
|
defer d.mu.Unlock()
|
||||||
|
if err := d.unmount(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
return errors.WithStackIf(d.fs.RemoveAll(d.mountAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MakeFilesystem will attempt to execute the "mkfs" command against the disk on
// the machine. If the disk has already been created this command will return an
// ErrFilesystemExists error to the caller. You should manually unmount the disk
// if it shouldn't be mounted at this point.
func (d *Disk) MakeFilesystem(ctx context.Context) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	// If no error is returned when mounting DO NOT execute this command as it will
	// completely destroy the data stored at that location.
	err := d.Mount(ctx)
	if err == nil || errors.Is(err, ErrFilesystemMounted) {
		// If it wasn't already mounted try to clean up at this point and unmount
		// the disk. If this fails just ignore it for now.
		//
		// NOTE(review): the condition below unmounts only when err != nil, i.e.
		// when the disk was ALREADY mounted before this call — which contradicts
		// the comment above. If the intent is to undo the mount this method just
		// created, the check should be err == nil. Confirm before changing.
		if err != nil {
			_ = d.Unmount(ctx)
		}
		return ErrFilesystemExists
	}
	// As long as we got an error back that was because we couldn't find the disk
	// in the /etc/fstab file we're good. Otherwise it means the disk probably exists
	// or something else went wrong.
	if !strings.Contains(err.Error(), "can't find in /etc/fstab") && !strings.Contains(err.Error(), "exit status 32") {
		return errors.WrapIf(err, "vhd: unexpected error from mount command")
	}
	//
	// Because this is a destructive command and non-tty based execution of it implies
	// "-F" (force), we need to only run it when we can guarantee it doesn't already
	// exist. No vague "maybe that error is expected" allowed here.
	cmd := d.commander(ctx, "mkfs", "-t", "ext4", d.diskPath)
	if err := cmd.Run(); err != nil {
		return errors.Wrap(err, "vhd: failed to make filesystem for disk")
	}
	return nil
}
|
||||||
|
|
||||||
|
func (d *Disk) mount(ctx context.Context) error {
|
||||||
|
if isMounted, err := d.IsMounted(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
} else if isMounted {
|
||||||
|
return ErrFilesystemMounted
|
||||||
|
}
|
||||||
|
|
||||||
|
if st, err := d.fs.Stat(d.mountAt); err != nil && !os.IsNotExist(err) {
|
||||||
|
return errors.Wrap(err, "vhd: failed to stat mount path")
|
||||||
|
} else if os.IsNotExist(err) {
|
||||||
|
if err := d.fs.MkdirAll(d.mountAt, 0700); err != nil {
|
||||||
|
return errors.Wrap(err, "vhd: failed to create mount path")
|
||||||
|
}
|
||||||
|
} else if !st.IsDir() {
|
||||||
|
return errors.WithStack(ErrMountPathNotDirectory)
|
||||||
|
}
|
||||||
|
|
||||||
|
u := config.Get().System.User
|
||||||
|
if err := d.fs.Chown(d.mountAt, u.Uid, u.Gid); err != nil {
|
||||||
|
return errors.Wrap(err, "vhd: failed to chown mount point")
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := d.commander(ctx, "mount", "-t", "auto", "-o", "loop", d.diskPath, d.mountAt)
|
||||||
|
if _, err := cmd.Output(); err != nil {
|
||||||
|
msg := "vhd: failed to mount disk"
|
||||||
|
if v, ok := err.(*exec.ExitError); ok {
|
||||||
|
msg = msg + ": " + strings.Trim(string(v.Stderr), ".\n")
|
||||||
|
}
|
||||||
|
return errors.Wrap(err, msg)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Disk) unmount(ctx context.Context) error {
|
||||||
|
cmd := d.commander(ctx, "umount", d.mountAt)
|
||||||
|
if err := cmd.Run(); err != nil {
|
||||||
|
v, ok := err.(hasExitCode)
|
||||||
|
if ok && v.ExitCode() == 32 {
|
||||||
|
return ErrFilesystemNotMounted
|
||||||
|
}
|
||||||
|
return errors.Wrap(err, "vhd: failed to execute unmount command for disk")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// allocationCmd returns the command to allocate the disk image. This will attempt to
|
||||||
|
// use the fallocate command if available, otherwise it will fall back to dd if the
|
||||||
|
// fallocate command has previously failed.
|
||||||
|
//
|
||||||
|
// We use 1024 as the multiplier for all of the disk space logic within the application.
|
||||||
|
// Passing "K" (/1024) is the same as "KiB" for fallocate, but is different than "KB" (/1000).
|
||||||
|
func (d *Disk) allocationCmd(ctx context.Context) Commander {
|
||||||
|
s := atomic.LoadInt64(&d.size) / 1024
|
||||||
|
if useDdAllocation {
|
||||||
|
return d.commander(ctx, "dd", "if=/dev/zero", fmt.Sprintf("of=%s", d.diskPath), fmt.Sprintf("bs=%dk", s), "count=1")
|
||||||
|
}
|
||||||
|
return d.commander(ctx, "fallocate", "-l", fmt.Sprintf("%dK", s), d.diskPath)
|
||||||
|
}
|
||||||
476
internal/vhd/vhd_test.go
Normal file
476
internal/vhd/vhd_test.go
Normal file
@@ -0,0 +1,476 @@
|
|||||||
|
package vhd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/spf13/afero"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// init seeds a minimal global configuration so code under test that reads
// config.Get() (e.g. the chown performed in Disk.mount) sees a deterministic
// daemon user instead of whatever happens to be on the host.
func init() {
	config.Set(&config.Configuration{
		AuthenticationToken: "token123",
		System: config.SystemConfiguration{
			User: struct {
				Uid int
				Gid int
			}{Uid: 10, Gid: 10},
		},
	})
}
|
||||||
|
|
||||||
|
// mockCmd is a test double for the Commander interface. Each method delegates
// to an optional function field so individual tests can hook only the
// behavior they care about; unset hooks act as harmless no-ops.
type mockCmd struct {
	run    func() error          // hook for Run; nil means "succeed"
	output func() ([]byte, error) // hook for Output; nil means "no output, no error"
	string func() string         // hook for String; nil means "empty string"
}

// Run invokes the configured run hook, or returns nil when none is set.
func (m *mockCmd) Run() error {
	if m.run != nil {
		return m.run()
	}
	return nil
}

// Output invokes the configured output hook, or returns empty output.
func (m *mockCmd) Output() ([]byte, error) {
	if m.output != nil {
		return m.output()
	}
	return nil, nil
}

// String invokes the configured string hook, or returns an empty string.
func (m *mockCmd) String() string {
	if m.string != nil {
		return m.string()
	}
	return ""
}

// Compile-time assertion that mockCmd satisfies the Commander interface.
var _ Commander = (*mockCmd)(nil)
|
||||||
|
|
||||||
|
// mockedExitCode is an error that exposes an ExitCode method, mimicking how
// process-exit errors are inspected via the hasExitCode interface in vhd.go.
type mockedExitCode struct {
	code int // exit status to report
}

// ExitCode returns the configured exit status.
func (m *mockedExitCode) ExitCode() int {
	return m.code
}

// Error implements the error interface.
func (m *mockedExitCode) Error() string {
	return fmt.Sprintf("mocked exit code: code %d", m.code)
}
|
||||||
|
|
||||||
|
// newMockDisk builds a 100MiB Disk backed by an in-memory filesystem and the
// given CommanderProvider. When c is nil, a default provider returning a
// no-op mockCmd is used so every command "succeeds".
func newMockDisk(c CommanderProvider) *Disk {
	commander := func(ctx context.Context, name string, args ...string) Commander {
		return &mockCmd{}
	}
	w := commander
	if c != nil {
		w = c
	}
	return New(100 * 1024 * 1024, "/disk.img", "/mnt", WithFs(afero.NewMemMapFs()), WithCommander(w))
}
|
||||||
|
|
||||||
|
// Test_New covers the Disk constructor: default field values and commander,
// the WithFs/WithCommander functional options, and the panic guard against
// empty disk or mount paths.
func Test_New(t *testing.T) {
	t.Run("creates expected struct", func(t *testing.T) {
		d := New(100 * 1024 * 1024, "/disk.img", "/mnt")
		assert.NotNil(t, d)
		assert.Equal(t, int64(100 * 1024 * 1024), d.size)
		assert.Equal(t, "/disk.img", d.diskPath)
		assert.Equal(t, "/mnt", d.mountAt)

		// Ensure by default we get a commander interface returned and that it
		// returns an *exec.Cmd.
		o := d.commander(context.TODO(), "foo", "-bar")
		assert.NotNil(t, o)
		_, ok := o.(Commander)
		assert.True(t, ok)
		_, ok = o.(*exec.Cmd)
		assert.True(t, ok)
	})

	t.Run("creates an instance with custom options", func(t *testing.T) {
		fs := afero.NewMemMapFs()

		// Anonymous struct embedding Commander gives us a unique pointer to
		// assert identity against below.
		cprov := struct {
			Commander
		}{}
		c := func(ctx context.Context, name string, args ...string) Commander {
			return &cprov
		}

		d := New(100, "/disk.img", "/mnt", WithFs(fs), WithCommander(c))
		assert.NotNil(t, d)
		assert.Same(t, fs, d.fs)
		assert.Same(t, &cprov, d.commander(context.TODO(), ""))
	})

	t.Run("panics if either path is empty", func(t *testing.T) {
		assert.Panics(t, func() {
			_ = New(100, "", "/bar")
		})

		assert.Panics(t, func() {
			_ = New(100, "/foo", "")
		})
	})
}
|
||||||
|
|
||||||
|
// TestDisk_Exists covers the three Exists outcomes: the image file present,
// the image file absent, and the path existing but being a directory (which
// is reported as ErrInvalidDiskPathTarget).
func TestDisk_Exists(t *testing.T) {
	t.Run("it exists", func(t *testing.T) {
		d := newMockDisk(nil)
		// Create the image file so Exists has something to find.
		f, err := d.fs.Create("/disk.img")
		require.NoError(t, err)
		_ = f.Close()

		exists, err := d.Exists()
		assert.NoError(t, err)
		assert.True(t, exists)
	})

	t.Run("it does not exist", func(t *testing.T) {
		d := newMockDisk(nil)
		exists, err := d.Exists()
		assert.NoError(t, err)
		assert.False(t, exists)
	})

	t.Run("it reports errors", func(t *testing.T) {
		d := newMockDisk(nil)
		// A directory at the image path is an invalid target.
		err := d.fs.Mkdir("/disk.img", 0600)
		require.NoError(t, err)

		exists, err := d.Exists()
		assert.Error(t, err)
		assert.False(t, exists)
		assert.EqualError(t, err, ErrInvalidDiskPathTarget.Error())
	})
}
|
||||||
|
|
||||||
|
// TestDisk_IsMounted verifies the grep-based /proc/mounts probe: the exact
// command invocation, the "not found" (exit 1) case mapping to false without
// error, and unexpected exit codes being surfaced as errors.
func TestDisk_IsMounted(t *testing.T) {
	t.Run("executes command and finds mounted disk", func(t *testing.T) {
		is := assert.New(t)
		var called bool

		pctx := context.TODO()
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			called = true
			is.Same(pctx, ctx)
			is.Equal("grep", name)
			is.Len(args, 3)
			is.Equal([]string{"-qs", "/mnt ext4", "/proc/mounts"}, args)

			return &mockCmd{}
		}

		disk := newMockDisk(cmd)
		mnt, err := disk.IsMounted(pctx)
		is.NoError(err)
		is.True(mnt)
		is.True(called)
	})

	t.Run("handles exit code 1 gracefully", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			called = true
			return &mockCmd{
				run: func() error {
					// grep exits 1 when no line matches: not mounted, not an error.
					return &mockedExitCode{code: 1}
				},
			}
		}

		disk := newMockDisk(cmd)
		mnt, err := disk.IsMounted(context.TODO())
		assert.NoError(t, err)
		assert.False(t, mnt)
		assert.True(t, called)
	})

	t.Run("handles unexpected errors successfully", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// Any exit code other than 0/1 is an unexpected grep failure.
					return &mockedExitCode{code: 3}
				},
			}
		}

		disk := newMockDisk(cmd)
		mnt, err := disk.IsMounted(context.TODO())
		assert.Error(t, err)
		assert.False(t, mnt)
	})
}
|
||||||
|
|
||||||
|
// TestDisk_Mount exercises mount-point validation (file in the way, read-only
// fs, already mounted), stderr propagation when the mount command fails, and
// the happy paths for both existing and missing mount directories.
func TestDisk_Mount(t *testing.T) {
	// failedCmd makes the IsMounted grep probe report "not mounted" (exit 1)
	// so subtests proceed past the already-mounted guard.
	failedCmd := func(ctx context.Context, name string, args ...string) Commander {
		return &mockCmd{run: func() error {
			return &mockedExitCode{code: 1}
		}}
	}

	t.Run("error is returned if mount point is not a directory", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		// Put a regular file where the mount directory should be.
		_, err := disk.fs.Create("/mnt")
		require.NoError(t, err)

		err = disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, ErrMountPathNotDirectory.Error())
	})

	t.Run("error is returned if mount point cannot be created", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		// A read-only filesystem forces MkdirAll to fail.
		disk.fs = afero.NewReadOnlyFs(disk.fs)

		err := disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, "vhd: failed to create mount path: operation not permitted")
	})

	t.Run("error is returned if already mounted", func(t *testing.T) {
		// Default mockCmd Run succeeds, so the grep probe reports "mounted".
		disk := newMockDisk(nil)
		err := disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, ErrFilesystemMounted.Error())
	})

	t.Run("error is returned if mount command fails", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// Keep the IsMounted probe reporting "not mounted".
					return &mockedExitCode{code: 1}
				},
				output: func() ([]byte, error) {
					called = true

					assert.Equal(t, "mount", name)
					assert.Equal(t, []string{"-t", "auto", "-o", "loop", "/disk.img", "/mnt"}, args)

					return nil, &exec.ExitError{
						ProcessState: &os.ProcessState{},
						Stderr:       []byte("foo bar.\n"),
					}
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Mount(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, "vhd: failed to mount disk: foo bar: exit status 0")
		assert.True(t, called)
	})

	t.Run("disk can be mounted at existing path", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		require.NoError(t, disk.fs.Mkdir("/mnt", 0600))

		err := disk.Mount(context.TODO())
		assert.NoError(t, err)
	})

	t.Run("disk can be mounted at non-existing path", func(t *testing.T) {
		disk := newMockDisk(failedCmd)
		err := disk.Mount(context.TODO())
		assert.NoError(t, err)

		// Mount should have created the directory itself.
		st, err := disk.fs.Stat("/mnt")
		assert.NoError(t, err)
		assert.True(t, st.IsDir())
	})
}
|
||||||
|
|
||||||
|
// TestDisk_Unmount verifies the exact umount invocation, the translation of
// exit code 32 ("not mounted") into a silent success, and that other failures
// — with or without an ExitCode method — are surfaced as errors.
func TestDisk_Unmount(t *testing.T) {
	t.Run("can unmount a disk", func(t *testing.T) {
		is := assert.New(t)
		pctx := context.TODO()

		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			called = true

			is.Same(pctx, ctx)
			is.Equal("umount", name)
			is.Equal([]string{"/mnt"}, args)

			return &mockCmd{}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(pctx)
		is.NoError(err)
		is.True(called)
	})

	t.Run("handles exit code 32 correctly", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// umount exits 32 when the path is not mounted.
					return &mockedExitCode{code: 32}
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(context.TODO())
		assert.NoError(t, err)
	})

	t.Run("non code 32 errors are returned as error", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					return &mockedExitCode{code: 1}
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(context.TODO())
		assert.Error(t, err)
	})

	t.Run("errors without ExitCode function are returned", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// Plain error: does not satisfy the hasExitCode interface.
					return errors.New("foo bar")
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.Unmount(context.TODO())
		assert.Error(t, err)
	})
}
|
||||||
|
|
||||||
|
// TestDisk_Allocate covers the fallocate invocation with the size expressed
// in KiB, allocation when the image does not yet exist, and stderr
// propagation when the allocation command fails.
func TestDisk_Allocate(t *testing.T) {
	t.Run("disk is unmounted before allocating space", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				output: func() ([]byte, error) {
					called = true
					// 100MiB expressed as KiB for fallocate's "-l" flag.
					assert.Equal(t, "fallocate", name)
					assert.Equal(t, []string{"-l", "102400K", "/disk.img"}, args)
					return nil, nil
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.fs.Mkdir("/mnt", 0600)
		require.NoError(t, err)

		err = disk.Allocate(context.TODO())
		assert.NoError(t, err)
		assert.True(t, called)
	})

	t.Run("disk space is allocated even when not exists", func(t *testing.T) {
		disk := newMockDisk(nil)
		err := disk.Allocate(context.TODO())
		assert.NoError(t, err)
	})

	t.Run("error is returned if command fails", func(t *testing.T) {
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				output: func() ([]byte, error) {
					return nil, &exec.ExitError{
						ProcessState: &os.ProcessState{},
						Stderr:       []byte("foo bar.\n"),
					}
				},
			}
		}

		disk := newMockDisk(cmd)
		_, err := disk.fs.Create("/disk.img")
		require.NoError(t, err)

		err = disk.Allocate(context.TODO())
		assert.Error(t, err)
		// NOTE(review): Allocate's error prefix in vhd.go reads "vhd: failed to
		// execute space allocation command" — confirm this expected string
		// matches the implementation under test.
		assert.EqualError(t, err, "vhd: failed to execute fallocate command: foo bar: exit status 0")
	})
}
|
||||||
|
|
||||||
|
// TestDisk_MakeFilesystem covers the two "safe to mkfs" paths (disk missing
// from /etc/fstab, and the mount command exiting 32) plus the guard that
// returns ErrFilesystemExists when the disk is already mounted.
func TestDisk_MakeFilesystem(t *testing.T) {
	t.Run("filesystem is created if not found in /etc/fstab", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// Expect the call from IsMounted here and just return what we need
					// to indicate that nothing is currently mounted.
					if name == "grep" {
						return &mockedExitCode{code: 1}
					}
					called = true
					assert.Equal(t, "mkfs", name)
					assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
					return nil
				},
				output: func() ([]byte, error) {
					// Simulate the mount command failing with an fstab miss.
					return nil, errors.New("error: can't find in /etc/fstab foo bar testing")
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.MakeFilesystem(context.TODO())
		assert.NoError(t, err)
		assert.True(t, called)
	})

	t.Run("filesystem is created if error is returned from mount command", func(t *testing.T) {
		var called bool
		var cmd CommanderProvider = func(ctx context.Context, name string, args ...string) Commander {
			return &mockCmd{
				run: func() error {
					// Expect the call from IsMounted here and just return what we need
					// to indicate that nothing is currently mounted.
					if name == "grep" {
						return &mockedExitCode{code: 1}
					}
					called = true
					assert.Equal(t, "mkfs", name)
					assert.Equal(t, []string{"-t", "ext4", "/disk.img"}, args)
					return nil
				},
				output: func() ([]byte, error) {
					// Simulate mount failing with exit status 32 on stderr.
					if name == "mount" {
						return nil, &exec.ExitError{
							Stderr: []byte("foo bar: exit status 32\n"),
						}
					}
					return nil, nil
				},
			}
		}

		disk := newMockDisk(cmd)
		err := disk.MakeFilesystem(context.TODO())
		assert.NoError(t, err)
		assert.True(t, called)
	})

	t.Run("error is returned if currently mounted", func(t *testing.T) {
		// Default mockCmd succeeds, so the grep probe reports "mounted".
		disk := newMockDisk(nil)
		err := disk.MakeFilesystem(context.TODO())
		assert.Error(t, err)
		assert.EqualError(t, err, ErrFilesystemExists.Error())
	})
}
|
||||||
@@ -15,7 +15,7 @@ import (
|
|||||||
"github.com/icza/dyno"
|
"github.com/icza/dyno"
|
||||||
"github.com/magiconair/properties"
|
"github.com/magiconair/properties"
|
||||||
"gopkg.in/ini.v1"
|
"gopkg.in/ini.v1"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v3"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -10,11 +10,14 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/cenkalti/backoff/v4"
|
"github.com/cenkalti/backoff/v4"
|
||||||
"github.com/goccy/go-json"
|
"github.com/goccy/go-json"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,6 +33,7 @@ type Client interface {
|
|||||||
SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
|
SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
|
||||||
SetTransferStatus(ctx context.Context, uuid string, successful bool) error
|
SetTransferStatus(ctx context.Context, uuid string, successful bool) error
|
||||||
ValidateSftpCredentials(ctx context.Context, request SftpAuthRequest) (SftpAuthResponse, error)
|
ValidateSftpCredentials(ctx context.Context, request SftpAuthRequest) (SftpAuthResponse, error)
|
||||||
|
SendActivityLogs(ctx context.Context, activity []models.Activity) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type client struct {
|
type client struct {
|
||||||
@@ -56,6 +60,18 @@ func New(base string, opts ...ClientOption) Client {
|
|||||||
return &c
|
return &c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewFromConfig returns a new Client using the configuration passed through
|
||||||
|
// by the caller.
|
||||||
|
func NewFromConfig(cfg *config.Configuration, opts ...ClientOption) Client {
|
||||||
|
passOpts := []ClientOption{
|
||||||
|
WithCredentials(cfg.AuthenticationTokenId, cfg.AuthenticationToken),
|
||||||
|
WithHttpClient(&http.Client{
|
||||||
|
Timeout: time.Second * time.Duration(cfg.RemoteQuery.Timeout),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
return New(cfg.PanelLocation, append(passOpts, opts...)...)
|
||||||
|
}
|
||||||
|
|
||||||
// WithCredentials sets the credentials to use when making request to the remote
|
// WithCredentials sets the credentials to use when making request to the remote
|
||||||
// API endpoint.
|
// API endpoint.
|
||||||
func WithCredentials(id, token string) ClientOption {
|
func WithCredentials(id, token string) ClientOption {
|
||||||
@@ -128,10 +144,19 @@ func (c *client) requestOnce(ctx context.Context, method, path string, body io.R
|
|||||||
// and adds the required authentication headers to the request that is being
|
// and adds the required authentication headers to the request that is being
|
||||||
// created. Errors returned will be of the RequestError type if there was some
|
// created. Errors returned will be of the RequestError type if there was some
|
||||||
// type of response from the API that can be parsed.
|
// type of response from the API that can be parsed.
|
||||||
func (c *client) request(ctx context.Context, method, path string, body io.Reader, opts ...func(r *http.Request)) (*Response, error) {
|
func (c *client) request(ctx context.Context, method, path string, body *bytes.Buffer, opts ...func(r *http.Request)) (*Response, error) {
|
||||||
var res *Response
|
var res *Response
|
||||||
err := backoff.Retry(func() error {
|
err := backoff.Retry(func() error {
|
||||||
r, err := c.requestOnce(ctx, method, path, body, opts...)
|
var b bytes.Buffer
|
||||||
|
if body != nil {
|
||||||
|
// We have to create a copy of the body, otherwise attempting this request again will
|
||||||
|
// send no data if there was initially a body since the "requestOnce" method will read
|
||||||
|
// the whole buffer, thus leaving it empty at the end.
|
||||||
|
if _, err := b.Write(body.Bytes()); err != nil {
|
||||||
|
return backoff.Permanent(errors.Wrap(err, "http: failed to copy body buffer"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r, err := c.requestOnce(ctx, method, path, &b, opts...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
||||||
return backoff.Permanent(err)
|
return backoff.Permanent(err)
|
||||||
@@ -142,12 +167,10 @@ func (c *client) request(ctx context.Context, method, path string, body io.Reade
|
|||||||
if r.HasError() {
|
if r.HasError() {
|
||||||
// Close the request body after returning the error to free up resources.
|
// Close the request body after returning the error to free up resources.
|
||||||
defer r.Body.Close()
|
defer r.Body.Close()
|
||||||
// Don't keep spamming the endpoint if we've already made too many requests or
|
// Don't keep attempting to access this endpoint if the response is a 4XX
|
||||||
// if we're not even authenticated correctly. Retrying generally won't fix either
|
// level error which indicates a client mistake. Only retry when the error
|
||||||
// of these issues.
|
// is due to a server issue (5XX error).
|
||||||
if r.StatusCode == http.StatusForbidden ||
|
if r.StatusCode >= 400 && r.StatusCode < 500 {
|
||||||
r.StatusCode == http.StatusTooManyRequests ||
|
|
||||||
r.StatusCode == http.StatusUnauthorized {
|
|
||||||
return backoff.Permanent(r.Error())
|
return backoff.Permanent(r.Error())
|
||||||
}
|
}
|
||||||
return r.Error()
|
return r.Error()
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
@@ -178,6 +180,16 @@ func (c *client) SendRestorationStatus(ctx context.Context, backup string, succe
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SendActivityLogs sends activity logs back to the Panel for processing.
|
||||||
|
func (c *client) SendActivityLogs(ctx context.Context, activity []models.Activity) error {
|
||||||
|
resp, err := c.Post(ctx, "/activity", d{"data": activity})
|
||||||
|
if err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
_ = resp.Body.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// getServersPaged returns a subset of servers from the Panel API using the
|
// getServersPaged returns a subset of servers from the Panel API using the
|
||||||
// pagination query parameters.
|
// pagination query parameters.
|
||||||
func (c *client) getServersPaged(ctx context.Context, page, limit int) ([]RawServerData, Pagination, error) {
|
func (c *client) getServersPaged(ctx context.Context, page, limit int) ([]RawServerData, Pagination, error) {
|
||||||
|
|||||||
@@ -11,6 +11,11 @@ import (
|
|||||||
"github.com/pterodactyl/wings/parser"
|
"github.com/pterodactyl/wings/parser"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
SftpAuthPassword = SftpAuthRequestType("password")
|
||||||
|
SftpAuthPublicKey = SftpAuthRequestType("public_key")
|
||||||
|
)
|
||||||
|
|
||||||
// A generic type allowing for easy binding use when making requests to API
|
// A generic type allowing for easy binding use when making requests to API
|
||||||
// endpoints that only expect a singular argument or something that would not
|
// endpoints that only expect a singular argument or something that would not
|
||||||
// benefit from being a typed struct.
|
// benefit from being a typed struct.
|
||||||
@@ -63,15 +68,17 @@ type RawServerData struct {
|
|||||||
ProcessConfiguration json.RawMessage `json:"process_configuration"`
|
ProcessConfiguration json.RawMessage `json:"process_configuration"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SftpAuthRequestType string
|
||||||
|
|
||||||
// SftpAuthRequest defines the request details that are passed along to the Panel
|
// SftpAuthRequest defines the request details that are passed along to the Panel
|
||||||
// when determining if the credentials provided to Wings are valid.
|
// when determining if the credentials provided to Wings are valid.
|
||||||
type SftpAuthRequest struct {
|
type SftpAuthRequest struct {
|
||||||
|
Type SftpAuthRequestType `json:"type"`
|
||||||
User string `json:"username"`
|
User string `json:"username"`
|
||||||
Pass string `json:"password"`
|
Pass string `json:"password"`
|
||||||
IP string `json:"ip"`
|
IP string `json:"ip"`
|
||||||
SessionID []byte `json:"session_id"`
|
SessionID []byte `json:"session_id"`
|
||||||
ClientVersion []byte `json:"client_version"`
|
ClientVersion []byte `json:"client_version"`
|
||||||
Type string `json:"type"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// SftpAuthResponse is returned by the Panel when a pair of SFTP credentials
|
// SftpAuthResponse is returned by the Panel when a pair of SFTP credentials
|
||||||
@@ -79,8 +86,8 @@ type SftpAuthRequest struct {
|
|||||||
// matched as well as the permissions that are assigned to the authenticated
|
// matched as well as the permissions that are assigned to the authenticated
|
||||||
// user for the SFTP subsystem.
|
// user for the SFTP subsystem.
|
||||||
type SftpAuthResponse struct {
|
type SftpAuthResponse struct {
|
||||||
SSHKeys []string `json:"ssh_keys"`
|
|
||||||
Server string `json:"server"`
|
Server string `json:"server"`
|
||||||
|
User string `json:"user"`
|
||||||
Permissions []string `json:"permissions"`
|
Permissions []string `json:"permissions"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -150,9 +157,15 @@ type BackupRemoteUploadResponse struct {
|
|||||||
PartSize int64 `json:"part_size"`
|
PartSize int64 `json:"part_size"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type BackupPart struct {
|
||||||
|
ETag string `json:"etag"`
|
||||||
|
PartNumber int `json:"part_number"`
|
||||||
|
}
|
||||||
|
|
||||||
type BackupRequest struct {
|
type BackupRequest struct {
|
||||||
Checksum string `json:"checksum"`
|
Checksum string `json:"checksum"`
|
||||||
ChecksumType string `json:"checksum_type"`
|
ChecksumType string `json:"checksum_type"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
Successful bool `json:"successful"`
|
Successful bool `json:"successful"`
|
||||||
|
Parts []BackupPart `json:"parts"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
"github.com/pterodactyl/wings/router/middleware"
|
"github.com/pterodactyl/wings/router/middleware"
|
||||||
wserver "github.com/pterodactyl/wings/server"
|
wserver "github.com/pterodactyl/wings/server"
|
||||||
@@ -15,6 +16,7 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
|
|||||||
|
|
||||||
router := gin.New()
|
router := gin.New()
|
||||||
router.Use(gin.Recovery())
|
router.Use(gin.Recovery())
|
||||||
|
router.SetTrustedProxies(config.Get().Api.TrustedProxies)
|
||||||
router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
|
router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
|
||||||
router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
|
router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
|
||||||
// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
|
// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/router/downloader"
|
"github.com/pterodactyl/wings/router/downloader"
|
||||||
"github.com/pterodactyl/wings/router/middleware"
|
"github.com/pterodactyl/wings/router/middleware"
|
||||||
"github.com/pterodactyl/wings/router/tokens"
|
"github.com/pterodactyl/wings/router/tokens"
|
||||||
@@ -180,7 +181,7 @@ func postServerReinstall(c *gin.Context) {
|
|||||||
c.Status(http.StatusAccepted)
|
c.Status(http.StatusAccepted)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deletes a server from the wings daemon and dissociate it's objects.
|
// Deletes a server from the wings daemon and dissociate its objects.
|
||||||
func deleteServer(c *gin.Context) {
|
func deleteServer(c *gin.Context) {
|
||||||
s := middleware.ExtractServer(c)
|
s := middleware.ExtractServer(c)
|
||||||
|
|
||||||
|
|||||||
@@ -13,6 +13,8 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
@@ -37,6 +39,15 @@ func getServerFileContents(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
// Don't allow a named pipe to be opened.
|
||||||
|
//
|
||||||
|
// @see https://github.com/pterodactyl/panel/issues/4059
|
||||||
|
if st.Mode()&os.ModeNamedPipe != 0 {
|
||||||
|
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||||
|
"error": "Cannot open files of this type.",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
c.Header("X-Mime-Type", st.Mimetype)
|
c.Header("X-Mime-Type", st.Mimetype)
|
||||||
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
||||||
@@ -122,6 +133,10 @@ func putServerRenameFiles(c *gin.Context) {
|
|||||||
// Return nil if the error is an is not exists.
|
// Return nil if the error is an is not exists.
|
||||||
// NOTE: os.IsNotExist() does not work if the error is wrapped.
|
// NOTE: os.IsNotExist() does not work if the error is wrapped.
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
s.Log().WithField("error", err).
|
||||||
|
WithField("from_path", pf).
|
||||||
|
WithField("to_path", pt).
|
||||||
|
Warn("failed to rename: source or target does not exist")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
@@ -587,6 +602,11 @@ func postServerUploadFiles(c *gin.Context) {
|
|||||||
if err := handleFileUpload(p, s, header); err != nil {
|
if err := handleFileUpload(p, s, header); err != nil {
|
||||||
NewServerError(err, s).Abort(c)
|
NewServerError(err, s).Abort(c)
|
||||||
return
|
return
|
||||||
|
} else {
|
||||||
|
s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.ClientIP()), server.ActivityFileUploaded, models.ActivityMeta{
|
||||||
|
"file": header.Filename,
|
||||||
|
"directory": filepath.Clean(directory),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -604,6 +624,5 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
|
|||||||
if err := s.Filesystem().Writefile(p, file); err != nil {
|
if err := s.Filesystem().Writefile(p, file); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ func getServerWebsocket(c *gin.Context) {
|
|||||||
ctx, cancel := context.WithCancel(c.Request.Context())
|
ctx, cancel := context.WithCancel(c.Request.Context())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
handler, err := websocket.GetHandler(s, c.Writer, c.Request)
|
handler, err := websocket.GetHandler(s, c.Writer, c.Request, c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
NewServerError(err, s).Abort(c)
|
NewServerError(err, s).Abort(c)
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -12,7 +12,6 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
@@ -30,19 +29,9 @@ import (
|
|||||||
"github.com/pterodactyl/wings/router/tokens"
|
"github.com/pterodactyl/wings/router/tokens"
|
||||||
"github.com/pterodactyl/wings/server"
|
"github.com/pterodactyl/wings/server"
|
||||||
"github.com/pterodactyl/wings/server/filesystem"
|
"github.com/pterodactyl/wings/server/filesystem"
|
||||||
"github.com/pterodactyl/wings/system"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Number of ticks in the progress bar
|
const progressWidth = 25
|
||||||
const ticks = 25
|
|
||||||
|
|
||||||
// 100% / number of ticks = percentage represented by each tick
|
|
||||||
const tickPercentage = 100 / ticks
|
|
||||||
|
|
||||||
type downloadProgress struct {
|
|
||||||
size int64
|
|
||||||
progress int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// Data passed over to initiate a server transfer.
|
// Data passed over to initiate a server transfer.
|
||||||
type serverTransferRequest struct {
|
type serverTransferRequest struct {
|
||||||
@@ -95,7 +84,7 @@ func getServerArchive(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute sha1 checksum.
|
// Compute sha256 checksum.
|
||||||
h := sha256.New()
|
h := sha256.New()
|
||||||
f, err := os.Open(archivePath)
|
f, err := os.Open(archivePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -184,11 +173,35 @@ func postServerArchive(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get the disk usage of the server (used to calculate the progress of the archive process)
|
||||||
|
rawSize, err := s.Filesystem().DiskUsage(true)
|
||||||
|
if err != nil {
|
||||||
|
sendTransferLog("Failed to get disk usage for server, aborting transfer..")
|
||||||
|
l.WithField("error", err).Error("failed to get disk usage for server")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Create an archive of the entire server's data directory.
|
// Create an archive of the entire server's data directory.
|
||||||
a := &filesystem.Archive{
|
a := &filesystem.Archive{
|
||||||
BasePath: s.Filesystem().Path(),
|
BasePath: s.Filesystem().Path(),
|
||||||
|
Progress: filesystem.NewProgress(rawSize),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Send the archive progress to the websocket every 3 seconds.
|
||||||
|
ctx, cancel := context.WithCancel(s.Context())
|
||||||
|
defer cancel()
|
||||||
|
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
|
||||||
|
defer t.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-t.C:
|
||||||
|
sendTransferLog("Archiving " + p.Progress(progressWidth))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(ctx, a.Progress, time.NewTicker(5*time.Second))
|
||||||
|
|
||||||
// Attempt to get an archive of the server.
|
// Attempt to get an archive of the server.
|
||||||
if err := a.Create(getArchivePath(s.ID())); err != nil {
|
if err := a.Create(getArchivePath(s.ID())); err != nil {
|
||||||
sendTransferLog("An error occurred while archiving the server: " + err.Error())
|
sendTransferLog("An error occurred while archiving the server: " + err.Error())
|
||||||
@@ -196,6 +209,12 @@ func postServerArchive(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Cancel the progress ticker.
|
||||||
|
cancel()
|
||||||
|
|
||||||
|
// Show 100% completion.
|
||||||
|
sendTransferLog("Archiving " + a.Progress.Progress(progressWidth))
|
||||||
|
|
||||||
sendTransferLog("Successfully created archive, attempting to notify panel..")
|
sendTransferLog("Successfully created archive, attempting to notify panel..")
|
||||||
l.Info("successfully created server transfer archive, notifying panel..")
|
l.Info("successfully created server transfer archive, notifying panel..")
|
||||||
|
|
||||||
@@ -223,12 +242,6 @@ func postServerArchive(c *gin.Context) {
|
|||||||
c.Status(http.StatusAccepted)
|
c.Status(http.StatusAccepted)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *downloadProgress) Write(v []byte) (int, error) {
|
|
||||||
n := len(v)
|
|
||||||
atomic.AddInt64(&w.progress, int64(n))
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Log helper function to attach all errors and info output to a consistently formatted
|
// Log helper function to attach all errors and info output to a consistently formatted
|
||||||
// log string for easier querying.
|
// log string for easier querying.
|
||||||
func (str serverTransferRequest) log() *log.Entry {
|
func (str serverTransferRequest) log() *log.Entry {
|
||||||
@@ -321,7 +334,7 @@ func postTransfer(c *gin.Context) {
|
|||||||
manager := middleware.ExtractManager(c)
|
manager := middleware.ExtractManager(c)
|
||||||
u, err := uuid.Parse(data.ServerID)
|
u, err := uuid.Parse(data.ServerID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
WithError(c, err)
|
_ = WithError(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Force the server ID to be a valid UUID string at this point. If it is not an error
|
// Force the server ID to be a valid UUID string at this point. If it is not an error
|
||||||
@@ -331,11 +344,12 @@ func postTransfer(c *gin.Context) {
|
|||||||
|
|
||||||
data.log().Info("handling incoming server transfer request")
|
data.log().Info("handling incoming server transfer request")
|
||||||
go func(data *serverTransferRequest) {
|
go func(data *serverTransferRequest) {
|
||||||
|
ctx := context.Background()
|
||||||
hasError := true
|
hasError := true
|
||||||
|
|
||||||
// Create a new server installer. This will only configure the environment and not
|
// Create a new server installer. This will only configure the environment and not
|
||||||
// run the installer scripts.
|
// run the installer scripts.
|
||||||
i, err := installer.New(context.Background(), manager, data.Server)
|
i, err := installer.New(ctx, manager, data.Server)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = data.sendTransferStatus(manager.Client(), false)
|
_ = data.sendTransferStatus(manager.Client(), false)
|
||||||
data.log().WithField("error", err).Error("failed to validate received server data")
|
data.log().WithField("error", err).Error("failed to validate received server data")
|
||||||
@@ -407,25 +421,22 @@ func postTransfer(c *gin.Context) {
|
|||||||
sendTransferLog("Writing archive to disk...")
|
sendTransferLog("Writing archive to disk...")
|
||||||
data.log().Info("writing transfer archive to disk...")
|
data.log().Info("writing transfer archive to disk...")
|
||||||
|
|
||||||
// Copy the file.
|
progress := filesystem.NewProgress(size)
|
||||||
progress := &downloadProgress{size: size}
|
|
||||||
ticker := time.NewTicker(3 * time.Second)
|
// Send the archive progress to the websocket every 3 seconds.
|
||||||
go func(progress *downloadProgress, t *time.Ticker) {
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
for range ticker.C {
|
defer cancel()
|
||||||
// p = 100 (Downloaded)
|
go func(ctx context.Context, p *filesystem.Progress, t *time.Ticker) {
|
||||||
// size = 1000 (Content-Length)
|
defer t.Stop()
|
||||||
// p / size = 0.1
|
for {
|
||||||
// * 100 = 10% (Multiply by 100 to get a percentage of the download)
|
select {
|
||||||
// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
|
case <-ctx.Done():
|
||||||
// 2.5 (Number of ticks as a float64)
|
return
|
||||||
// 2 (convert to an integer)
|
case <-t.C:
|
||||||
p := atomic.LoadInt64(&progress.progress)
|
sendTransferLog("Downloading " + p.Progress(progressWidth))
|
||||||
// We have to cast these numbers to float in order to get a float result from the division.
|
|
||||||
width := ((float64(p) / float64(size)) * 100) / tickPercentage
|
|
||||||
bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
|
|
||||||
sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
|
|
||||||
}
|
}
|
||||||
}(progress, ticker)
|
}
|
||||||
|
}(ctx, progress, time.NewTicker(5*time.Second))
|
||||||
|
|
||||||
var reader io.Reader
|
var reader io.Reader
|
||||||
downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
|
downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
|
||||||
@@ -438,18 +449,16 @@ func postTransfer(c *gin.Context) {
|
|||||||
|
|
||||||
buf := make([]byte, 1024*4)
|
buf := make([]byte, 1024*4)
|
||||||
if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
|
if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
|
||||||
ticker.Stop()
|
|
||||||
_ = file.Close()
|
_ = file.Close()
|
||||||
|
|
||||||
sendTransferLog("Failed while writing archive file to disk: " + err.Error())
|
sendTransferLog("Failed while writing archive file to disk: " + err.Error())
|
||||||
data.log().WithField("error", err).Error("failed to copy archive file to disk")
|
data.log().WithField("error", err).Error("failed to copy archive file to disk")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ticker.Stop()
|
cancel()
|
||||||
|
|
||||||
// Show 100% completion.
|
// Show 100% completion.
|
||||||
humanSize := system.FormatBytes(progress.size)
|
sendTransferLog("Downloading " + progress.Progress(progressWidth))
|
||||||
sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)
|
|
||||||
|
|
||||||
if err := file.Close(); err != nil {
|
if err := file.Close(); err != nil {
|
||||||
data.log().WithField("error", err).Error("unable to close archive file on local filesystem")
|
data.log().WithField("error", err).Error("unable to close archive file on local filesystem")
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ type UploadPayload struct {
|
|||||||
jwt.Payload
|
jwt.Payload
|
||||||
|
|
||||||
ServerUuid string `json:"server_uuid"`
|
ServerUuid string `json:"server_uuid"`
|
||||||
|
UserUuid string `json:"user_uuid"`
|
||||||
UniqueId string `json:"unique_id"`
|
UniqueId string `json:"unique_id"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
|
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/gbrlsnchs/jwt/v3"
|
"github.com/gbrlsnchs/jwt/v3"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// The time at which Wings was booted. No JWT's created before this time are allowed to
|
// The time at which Wings was booted. No JWT's created before this time are allowed to
|
||||||
@@ -35,13 +34,13 @@ func DenyJTI(jti string) {
|
|||||||
denylist.Store(jti, time.Now())
|
denylist.Store(jti, time.Now())
|
||||||
}
|
}
|
||||||
|
|
||||||
// A JWT payload for Websocket connections. This JWT is passed along to the Websocket after
|
// WebsocketPayload defines the JWT payload for a websocket connection. This JWT is passed along to
|
||||||
// it has been connected to by sending an "auth" event.
|
// the websocket after it has been connected to by sending an "auth" event.
|
||||||
type WebsocketPayload struct {
|
type WebsocketPayload struct {
|
||||||
jwt.Payload
|
jwt.Payload
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
|
|
||||||
UserID json.Number `json:"user_id"`
|
UserUUID string `json:"user_uuid"`
|
||||||
ServerUUID string `json:"server_uuid"`
|
ServerUUID string `json:"server_uuid"`
|
||||||
Permissions []string `json:"permissions"`
|
Permissions []string `json:"permissions"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/goccy/go-json"
|
"github.com/goccy/go-json"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/events"
|
"github.com/pterodactyl/wings/events"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
|
|
||||||
@@ -164,5 +165,6 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,12 +8,16 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/gbrlsnchs/jwt/v3"
|
"github.com/gbrlsnchs/jwt/v3"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/goccy/go-json"
|
"github.com/goccy/go-json"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
@@ -40,6 +44,7 @@ type Handler struct {
|
|||||||
Connection *websocket.Conn `json:"-"`
|
Connection *websocket.Conn `json:"-"`
|
||||||
jwt *tokens.WebsocketPayload
|
jwt *tokens.WebsocketPayload
|
||||||
server *server.Server
|
server *server.Server
|
||||||
|
ra server.RequestActivity
|
||||||
uuid uuid.UUID
|
uuid uuid.UUID
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -77,7 +82,7 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetHandler returns a new websocket handler using the context provided.
|
// GetHandler returns a new websocket handler using the context provided.
|
||||||
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Handler, error) {
|
func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request, c *gin.Context) (*Handler, error) {
|
||||||
upgrader := websocket.Upgrader{
|
upgrader := websocket.Upgrader{
|
||||||
// Ensure that the websocket request is originating from the Panel itself,
|
// Ensure that the websocket request is originating from the Panel itself,
|
||||||
// and not some other location.
|
// and not some other location.
|
||||||
@@ -109,6 +114,7 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
|
|||||||
Connection: conn,
|
Connection: conn,
|
||||||
jwt: nil,
|
jwt: nil,
|
||||||
server: s,
|
server: s,
|
||||||
|
ra: s.NewRequestActivity("", c.ClientIP()),
|
||||||
uuid: u,
|
uuid: u,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
@@ -264,6 +270,7 @@ func (h *Handler) GetJwt() *tokens.WebsocketPayload {
|
|||||||
// setJwt sets the JWT for the websocket in a race-safe manner.
|
// setJwt sets the JWT for the websocket in a race-safe manner.
|
||||||
func (h *Handler) setJwt(token *tokens.WebsocketPayload) {
|
func (h *Handler) setJwt(token *tokens.WebsocketPayload) {
|
||||||
h.Lock()
|
h.Lock()
|
||||||
|
h.ra = h.ra.SetUser(token.UserUUID)
|
||||||
h.jwt = token
|
h.jwt = token
|
||||||
h.Unlock()
|
h.Unlock()
|
||||||
}
|
}
|
||||||
@@ -365,6 +372,10 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
h.server.SaveActivity(h.ra, models.Event(server.ActivityPowerPrefix+action), nil)
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
case SendServerLogsEvent:
|
case SendServerLogsEvent:
|
||||||
@@ -421,7 +432,13 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return h.server.Environment.SendCommand(strings.Join(m.Args, ""))
|
if err := h.server.Environment.SendCommand(strings.Join(m.Args, "")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
h.server.SaveActivity(h.ra, server.ActivityConsoleCommand, models.ActivityMeta{
|
||||||
|
"command": strings.Join(m.Args, ""),
|
||||||
|
})
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
Name: ptero-wings
|
Name: ptero-wings
|
||||||
Version: 1.5.3
|
Version: 1.7.0
|
||||||
Release: 1%{?dist}
|
Release: 1%{?dist}
|
||||||
Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
|
Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
|
||||||
BuildArch: x86_64
|
BuildArch: x86_64
|
||||||
@@ -91,6 +91,9 @@ rm -rf /var/log/pterodactyl
|
|||||||
wings --version
|
wings --version
|
||||||
|
|
||||||
%changelog
|
%changelog
|
||||||
|
* Wed Sep 14 2022 Chance Callahan <ccallaha@redhat.com> - 1.7.0-1
|
||||||
|
- Updating specfile to match stable release.
|
||||||
|
|
||||||
* Wed Oct 27 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.3-1
|
* Wed Oct 27 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.3-1
|
||||||
- specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
|
- specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
|
||||||
- Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.3
|
- Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.3
|
||||||
|
|||||||
66
server/activity.go
Normal file
66
server/activity.go
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/database"
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
const ActivityPowerPrefix = "server:power."
|
||||||
|
|
||||||
|
const (
|
||||||
|
ActivityConsoleCommand = models.Event("server:console.command")
|
||||||
|
ActivitySftpWrite = models.Event("server:sftp.write")
|
||||||
|
ActivitySftpCreate = models.Event("server:sftp.create")
|
||||||
|
ActivitySftpCreateDirectory = models.Event("server:sftp.create-directory")
|
||||||
|
ActivitySftpRename = models.Event("server:sftp.rename")
|
||||||
|
ActivitySftpDelete = models.Event("server:sftp.delete")
|
||||||
|
ActivityFileUploaded = models.Event("server:file.uploaded")
|
||||||
|
)
|
||||||
|
|
||||||
|
// RequestActivity is a wrapper around a LoggedEvent that is able to track additional request
|
||||||
|
// specific metadata including the specific user and IP address associated with all subsequent
|
||||||
|
// events. The internal logged event structure can be extracted by calling RequestEvent.Event().
|
||||||
|
type RequestActivity struct {
|
||||||
|
server string
|
||||||
|
user string
|
||||||
|
ip string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Event returns the underlying logged event from the RequestEvent instance and sets the
|
||||||
|
// specific event and metadata on it.
|
||||||
|
func (ra RequestActivity) Event(event models.Event, metadata models.ActivityMeta) *models.Activity {
|
||||||
|
a := models.Activity{Server: ra.server, IP: ra.ip, Event: event, Metadata: metadata}
|
||||||
|
|
||||||
|
return a.SetUser(ra.user)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser clones the RequestActivity struct and sets a new user value on the copy
|
||||||
|
// before returning it.
|
||||||
|
func (ra RequestActivity) SetUser(u string) RequestActivity {
|
||||||
|
c := ra
|
||||||
|
c.user = u
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Server) NewRequestActivity(user string, ip string) RequestActivity {
|
||||||
|
return RequestActivity{server: s.ID(), user: user, ip: ip}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveActivity saves an activity entry to the database in a background routine. If an error is
|
||||||
|
// encountered it is logged but not returned to the caller.
|
||||||
|
func (s *Server) SaveActivity(a RequestActivity, event models.Event, metadata models.ActivityMeta) {
|
||||||
|
ctx, cancel := context.WithTimeout(s.Context(), time.Second*3)
|
||||||
|
go func() {
|
||||||
|
defer cancel()
|
||||||
|
if tx := database.Instance().WithContext(ctx).Create(a.Event(event, metadata)); tx.Error != nil {
|
||||||
|
s.Log().WithField("error", errors.WithStack(tx.Error)).
|
||||||
|
WithField("event", event).
|
||||||
|
Error("activity: failed to save event")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
@@ -32,7 +32,7 @@ type RestoreCallback func(file string, r io.Reader, mode fs.FileMode, atime, mti
|
|||||||
// noinspection GoNameStartsWithPackageName
|
// noinspection GoNameStartsWithPackageName
|
||||||
type BackupInterface interface {
|
type BackupInterface interface {
|
||||||
// SetClient sets the API request client on the backup interface.
|
// SetClient sets the API request client on the backup interface.
|
||||||
SetClient(c remote.Client)
|
SetClient(remote.Client)
|
||||||
// Identifier returns the UUID of this backup as tracked by the panel
|
// Identifier returns the UUID of this backup as tracked by the panel
|
||||||
// instance.
|
// instance.
|
||||||
Identifier() string
|
Identifier() string
|
||||||
@@ -41,7 +41,7 @@ type BackupInterface interface {
|
|||||||
WithLogContext(map[string]interface{})
|
WithLogContext(map[string]interface{})
|
||||||
// Generate creates a backup in whatever the configured source for the
|
// Generate creates a backup in whatever the configured source for the
|
||||||
// specific implementation is.
|
// specific implementation is.
|
||||||
Generate(ctx context.Context, basePath string, ignore string) (*ArchiveDetails, error)
|
Generate(context.Context, string, string) (*ArchiveDetails, error)
|
||||||
// Ignored returns the ignored files for this backup instance.
|
// Ignored returns the ignored files for this backup instance.
|
||||||
Ignored() string
|
Ignored() string
|
||||||
// Checksum returns a SHA1 checksum for the generated backup.
|
// Checksum returns a SHA1 checksum for the generated backup.
|
||||||
@@ -53,13 +53,13 @@ type BackupInterface interface {
|
|||||||
// to store it until it is moved to the final spot.
|
// to store it until it is moved to the final spot.
|
||||||
Path() string
|
Path() string
|
||||||
// Details returns details about the archive.
|
// Details returns details about the archive.
|
||||||
Details(ctx context.Context) (*ArchiveDetails, error)
|
Details(context.Context, []remote.BackupPart) (*ArchiveDetails, error)
|
||||||
// Remove removes a backup file.
|
// Remove removes a backup file.
|
||||||
Remove() error
|
Remove() error
|
||||||
// Restore is called when a backup is ready to be restored to the disk from
|
// Restore is called when a backup is ready to be restored to the disk from
|
||||||
// the given source. Not every backup implementation will support this nor
|
// the given source. Not every backup implementation will support this nor
|
||||||
// will every implementation require a reader be provided.
|
// will every implementation require a reader be provided.
|
||||||
Restore(ctx context.Context, reader io.Reader, callback RestoreCallback) error
|
Restore(context.Context, io.Reader, RestoreCallback) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type Backup struct {
|
type Backup struct {
|
||||||
@@ -119,8 +119,8 @@ func (b *Backup) Checksum() ([]byte, error) {
|
|||||||
|
|
||||||
// Details returns both the checksum and size of the archive currently stored on
|
// Details returns both the checksum and size of the archive currently stored on
|
||||||
// the disk to the caller.
|
// the disk to the caller.
|
||||||
func (b *Backup) Details(ctx context.Context) (*ArchiveDetails, error) {
|
func (b *Backup) Details(ctx context.Context, parts []remote.BackupPart) (*ArchiveDetails, error) {
|
||||||
ad := ArchiveDetails{ChecksumType: "sha1"}
|
ad := ArchiveDetails{ChecksumType: "sha1", Parts: parts}
|
||||||
g, ctx := errgroup.WithContext(ctx)
|
g, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
g.Go(func() error {
|
g.Go(func() error {
|
||||||
@@ -165,6 +165,7 @@ type ArchiveDetails struct {
|
|||||||
Checksum string `json:"checksum"`
|
Checksum string `json:"checksum"`
|
||||||
ChecksumType string `json:"checksum_type"`
|
ChecksumType string `json:"checksum_type"`
|
||||||
Size int64 `json:"size"`
|
Size int64 `json:"size"`
|
||||||
|
Parts []remote.BackupPart `json:"parts"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ToRequest returns a request object.
|
// ToRequest returns a request object.
|
||||||
@@ -174,5 +175,6 @@ func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
|
|||||||
ChecksumType: ad.ChecksumType,
|
ChecksumType: ad.ChecksumType,
|
||||||
Size: ad.Size,
|
Size: ad.Size,
|
||||||
Successful: successful,
|
Successful: successful,
|
||||||
|
Parts: ad.Parts,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -69,7 +69,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
|
|||||||
}
|
}
|
||||||
b.log().Info("created backup successfully")
|
b.log().Info("created backup successfully")
|
||||||
|
|
||||||
ad, err := b.Details(ctx)
|
ad, err := b.Details(ctx, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
|
return nil, errors.WrapIf(err, "backup: failed to get archive details for local backup")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -71,10 +71,11 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*Arch
|
|||||||
}
|
}
|
||||||
defer rc.Close()
|
defer rc.Close()
|
||||||
|
|
||||||
if err := s.generateRemoteRequest(ctx, rc); err != nil {
|
parts, err := s.generateRemoteRequest(ctx, rc)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
ad, err := s.Details(ctx)
|
ad, err := s.Details(ctx, parts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
|
return nil, errors.WrapIf(err, "backup: failed to get archive details after upload")
|
||||||
}
|
}
|
||||||
@@ -125,20 +126,20 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Generates the remote S3 request and begins the upload.
|
// Generates the remote S3 request and begins the upload.
|
||||||
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) error {
|
func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser) ([]remote.BackupPart, error) {
|
||||||
defer rc.Close()
|
defer rc.Close()
|
||||||
|
|
||||||
s.log().Debug("attempting to get size of backup...")
|
s.log().Debug("attempting to get size of backup...")
|
||||||
size, err := s.Backup.Size()
|
size, err := s.Backup.Size()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
s.log().WithField("size", size).Debug("got size of backup")
|
s.log().WithField("size", size).Debug("got size of backup")
|
||||||
|
|
||||||
s.log().Debug("attempting to get S3 upload urls from Panel...")
|
s.log().Debug("attempting to get S3 upload urls from Panel...")
|
||||||
urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
|
urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
s.log().Debug("got S3 upload urls from the Panel")
|
s.log().Debug("got S3 upload urls from the Panel")
|
||||||
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
|
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
|
||||||
@@ -156,22 +157,26 @@ func (s *S3Backup) generateRemoteRequest(ctx context.Context, rc io.ReadCloser)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Attempt to upload the part.
|
// Attempt to upload the part.
|
||||||
if _, err := uploader.uploadPart(ctx, part, partSize); err != nil {
|
etag, err := uploader.uploadPart(ctx, part, partSize)
|
||||||
|
if err != nil {
|
||||||
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
|
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
uploader.uploadedParts = append(uploader.uploadedParts, remote.BackupPart{
|
||||||
|
ETag: etag,
|
||||||
|
PartNumber: i + 1,
|
||||||
|
})
|
||||||
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
|
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
|
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
|
||||||
|
|
||||||
return nil
|
return uploader.uploadedParts, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type s3FileUploader struct {
|
type s3FileUploader struct {
|
||||||
io.ReadCloser
|
io.ReadCloser
|
||||||
client *http.Client
|
client *http.Client
|
||||||
|
uploadedParts []remote.BackupPart
|
||||||
}
|
}
|
||||||
|
|
||||||
// newS3FileUploader returns a new file uploader instance.
|
// newS3FileUploader returns a new file uploader instance.
|
||||||
|
|||||||
@@ -16,6 +16,11 @@ type EggConfiguration struct {
|
|||||||
FileDenylist []string `json:"file_denylist"`
|
FileDenylist []string `json:"file_denylist"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ConfigurationMeta struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Description string `json:"description"`
|
||||||
|
}
|
||||||
|
|
||||||
type Configuration struct {
|
type Configuration struct {
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
|
|
||||||
@@ -24,6 +29,8 @@ type Configuration struct {
|
|||||||
// docker containers as well as in log output.
|
// docker containers as well as in log output.
|
||||||
Uuid string `json:"uuid"`
|
Uuid string `json:"uuid"`
|
||||||
|
|
||||||
|
Meta ConfigurationMeta `json:"meta"`
|
||||||
|
|
||||||
// Whether or not the server is in a suspended state. Suspended servers cannot
|
// Whether or not the server is in a suspended state. Suspended servers cannot
|
||||||
// be started or modified except in certain scenarios by an admin user.
|
// be started or modified except in certain scenarios by an admin user.
|
||||||
Suspended bool `json:"suspended"`
|
Suspended bool `json:"suspended"`
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/mitchellh/colorstring"
|
"github.com/mitchellh/colorstring"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
@@ -17,6 +18,7 @@ import (
|
|||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
const memory = 4 * 1024
|
const memory = 4 * 1024
|
||||||
@@ -28,6 +30,62 @@ var pool = sync.Pool{
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Progress is used to track the progress of any I/O operation that are being
|
||||||
|
// performed.
|
||||||
|
type Progress struct {
|
||||||
|
// written is the total size of the files that have been written to the writer.
|
||||||
|
written int64
|
||||||
|
// Total is the total size of the archive in bytes.
|
||||||
|
total int64
|
||||||
|
// w .
|
||||||
|
w io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewProgress .
|
||||||
|
func NewProgress(total int64) *Progress {
|
||||||
|
return &Progress{total: total}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Written returns the total number of bytes written.
|
||||||
|
// This function should be used when the progress is tracking data being written.
|
||||||
|
func (p *Progress) Written() int64 {
|
||||||
|
return atomic.LoadInt64(&p.written)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Total returns the total size in bytes.
|
||||||
|
func (p *Progress) Total() int64 {
|
||||||
|
return atomic.LoadInt64(&p.total)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write totals the number of bytes that have been written to the writer.
|
||||||
|
func (p *Progress) Write(v []byte) (int, error) {
|
||||||
|
n := len(v)
|
||||||
|
atomic.AddInt64(&p.written, int64(n))
|
||||||
|
if p.w != nil {
|
||||||
|
return p.w.Write(v)
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Progress returns a formatted progress string for the current progress.
|
||||||
|
func (p *Progress) Progress(width int) string {
|
||||||
|
current := p.Written()
|
||||||
|
total := p.Total()
|
||||||
|
|
||||||
|
// v = 100 (Progress)
|
||||||
|
// size = 1000 (Content-Length)
|
||||||
|
// p / size = 0.1
|
||||||
|
// * 100 = 10% (Multiply by 100 to get a percentage of the download)
|
||||||
|
// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
|
||||||
|
// 2.5 (Number of ticks as a float64)
|
||||||
|
// 2 (convert to an integer)
|
||||||
|
|
||||||
|
// We have to cast these numbers to float in order to get a float result from the division.
|
||||||
|
ticks := ((float64(current) / float64(total)) * 100) / (float64(100) / float64(width))
|
||||||
|
bar := strings.Repeat("=", int(ticks)) + strings.Repeat(" ", width-int(ticks))
|
||||||
|
return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
|
||||||
|
}
|
||||||
|
|
||||||
type Archive struct {
|
type Archive struct {
|
||||||
// BasePath is the absolute path to create the archive from where Files and Ignore are
|
// BasePath is the absolute path to create the archive from where Files and Ignore are
|
||||||
// relative to.
|
// relative to.
|
||||||
@@ -40,10 +98,13 @@ type Archive struct {
|
|||||||
// Files specifies the files to archive, this takes priority over the Ignore option, if
|
// Files specifies the files to archive, this takes priority over the Ignore option, if
|
||||||
// unspecified, all files in the BasePath will be archived unless Ignore is set.
|
// unspecified, all files in the BasePath will be archived unless Ignore is set.
|
||||||
Files []string
|
Files []string
|
||||||
|
|
||||||
|
// Progress wraps the writer of the archive to pass through the progress tracker.
|
||||||
|
Progress *Progress
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create creates an archive at dst with all of the files defined in the
|
// Create creates an archive at dst with all the files defined in the
|
||||||
// included files struct.
|
// included Files array.
|
||||||
func (a *Archive) Create(dst string) error {
|
func (a *Archive) Create(dst string) error {
|
||||||
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -62,13 +123,34 @@ func (a *Archive) Create(dst string) error {
|
|||||||
writer = f
|
writer = f
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Choose which compression level to use based on the compression_level configuration option
|
||||||
|
var compressionLevel int
|
||||||
|
switch config.Get().System.Backups.CompressionLevel {
|
||||||
|
case "none":
|
||||||
|
compressionLevel = pgzip.NoCompression
|
||||||
|
case "best_compression":
|
||||||
|
compressionLevel = pgzip.BestCompression
|
||||||
|
case "best_speed":
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
compressionLevel = pgzip.BestSpeed
|
||||||
|
}
|
||||||
|
|
||||||
// Create a new gzip writer around the file.
|
// Create a new gzip writer around the file.
|
||||||
gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
|
gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
|
||||||
_ = gw.SetConcurrency(1<<20, 1)
|
_ = gw.SetConcurrency(1<<20, 1)
|
||||||
defer gw.Close()
|
defer gw.Close()
|
||||||
|
|
||||||
|
var pw io.Writer
|
||||||
|
if a.Progress != nil {
|
||||||
|
a.Progress.w = gw
|
||||||
|
pw = a.Progress
|
||||||
|
} else {
|
||||||
|
pw = gw
|
||||||
|
}
|
||||||
|
|
||||||
// Create a new tar writer around the gzip writer.
|
// Create a new tar writer around the gzip writer.
|
||||||
tw := tar.NewWriter(gw)
|
tw := tar.NewWriter(pw)
|
||||||
defer tw.Close()
|
defer tw.Close()
|
||||||
|
|
||||||
// Configure godirwalk.
|
// Configure godirwalk.
|
||||||
@@ -103,7 +185,7 @@ func (a *Archive) Create(dst string) error {
|
|||||||
// being generated.
|
// being generated.
|
||||||
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
|
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
|
||||||
return func(path string, de *godirwalk.Dirent) error {
|
return func(path string, de *godirwalk.Dirent) error {
|
||||||
// Skip directories because we walking them recursively.
|
// Skip directories because we are walking them recursively.
|
||||||
if de.IsDir() {
|
if de.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -130,7 +212,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
|
|||||||
for _, f := range a.Files {
|
for _, f := range a.Files {
|
||||||
// If the given doesn't match, or doesn't have the same prefix continue
|
// If the given doesn't match, or doesn't have the same prefix continue
|
||||||
// to the next item in the loop.
|
// to the next item in the loop.
|
||||||
if p != f && !strings.HasPrefix(p, f) {
|
if p != f && !strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", f) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,7 +230,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
|
|||||||
// Adds a given file path to the final archive being created.
|
// Adds a given file path to the final archive being created.
|
||||||
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
|
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
|
||||||
// Lstat the file, this will give us the same information as Stat except that it will not
|
// Lstat the file, this will give us the same information as Stat except that it will not
|
||||||
// follow a symlink to it's target automatically. This is important to avoid including
|
// follow a symlink to its target automatically. This is important to avoid including
|
||||||
// files that exist outside the server root unintentionally in the backup.
|
// files that exist outside the server root unintentionally in the backup.
|
||||||
s, err := os.Lstat(p)
|
s, err := os.Lstat(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -8,10 +8,14 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
gzip2 "github.com/klauspost/compress/gzip"
|
||||||
|
zip2 "github.com/klauspost/compress/zip"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/mholt/archiver/v3"
|
"github.com/mholt/archiver/v3"
|
||||||
)
|
)
|
||||||
@@ -172,13 +176,26 @@ func ExtractNameFromArchive(f archiver.File) string {
|
|||||||
return f.Name()
|
return f.Name()
|
||||||
}
|
}
|
||||||
switch s := sys.(type) {
|
switch s := sys.(type) {
|
||||||
|
case *zip.FileHeader:
|
||||||
|
return s.Name
|
||||||
|
case *zip2.FileHeader:
|
||||||
|
return s.Name
|
||||||
case *tar.Header:
|
case *tar.Header:
|
||||||
return s.Name
|
return s.Name
|
||||||
case *gzip.Header:
|
case *gzip.Header:
|
||||||
return s.Name
|
return s.Name
|
||||||
case *zip.FileHeader:
|
case *gzip2.Header:
|
||||||
return s.Name
|
return s.Name
|
||||||
default:
|
default:
|
||||||
|
// At this point we cannot figure out what type of archive this might be so
|
||||||
|
// just try to find the name field in the struct. If it is found return it.
|
||||||
|
field := reflect.Indirect(reflect.ValueOf(sys)).FieldByName("Name")
|
||||||
|
if field.IsValid() {
|
||||||
|
return field.String()
|
||||||
|
}
|
||||||
|
// Fallback to the basename of the file at this point. There is nothing we can really
|
||||||
|
// do to try and figure out what the underlying directory of the file is supposed to
|
||||||
|
// be since it didn't implement a name field.
|
||||||
return f.Name()
|
return f.Name()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/pterodactyl/wings/internal/vhd"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"syscall"
|
"syscall"
|
||||||
@@ -35,18 +37,46 @@ func (ult *usageLookupTime) Get() time.Time {
|
|||||||
return ult.value
|
return ult.value
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
|
// MaxDisk returns the maximum amount of disk space that this Filesystem
|
||||||
|
// instance is allowed to use.
|
||||||
func (fs *Filesystem) MaxDisk() int64 {
|
func (fs *Filesystem) MaxDisk() int64 {
|
||||||
return atomic.LoadInt64(&fs.diskLimit)
|
return atomic.LoadInt64(&fs.diskLimit)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets the disk space limit for this Filesystem instance.
|
// SetDiskLimit sets the disk space limit for this Filesystem instance. This
|
||||||
func (fs *Filesystem) SetDiskLimit(i int64) {
|
// logic will also handle mounting or unmounting a virtual disk if it is being
|
||||||
atomic.SwapInt64(&fs.diskLimit, i)
|
// used currently.
|
||||||
|
func (fs *Filesystem) SetDiskLimit(ctx context.Context, i int64) error {
|
||||||
|
// Do nothing if this method is called but the limit is not changing.
|
||||||
|
if atomic.LoadInt64(&fs.diskLimit) == i {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if vhd.Enabled() {
|
||||||
|
if i == 0 && fs.IsVirtual() {
|
||||||
|
fs.log().Debug("disk limit changed to 0, destroying virtual disk")
|
||||||
|
// Remove the VHD if it is mounted so that we're just storing files directly on the system
|
||||||
|
// since we cannot have a virtual disk with a space limit enforced like that.
|
||||||
|
if err := fs.vhd.Destroy(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
fs.vhd = nil
|
||||||
|
}
|
||||||
|
// If we're setting a disk size go ahead and mount the VHD if it isn't already mounted,
|
||||||
|
// and then allocate the new space to the disk.
|
||||||
|
if i > 0 {
|
||||||
|
fs.log().Debug("disk limit updated, allocating new space to virtual disk")
|
||||||
|
if err := fs.ConfigureDisk(ctx, i); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fs.log().WithField("limit", i).Debug("disk limit updated")
|
||||||
|
atomic.StoreInt64(&fs.diskLimit, i)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// The same concept as HasSpaceAvailable however this will return an error if there is
|
// HasSpaceErr is the same concept as HasSpaceAvailable however this will return
|
||||||
// no space, rather than a boolean value.
|
// an error if there is no space, rather than a boolean value.
|
||||||
func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
|
func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
|
||||||
if !fs.HasSpaceAvailable(allowStaleValue) {
|
if !fs.HasSpaceAvailable(allowStaleValue) {
|
||||||
return newFilesystemError(ErrCodeDiskSpace, nil)
|
return newFilesystemError(ErrCodeDiskSpace, nil)
|
||||||
@@ -54,67 +84,77 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determines if the directory a file is trying to be added to has enough space available
|
// HasSpaceAvailable determines if the directory a file is trying to be added to
|
||||||
// for the file to be written to.
|
// has enough space available for the file to be written to.
|
||||||
//
|
//
|
||||||
// Because determining the amount of space being used by a server is a taxing operation we
|
// Because determining the amount of space being used by a server is a taxing
|
||||||
// will load it all up into a cache and pull from that as long as the key is not expired.
|
// operation we will load it all up into a cache and pull from that as long as
|
||||||
|
// the key is not expired. This operation will potentially block unless
|
||||||
|
// allowStaleValue is set to true. See the documentation on DiskUsage for how
|
||||||
|
// this affects the call.
|
||||||
//
|
//
|
||||||
// This operation will potentially block unless allowStaleValue is set to true. See the
|
// If the current size of the disk is larger than the maximum allowed size this
|
||||||
// documentation on DiskUsage for how this affects the call.
|
// function will return false, in all other cases it will return true. We do
|
||||||
|
// not check the existence of a virtual disk at this point since this logic is
|
||||||
|
// used to return friendly error messages to users, and also prevent us wasting
|
||||||
|
// time on more taxing operations when we know the result will end up failing due
|
||||||
|
// to space limits.
|
||||||
|
//
|
||||||
|
// If the servers disk limit is set to 0 it means there is no limit, however the
|
||||||
|
// DiskUsage method is still called to keep the cache warm. This function will
|
||||||
|
// always return true for a server with no limit set.
|
||||||
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
||||||
size, err := fs.DiskUsage(allowStaleValue)
|
size, err := fs.DiskUsage(allowStaleValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
|
fs.log().WithField("error", err).Warn("failed to determine root fs directory size")
|
||||||
|
}
|
||||||
|
return fs.MaxDisk() == 0 || size <= fs.MaxDisk()
|
||||||
}
|
}
|
||||||
|
|
||||||
// If space is -1 or 0 just return true, means they're allowed unlimited.
|
// CachedUsage returns the cached value for the amount of disk space used by the
|
||||||
//
|
// filesystem. Do not rely on this function for critical logical checks. It
|
||||||
// Technically we could skip disk space calculation because we don't need to check if the
|
// should only be used in areas where the actual disk usage does not need to be
|
||||||
// server exceeds it's limit but because this method caches the disk usage it would be best
|
// perfect, e.g. API responses for server resource usage.
|
||||||
// to calculate the disk usage and always return true.
|
|
||||||
if fs.MaxDisk() == 0 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return size <= fs.MaxDisk()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the cached value for the amount of disk space used by the filesystem. Do not rely on this
|
|
||||||
// function for critical logical checks. It should only be used in areas where the actual disk usage
|
|
||||||
// does not need to be perfect, e.g. API responses for server resource usage.
|
|
||||||
func (fs *Filesystem) CachedUsage() int64 {
|
func (fs *Filesystem) CachedUsage() int64 {
|
||||||
return atomic.LoadInt64(&fs.diskUsed)
|
return atomic.LoadInt64(&fs.diskUsed)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Internal helper function to allow other parts of the codebase to check the total used disk space
|
// DiskUsage is an internal helper function to allow other parts of the codebase
|
||||||
// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
|
// to check the total used disk space as needed without overly taxing the system.
|
||||||
// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
|
// This will prioritize the value from the cache to avoid excessive IO usage. We
|
||||||
|
// will only walk the filesystem and determine the size of the directory if there
|
||||||
// is no longer a cached value.
|
// is no longer a cached value.
|
||||||
//
|
//
|
||||||
// If "allowStaleValue" is set to true, a stale value MAY be returned to the caller if there is an
|
// If "allowStaleValue" is set to true, a stale value MAY be returned to the
|
||||||
// expired cache value AND there is currently another lookup in progress. If there is no cached value but
|
// caller if there is an expired cache value AND there is currently another
|
||||||
// no other lookup is in progress, a fresh disk space response will be returned to the caller.
|
// lookup in progress. If there is no cached value but no other lookup is in
|
||||||
|
// progress, a fresh disk space response will be returned to the caller.
|
||||||
//
|
//
|
||||||
// This is primarily to avoid a bunch of I/O operations from piling up on the server, especially on servers
|
// This is primarily to avoid a bunch of I/O operations from piling up on the
|
||||||
// with a large amount of files.
|
// server, especially on servers with a large amount of files.
|
||||||
func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
|
func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
|
||||||
// A disk check interval of 0 means this functionality is completely disabled.
|
// A disk check interval of 0 means this functionality is completely disabled.
|
||||||
if fs.diskCheckInterval == 0 {
|
if fs.diskCheckInterval == 0 {
|
||||||
return 0, nil
|
return 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if !fs.lastLookupTime.Get().After(time.Now().Add(time.Second * fs.diskCheckInterval * -1)) {
|
since := time.Now().Add(time.Second * fs.diskCheckInterval * -1)
|
||||||
|
// If the last lookup time was before our calculated limit we will re-execute this
|
||||||
|
// checking logic. If the lookup time was after the oldest possible timestamp we will
|
||||||
|
// continue returning the cached value.
|
||||||
|
if fs.lastLookupTime.Get().Before(since) {
|
||||||
// If we are now allowing a stale response go ahead and perform the lookup and return the fresh
|
// If we are now allowing a stale response go ahead and perform the lookup and return the fresh
|
||||||
// value. This is a blocking operation to the calling process.
|
// value. This is a blocking operation to the calling process.
|
||||||
if !allowStaleValue {
|
if !allowStaleValue {
|
||||||
return fs.updateCachedDiskUsage()
|
return fs.updateCachedDiskUsage()
|
||||||
} else if !fs.lookupInProgress.Load() {
|
}
|
||||||
|
|
||||||
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
|
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
|
||||||
// currently performing a lookup, just do the disk usage calculation in the background.
|
// currently performing a lookup, just do the disk usage calculation in the background.
|
||||||
|
if !fs.lookupInProgress.Load() {
|
||||||
go func(fs *Filesystem) {
|
go func(fs *Filesystem) {
|
||||||
if _, err := fs.updateCachedDiskUsage(); err != nil {
|
if _, err := fs.updateCachedDiskUsage(); err != nil {
|
||||||
log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
|
fs.log().WithField("error", err).Warn("failed to update fs disk usage from within routine")
|
||||||
}
|
}
|
||||||
}(fs)
|
}(fs)
|
||||||
}
|
}
|
||||||
@@ -194,11 +234,14 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
|||||||
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
|
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to determine if a server has space available for a file of a given size.
|
// HasSpaceFor is a function to determine if a server has space available for a
|
||||||
// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
|
// file of a given size. If space is available, no error will be returned,
|
||||||
// will be raised.
|
// otherwise an ErrNotEnoughSpace error will be raised. If this filesystem is
|
||||||
|
// configured as a virtual disk this function is a no-op as we will fall through
|
||||||
|
// to the native implementation to throw back an error if there is not disk
|
||||||
|
// space available.
|
||||||
func (fs *Filesystem) HasSpaceFor(size int64) error {
|
func (fs *Filesystem) HasSpaceFor(size int64) error {
|
||||||
if fs.MaxDisk() == 0 {
|
if fs.IsVirtual() || fs.MaxDisk() == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
s, err := fs.DiskUsage(true)
|
s, err := fs.DiskUsage(true)
|
||||||
@@ -234,3 +277,7 @@ func (fs *Filesystem) addDisk(i int64) int64 {
|
|||||||
|
|
||||||
return atomic.AddInt64(&fs.diskUsed, i)
|
return atomic.AddInt64(&fs.diskUsed, i)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) log() *log.Entry {
|
||||||
|
return log.WithField("server", fs.uuid).WithField("root", fs.root)
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ import (
|
|||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/pterodactyl/wings/internal/vhd"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,19 +31,23 @@ type Filesystem struct {
|
|||||||
diskUsed int64
|
diskUsed int64
|
||||||
diskCheckInterval time.Duration
|
diskCheckInterval time.Duration
|
||||||
denylist *ignore.GitIgnore
|
denylist *ignore.GitIgnore
|
||||||
|
vhd *vhd.Disk
|
||||||
|
|
||||||
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
|
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
|
||||||
diskLimit int64
|
diskLimit int64
|
||||||
|
|
||||||
// The root data directory path for this Filesystem instance.
|
// The root data directory path for this Filesystem instance.
|
||||||
root string
|
root string
|
||||||
|
uuid string
|
||||||
|
|
||||||
isTest bool
|
isTest bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new Filesystem instance for a given server.
|
// New creates a new Filesystem instance for a given server.
|
||||||
func New(root string, size int64, denylist []string) *Filesystem {
|
func New(uuid string, size int64, denylist []string) *Filesystem {
|
||||||
return &Filesystem{
|
root := filepath.Join(config.Get().System.Data, uuid)
|
||||||
|
fs := Filesystem{
|
||||||
|
uuid: uuid,
|
||||||
root: root,
|
root: root,
|
||||||
diskLimit: size,
|
diskLimit: size,
|
||||||
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
|
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
|
||||||
@@ -50,6 +55,15 @@ func New(root string, size int64, denylist []string) *Filesystem {
|
|||||||
lookupInProgress: system.NewAtomicBool(false),
|
lookupInProgress: system.NewAtomicBool(false),
|
||||||
denylist: ignore.CompileIgnoreLines(denylist...),
|
denylist: ignore.CompileIgnoreLines(denylist...),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If VHD support is enabled but this server is configured with no disk size
|
||||||
|
// limit we cannot actually use a virtual disk. In that case fall back to using
|
||||||
|
// the default driver.
|
||||||
|
if vhd.Enabled() && size > 0 {
|
||||||
|
fs.vhd = vhd.New(size, vhd.DiskPath(uuid), fs.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &fs
|
||||||
}
|
}
|
||||||
|
|
||||||
// Path returns the root path for the Filesystem instance.
|
// Path returns the root path for the Filesystem instance.
|
||||||
@@ -77,9 +91,9 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
|
|||||||
return f, st, nil
|
return f, st, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Acts by creating the given file and path on the disk if it is not present already. If
|
// Touch acts by creating the given file and path on the disk if it is not present
|
||||||
// it is present, the file is opened using the defaults which will truncate the contents.
|
// already. If it is present, the file is opened using the defaults which will
|
||||||
// The opened file is then returned to the caller.
|
// truncate the contents. The opened file is then returned to the caller.
|
||||||
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
|
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
|
||||||
cleaned, err := fs.SafePath(p)
|
cleaned, err := fs.SafePath(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -115,19 +129,6 @@ func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
|
|||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reads a file on the system and returns it as a byte representation in a file
|
|
||||||
// reader. This is not the most memory efficient usage since it will be reading the
|
|
||||||
// entirety of the file into memory.
|
|
||||||
func (fs *Filesystem) Readfile(p string, w io.Writer) error {
|
|
||||||
file, _, err := fs.File(p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
_, err = bufio.NewReader(file).WriteTo(w)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writefile writes a file to the system. If the file does not already exist one
|
// Writefile writes a file to the system. If the file does not already exist one
|
||||||
// will be created. This will also properly recalculate the disk space used by
|
// will be created. This will also properly recalculate the disk space used by
|
||||||
// the server when writing new files or modifying existing ones.
|
// the server when writing new files or modifying existing ones.
|
||||||
@@ -168,6 +169,12 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
|||||||
|
|
||||||
buf := make([]byte, 1024*4)
|
buf := make([]byte, 1024*4)
|
||||||
sz, err := io.CopyBuffer(file, r, buf)
|
sz, err := io.CopyBuffer(file, r, buf)
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "no space left on device") {
|
||||||
|
return newFilesystemError(ErrCodeDiskSpace, err)
|
||||||
|
}
|
||||||
|
return errors.WrapIf(err, "filesystem: failed to copy buffer for file write")
|
||||||
|
}
|
||||||
|
|
||||||
// Adjust the disk usage to account for the old size and the new size of the file.
|
// Adjust the disk usage to account for the old size and the new size of the file.
|
||||||
fs.addDisk(sz - currentSize)
|
fs.addDisk(sz - currentSize)
|
||||||
@@ -184,16 +191,16 @@ func (fs *Filesystem) CreateDirectory(name string, p string) error {
|
|||||||
return os.MkdirAll(cleaned, 0o755)
|
return os.MkdirAll(cleaned, 0o755)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Moves (or renames) a file or directory.
|
// Rename moves (or renames) a file or directory.
|
||||||
func (fs *Filesystem) Rename(from string, to string) error {
|
func (fs *Filesystem) Rename(from string, to string) error {
|
||||||
cleanedFrom, err := fs.SafePath(from)
|
cleanedFrom, err := fs.SafePath(from)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cleanedTo, err := fs.SafePath(to)
|
cleanedTo, err := fs.SafePath(to)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the target file or directory already exists the rename function will fail, so just
|
// If the target file or directory already exists the rename function will fail, so just
|
||||||
@@ -215,7 +222,10 @@ func (fs *Filesystem) Rename(from string, to string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return os.Rename(cleanedFrom, cleanedTo)
|
if err := os.Rename(cleanedFrom, cleanedTo); err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Recursively iterates over a file or directory and sets the permissions on all of the
|
// Recursively iterates over a file or directory and sets the permissions on all of the
|
||||||
@@ -322,8 +332,9 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
|
|||||||
return name + suffix + extension, nil
|
return name + suffix + extension, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copies a given file to the same location and appends a suffix to the file to indicate that
|
// Copy takes a given input file path and creates a copy of the file at the same
|
||||||
// it has been copied.
|
// location, appending a unique number to the end. For example, a copy of "test.txt"
|
||||||
|
// would create "test 2.txt" as the copy, then "test 3.txt" and so on.
|
||||||
func (fs *Filesystem) Copy(p string) error {
|
func (fs *Filesystem) Copy(p string) error {
|
||||||
cleaned, err := fs.SafePath(p)
|
cleaned, err := fs.SafePath(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -492,7 +503,11 @@ func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
|
|||||||
cleanedp, _ = fs.SafePath(filepath.Join(cleaned, f.Name()))
|
cleanedp, _ = fs.SafePath(filepath.Join(cleaned, f.Name()))
|
||||||
}
|
}
|
||||||
|
|
||||||
if cleanedp != "" {
|
// Don't try to detect the type on a pipe — this will just hang the application and
|
||||||
|
// you'll never get a response back.
|
||||||
|
//
|
||||||
|
// @see https://github.com/pterodactyl/panel/issues/4059
|
||||||
|
if cleanedp != "" && f.Mode()&os.ModeNamedPipe == 0 {
|
||||||
m, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
|
m, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
|
||||||
} else {
|
} else {
|
||||||
// Just pass this for an unknown type because the file could not safely be resolved within
|
// Just pass this for an unknown type because the file could not safely be resolved within
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
@@ -44,6 +45,14 @@ type rootFs struct {
|
|||||||
root string
|
root string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getFileContent(file *os.File) string {
|
||||||
|
var w bytes.Buffer
|
||||||
|
if _, err := bufio.NewReader(file).WriteTo(&w); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return w.String()
|
||||||
|
}
|
||||||
|
|
||||||
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
|
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
|
||||||
f, err := os.Create(filepath.Join(rfs.root, "/server", p))
|
f, err := os.Create(filepath.Join(rfs.root, "/server", p))
|
||||||
|
|
||||||
@@ -75,54 +84,6 @@ func (rfs *rootFs) reset() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilesystem_Readfile(t *testing.T) {
|
|
||||||
g := Goblin(t)
|
|
||||||
fs, rfs := NewFs()
|
|
||||||
|
|
||||||
g.Describe("Readfile", func() {
|
|
||||||
buf := &bytes.Buffer{}
|
|
||||||
|
|
||||||
g.It("opens a file if it exists on the system", func() {
|
|
||||||
err := rfs.CreateServerFileFromString("test.txt", "testing")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
|
|
||||||
err = fs.Readfile("test.txt", buf)
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(buf.String()).Equal("testing")
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("returns an error if the file does not exist", func() {
|
|
||||||
err := fs.Readfile("test.txt", buf)
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("returns an error if the \"file\" is a directory", func() {
|
|
||||||
err := os.Mkdir(filepath.Join(rfs.root, "/server/test.txt"), 0o755)
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
|
|
||||||
err = fs.Readfile("test.txt", buf)
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodeIsDirectory)).IsTrue()
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("cannot open a file outside the root directory", func() {
|
|
||||||
err := rfs.CreateServerFileFromString("/../test.txt", "testing")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
|
|
||||||
err = fs.Readfile("/../test.txt", buf)
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
})
|
|
||||||
|
|
||||||
g.AfterEach(func() {
|
|
||||||
buf.Truncate(0)
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
rfs.reset()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFilesystem_Writefile(t *testing.T) {
|
func TestFilesystem_Writefile(t *testing.T) {
|
||||||
g := Goblin(t)
|
g := Goblin(t)
|
||||||
fs, rfs := NewFs()
|
fs, rfs := NewFs()
|
||||||
@@ -140,9 +101,10 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
err := fs.Writefile("test.txt", r)
|
err := fs.Writefile("test.txt", r)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Readfile("test.txt", buf)
|
f, _, err := fs.File("test.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(buf.String()).Equal("test file content")
|
defer f.Close()
|
||||||
|
g.Assert(getFileContent(f)).Equal("test file content")
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(r.Size())
|
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(r.Size())
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -152,9 +114,10 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
err := fs.Writefile("/some/nested/test.txt", r)
|
err := fs.Writefile("/some/nested/test.txt", r)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Readfile("/some/nested/test.txt", buf)
|
f, _, err := fs.File("/some/nested/test.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(buf.String()).Equal("test file content")
|
defer f.Close()
|
||||||
|
g.Assert(getFileContent(f)).Equal("test file content")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("can create a new file inside a nested directory without a trailing slash", func() {
|
g.It("can create a new file inside a nested directory without a trailing slash", func() {
|
||||||
@@ -163,9 +126,10 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
err := fs.Writefile("some/../foo/bar/test.txt", r)
|
err := fs.Writefile("some/../foo/bar/test.txt", r)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Readfile("foo/bar/test.txt", buf)
|
f, _, err := fs.File("foo/bar/test.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(buf.String()).Equal("test file content")
|
defer f.Close()
|
||||||
|
g.Assert(getFileContent(f)).Equal("test file content")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot create a file outside the root directory", func() {
|
g.It("cannot create a file outside the root directory", func() {
|
||||||
@@ -190,28 +154,6 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
||||||
})
|
})
|
||||||
|
|
||||||
/*g.It("updates the total space used when a file is appended to", func() {
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 100)
|
|
||||||
|
|
||||||
b := make([]byte, 100)
|
|
||||||
_, _ = rand.Read(b)
|
|
||||||
|
|
||||||
r := bytes.NewReader(b)
|
|
||||||
err := fs.Writefile("test.txt", r)
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(200))
|
|
||||||
|
|
||||||
// If we write less data than already exists, we should expect the total
|
|
||||||
// disk used to be decremented.
|
|
||||||
b = make([]byte, 50)
|
|
||||||
_, _ = rand.Read(b)
|
|
||||||
|
|
||||||
r = bytes.NewReader(b)
|
|
||||||
err = fs.Writefile("test.txt", r)
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(150))
|
|
||||||
})*/
|
|
||||||
|
|
||||||
g.It("truncates the file when writing new contents", func() {
|
g.It("truncates the file when writing new contents", func() {
|
||||||
r := bytes.NewReader([]byte("original data"))
|
r := bytes.NewReader([]byte("original data"))
|
||||||
err := fs.Writefile("test.txt", r)
|
err := fs.Writefile("test.txt", r)
|
||||||
@@ -221,9 +163,10 @@ func TestFilesystem_Writefile(t *testing.T) {
|
|||||||
err = fs.Writefile("test.txt", r)
|
err = fs.Writefile("test.txt", r)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Readfile("test.txt", buf)
|
f, _, err := fs.File("test.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(buf.String()).Equal("new data")
|
defer f.Close()
|
||||||
|
g.Assert(getFileContent(f)).Equal("new data")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
|
|||||||
@@ -119,16 +119,6 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
g.Describe("Readfile", func() {
|
|
||||||
g.It("cannot read a file symlinked outside the root", func() {
|
|
||||||
b := bytes.Buffer{}
|
|
||||||
|
|
||||||
err := fs.Readfile("symlinked.txt", &b)
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
g.Describe("Writefile", func() {
|
g.Describe("Writefile", func() {
|
||||||
g.It("cannot write to a file symlinked outside the root", func() {
|
g.It("cannot write to a file symlinked outside the root", func() {
|
||||||
r := bytes.NewReader([]byte("testing"))
|
r := bytes.NewReader([]byte("testing"))
|
||||||
|
|||||||
42
server/filesystem/virtual.go
Normal file
42
server/filesystem/virtual.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package filesystem
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"emperror.dev/errors"
|
||||||
|
"github.com/pterodactyl/wings/internal/vhd"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IsVirtual returns true if the filesystem is currently using a virtual disk.
|
||||||
|
func (fs *Filesystem) IsVirtual() bool {
|
||||||
|
return fs.vhd != nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigureDisk will attempt to create a new VHD if there is not one already
|
||||||
|
// created for the filesystem. If there is this method will attempt to resize
|
||||||
|
// the underlying data volume. Passing a size of 0 or less will panic.
|
||||||
|
func (fs *Filesystem) ConfigureDisk(ctx context.Context, size int64) error {
|
||||||
|
if size <= 0 {
|
||||||
|
panic("filesystem: attempt to configure disk with empty size")
|
||||||
|
}
|
||||||
|
if fs.vhd == nil {
|
||||||
|
fs.vhd = vhd.New(size, vhd.DiskPath(fs.uuid), fs.root)
|
||||||
|
if err := fs.MountDisk(ctx); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Resize the disk now that it is for sure mounted and exists on the system.
|
||||||
|
if err := fs.vhd.Resize(ctx, size); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MountDisk will attempt to mount the underlying virtual disk for the server.
|
||||||
|
// If the disk is already mounted this is a no-op function.
|
||||||
|
func (fs *Filesystem) MountDisk(ctx context.Context) error {
|
||||||
|
err := fs.vhd.Mount(ctx)
|
||||||
|
if errors.Is(err, vhd.ErrFilesystemMounted) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.WrapIf(err, "filesystem: failed to mount VHD")
|
||||||
|
}
|
||||||
@@ -18,6 +18,7 @@ import (
|
|||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
"github.com/docker/docker/api/types/mount"
|
"github.com/docker/docker/api/types/mount"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
@@ -449,6 +450,7 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
|||||||
},
|
},
|
||||||
Privileged: true,
|
Privileged: true,
|
||||||
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
|
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
|
||||||
|
UsernsMode: container.UsernsMode(config.Get().Docker.UsernsMode),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure the root directory for the server exists properly before attempting
|
// Ensure the root directory for the server exists properly before attempting
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/events"
|
"github.com/pterodactyl/wings/events"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@@ -25,14 +24,16 @@ import (
|
|||||||
type Manager struct {
|
type Manager struct {
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
client remote.Client
|
client remote.Client
|
||||||
|
skipVhdInitialization bool
|
||||||
servers []*Server
|
servers []*Server
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewManager returns a new server manager instance. This will boot up all the
|
// NewManager returns a new server manager instance. This will boot up all the
|
||||||
// servers that are currently present on the filesystem and set them into the
|
// servers that are currently present on the filesystem and set them into the
|
||||||
// manager.
|
// manager.
|
||||||
func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
|
func NewManager(ctx context.Context, client remote.Client, skipVhdInit bool) (*Manager, error) {
|
||||||
m := NewEmptyManager(client)
|
m := NewEmptyManager(client)
|
||||||
|
m.skipVhdInitialization = skipVhdInit
|
||||||
if err := m.init(ctx); err != nil {
|
if err := m.init(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -52,6 +53,24 @@ func (m *Manager) Client() remote.Client {
|
|||||||
return m.client
|
return m.client
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Len returns the count of servers stored in the manager instance.
|
||||||
|
func (m *Manager) Len() int {
|
||||||
|
m.mu.RLock()
|
||||||
|
defer m.mu.RUnlock()
|
||||||
|
return len(m.servers)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keys returns all of the server UUIDs stored in the manager set.
|
||||||
|
func (m *Manager) Keys() []string {
|
||||||
|
m.mu.RLock()
|
||||||
|
defer m.mu.RUnlock()
|
||||||
|
keys := make([]string, len(m.servers))
|
||||||
|
for i, s := range m.servers {
|
||||||
|
keys[i] = s.ID()
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
// Put replaces all the current values in the collection with the value that
|
// Put replaces all the current values in the collection with the value that
|
||||||
// is passed through.
|
// is passed through.
|
||||||
func (m *Manager) Put(s []*Server) {
|
func (m *Manager) Put(s []*Server) {
|
||||||
@@ -166,7 +185,7 @@ func (m *Manager) ReadStates() (map[string]string, error) {
|
|||||||
// InitServer initializes a server using a data byte array. This will be
|
// InitServer initializes a server using a data byte array. This will be
|
||||||
// marshaled into the given struct using a YAML marshaler. This will also
|
// marshaled into the given struct using a YAML marshaler. This will also
|
||||||
// configure the given environment for a server.
|
// configure the given environment for a server.
|
||||||
func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server, error) {
|
func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfigurationResponse) (*Server, error) {
|
||||||
s, err := New(m.client)
|
s, err := New(m.client)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -178,7 +197,15 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
|
|||||||
return nil, errors.WithStackIf(err)
|
return nil, errors.WithStackIf(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
|
s.fs = filesystem.New(s.ID(), s.DiskSpace(), s.Config().Egg.FileDenylist)
|
||||||
|
// If this is a virtual filesystem we need to go ahead and mount the disk
|
||||||
|
// so that everything is accessible.
|
||||||
|
if s.fs.IsVirtual() && !m.skipVhdInitialization {
|
||||||
|
log.WithField("server", s.ID()).Info("mounting virtual disk for server")
|
||||||
|
if err := s.fs.MountDisk(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Right now we only support a Docker based environment, so I'm going to hard code
|
// Right now we only support a Docker based environment, so I'm going to hard code
|
||||||
// this logic in. When we're ready to support other environment we'll need to make
|
// this logic in. When we're ready to support other environment we'll need to make
|
||||||
@@ -240,7 +267,7 @@ func (m *Manager) init(ctx context.Context) error {
|
|||||||
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
|
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
s, err := m.InitServer(d)
|
s, err := m.InitServer(ctx, d)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
|
log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|||||||
@@ -179,6 +179,8 @@ func (s *Server) Log() *log.Entry {
|
|||||||
//
|
//
|
||||||
// This also means mass actions can be performed against servers on the Panel
|
// This also means mass actions can be performed against servers on the Panel
|
||||||
// and they will automatically sync with Wings when the server is started.
|
// and they will automatically sync with Wings when the server is started.
|
||||||
|
//
|
||||||
|
// TODO: accept a context value rather than using the server's context.
|
||||||
func (s *Server) Sync() error {
|
func (s *Server) Sync() error {
|
||||||
cfg, err := s.client.GetServerConfiguration(s.Context(), s.ID())
|
cfg, err := s.client.GetServerConfiguration(s.Context(), s.ID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -194,7 +196,9 @@ func (s *Server) Sync() error {
|
|||||||
|
|
||||||
// Update the disk space limits for the server whenever the configuration for
|
// Update the disk space limits for the server whenever the configuration for
|
||||||
// it changes.
|
// it changes.
|
||||||
s.fs.SetDiskLimit(s.DiskSpace())
|
if err := s.fs.SetDiskLimit(s.Context(), s.DiskSpace()); err != nil {
|
||||||
|
return errors.WrapIf(err, "server: failed to sync server configuration from API")
|
||||||
|
}
|
||||||
|
|
||||||
s.SyncWithEnvironment()
|
s.SyncWithEnvironment()
|
||||||
|
|
||||||
|
|||||||
59
sftp/event.go
Normal file
59
sftp/event.go
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
package sftp
|
||||||
|
|
||||||
|
import (
|
||||||
|
"emperror.dev/errors"
|
||||||
|
"github.com/apex/log"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/database"
|
||||||
|
"github.com/pterodactyl/wings/internal/models"
|
||||||
|
)
|
||||||
|
|
||||||
|
type eventHandler struct {
|
||||||
|
ip string
|
||||||
|
user string
|
||||||
|
server string
|
||||||
|
}
|
||||||
|
|
||||||
|
type FileAction struct {
|
||||||
|
// Entity is the targeted file or directory (depending on the event) that the action
|
||||||
|
// is being performed _against_, such as "/foo/test.txt". This will always be the full
|
||||||
|
// path to the element.
|
||||||
|
Entity string
|
||||||
|
// Target is an optional (often blank) field that only has a value in it when the event
|
||||||
|
// is specifically modifying the entity, such as a rename or move event. In that case
|
||||||
|
// the Target field will be the final value, such as "/bar/new.txt"
|
||||||
|
Target string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Log parses a SFTP specific file activity event and then passes it off to be stored
|
||||||
|
// in the normal activity database.
|
||||||
|
func (eh *eventHandler) Log(e models.Event, fa FileAction) error {
|
||||||
|
metadata := map[string]interface{}{
|
||||||
|
"files": []string{fa.Entity},
|
||||||
|
}
|
||||||
|
if fa.Target != "" {
|
||||||
|
metadata["files"] = []map[string]string{
|
||||||
|
{"from": fa.Entity, "to": fa.Target},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
a := models.Activity{
|
||||||
|
Server: eh.server,
|
||||||
|
Event: e,
|
||||||
|
Metadata: metadata,
|
||||||
|
IP: eh.ip,
|
||||||
|
}
|
||||||
|
|
||||||
|
if tx := database.Instance().Create(a.SetUser(eh.user)); tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustLog is a wrapper around log that will trigger a fatal error and exit the application
|
||||||
|
// if an error is encountered during the logging of the event.
|
||||||
|
func (eh *eventHandler) MustLog(e models.Event, fa FileAction) {
|
||||||
|
if err := eh.Log(e, fa); err != nil {
|
||||||
|
log.WithField("error", errors.WithStack(err)).WithField("event", e).Error("sftp: failed to log event")
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -28,31 +28,39 @@ const (
|
|||||||
|
|
||||||
type Handler struct {
|
type Handler struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
|
|
||||||
permissions []string
|
|
||||||
server *server.Server
|
server *server.Server
|
||||||
fs *filesystem.Filesystem
|
fs *filesystem.Filesystem
|
||||||
|
events *eventHandler
|
||||||
|
permissions []string
|
||||||
logger *log.Entry
|
logger *log.Entry
|
||||||
ro bool
|
ro bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns a new connection handler for the SFTP server. This allows a given user
|
// NewHandler returns a new connection handler for the SFTP server. This allows a given user
|
||||||
// to access the underlying filesystem.
|
// to access the underlying filesystem.
|
||||||
func NewHandler(sc *ssh.ServerConn, srv *server.Server) *Handler {
|
func NewHandler(sc *ssh.ServerConn, srv *server.Server) (*Handler, error) {
|
||||||
|
uuid, ok := sc.Permissions.Extensions["user"]
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.New("sftp: mismatched Wings and Panel versions — Panel 1.10 is required for this version of Wings.")
|
||||||
|
}
|
||||||
|
|
||||||
|
events := eventHandler{
|
||||||
|
ip: sc.RemoteAddr().String(),
|
||||||
|
user: uuid,
|
||||||
|
server: srv.ID(),
|
||||||
|
}
|
||||||
|
|
||||||
return &Handler{
|
return &Handler{
|
||||||
permissions: strings.Split(sc.Permissions.Extensions["permissions"], ","),
|
permissions: strings.Split(sc.Permissions.Extensions["permissions"], ","),
|
||||||
server: srv,
|
server: srv,
|
||||||
fs: srv.Filesystem(),
|
fs: srv.Filesystem(),
|
||||||
|
events: &events,
|
||||||
ro: config.Get().System.Sftp.ReadOnly,
|
ro: config.Get().System.Sftp.ReadOnly,
|
||||||
logger: log.WithFields(log.Fields{
|
logger: log.WithFields(log.Fields{"subsystem": "sftp", "user": uuid, "ip": sc.RemoteAddr()}),
|
||||||
"subsystem": "sftp",
|
}, nil
|
||||||
"username": sc.User(),
|
|
||||||
"ip": sc.RemoteAddr(),
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the sftp.Handlers for this struct.
|
// Handlers returns the sftp.Handlers for this struct.
|
||||||
func (h *Handler) Handlers() sftp.Handlers {
|
func (h *Handler) Handlers() sftp.Handlers {
|
||||||
return sftp.Handlers{
|
return sftp.Handlers{
|
||||||
FileGet: h,
|
FileGet: h,
|
||||||
@@ -121,11 +129,16 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
|
|||||||
}
|
}
|
||||||
// Chown may or may not have been called in the touch function, so always do
|
// Chown may or may not have been called in the touch function, so always do
|
||||||
// it at this point to avoid the file being improperly owned.
|
// it at this point to avoid the file being improperly owned.
|
||||||
_ = h.server.Filesystem().Chown(request.Filepath)
|
_ = h.fs.Chown(request.Filepath)
|
||||||
|
event := server.ActivitySftpWrite
|
||||||
|
if permission == PermissionFileCreate {
|
||||||
|
event = server.ActivitySftpCreate
|
||||||
|
}
|
||||||
|
h.events.MustLog(event, FileAction{Entity: request.Filepath})
|
||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Filecmd handler for basic SFTP system calls related to files, but not anything to do with reading
|
// Filecmd hander for basic SFTP system calls related to files, but not anything to do with reading
|
||||||
// or writing to those files.
|
// or writing to those files.
|
||||||
func (h *Handler) Filecmd(request *sftp.Request) error {
|
func (h *Handler) Filecmd(request *sftp.Request) error {
|
||||||
if h.ro {
|
if h.ro {
|
||||||
@@ -172,6 +185,7 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
|
|||||||
l.WithField("error", err).Error("failed to rename file")
|
l.WithField("error", err).Error("failed to rename file")
|
||||||
return sftp.ErrSSHFxFailure
|
return sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
|
h.events.MustLog(server.ActivitySftpRename, FileAction{Entity: request.Filepath, Target: request.Target})
|
||||||
break
|
break
|
||||||
// Handle deletion of a directory. This will properly delete all of the files and
|
// Handle deletion of a directory. This will properly delete all of the files and
|
||||||
// folders within that directory if it is not already empty (unlike a lot of SFTP
|
// folders within that directory if it is not already empty (unlike a lot of SFTP
|
||||||
@@ -180,10 +194,12 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
|
|||||||
if !h.can(PermissionFileDelete) {
|
if !h.can(PermissionFileDelete) {
|
||||||
return sftp.ErrSSHFxPermissionDenied
|
return sftp.ErrSSHFxPermissionDenied
|
||||||
}
|
}
|
||||||
if err := h.fs.Delete(request.Filepath); err != nil {
|
p := filepath.Clean(request.Filepath)
|
||||||
|
if err := h.fs.Delete(p); err != nil {
|
||||||
l.WithField("error", err).Error("failed to remove directory")
|
l.WithField("error", err).Error("failed to remove directory")
|
||||||
return sftp.ErrSSHFxFailure
|
return sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
|
h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
|
||||||
return sftp.ErrSSHFxOk
|
return sftp.ErrSSHFxOk
|
||||||
// Handle requests to create a new Directory.
|
// Handle requests to create a new Directory.
|
||||||
case "Mkdir":
|
case "Mkdir":
|
||||||
@@ -191,11 +207,12 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
|
|||||||
return sftp.ErrSSHFxPermissionDenied
|
return sftp.ErrSSHFxPermissionDenied
|
||||||
}
|
}
|
||||||
name := strings.Split(filepath.Clean(request.Filepath), "/")
|
name := strings.Split(filepath.Clean(request.Filepath), "/")
|
||||||
err := h.fs.CreateDirectory(name[len(name)-1], strings.Join(name[0:len(name)-1], "/"))
|
p := strings.Join(name[0:len(name)-1], "/")
|
||||||
if err != nil {
|
if err := h.fs.CreateDirectory(name[len(name)-1], p); err != nil {
|
||||||
l.WithField("error", err).Error("failed to create directory")
|
l.WithField("error", err).Error("failed to create directory")
|
||||||
return sftp.ErrSSHFxFailure
|
return sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
|
h.events.MustLog(server.ActivitySftpCreateDirectory, FileAction{Entity: request.Filepath})
|
||||||
break
|
break
|
||||||
// Support creating symlinks between files. The source and target must resolve within
|
// Support creating symlinks between files. The source and target must resolve within
|
||||||
// the server home directory.
|
// the server home directory.
|
||||||
@@ -228,6 +245,7 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
|
|||||||
l.WithField("error", err).Error("failed to remove a file")
|
l.WithField("error", err).Error("failed to remove a file")
|
||||||
return sftp.ErrSSHFxFailure
|
return sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
|
h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
|
||||||
return sftp.ErrSSHFxOk
|
return sftp.ErrSSHFxOk
|
||||||
default:
|
default:
|
||||||
return sftp.ErrSSHFxOpUnsupported
|
return sftp.ErrSSHFxOpUnsupported
|
||||||
@@ -287,15 +305,10 @@ func (h *Handler) can(permission string) bool {
|
|||||||
if h.server.IsSuspended() {
|
if h.server.IsSuspended() {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// SFTPServer owners and super admins have their permissions returned as '[*]' via the Panel
|
|
||||||
// API, so for the sake of speed do an initial check for that before iterating over the
|
|
||||||
// entire array of permissions.
|
|
||||||
if len(h.permissions) == 1 && h.permissions[0] == "*" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for _, p := range h.permissions {
|
for _, p := range h.permissions {
|
||||||
if p == permission {
|
// If we match the permission specifically, or the user has been granted the "*"
|
||||||
|
// permission because they're an admin, let them through.
|
||||||
|
if p == permission || p == "*" {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
292
sftp/server.go
292
sftp/server.go
@@ -1,17 +1,11 @@
|
|||||||
package sftp
|
package sftp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/ed25519"
|
|
||||||
"crypto/elliptic"
|
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@@ -22,6 +16,7 @@ import (
|
|||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/pkg/sftp"
|
"github.com/pkg/sftp"
|
||||||
|
"golang.org/x/crypto/ed25519"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
@@ -56,7 +51,18 @@ func New(m *server.Manager) *SFTPServer {
|
|||||||
// SFTP connections. This will automatically generate an ED25519 key if one does
|
// SFTP connections. This will automatically generate an ED25519 key if one does
|
||||||
// not already exist on the system for host key verification purposes.
|
// not already exist on the system for host key verification purposes.
|
||||||
func (c *SFTPServer) Run() error {
|
func (c *SFTPServer) Run() error {
|
||||||
keys, err := c.loadPrivateKeys()
|
if _, err := os.Stat(c.PrivateKeyPath()); os.IsNotExist(err) {
|
||||||
|
if err := c.generateED25519PrivateKey(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
return errors.Wrap(err, "sftp: could not stat private key file")
|
||||||
|
}
|
||||||
|
pb, err := os.ReadFile(c.PrivateKeyPath())
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "sftp: could not read private key file")
|
||||||
|
}
|
||||||
|
private, err := ssh.ParsePrivateKey(pb)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -64,37 +70,42 @@ func (c *SFTPServer) Run() error {
|
|||||||
conf := &ssh.ServerConfig{
|
conf := &ssh.ServerConfig{
|
||||||
NoClientAuth: false,
|
NoClientAuth: false,
|
||||||
MaxAuthTries: 6,
|
MaxAuthTries: 6,
|
||||||
PasswordCallback: c.passwordCallback,
|
PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
|
||||||
PublicKeyCallback: c.publicKeyCallback,
|
return c.makeCredentialsRequest(conn, remote.SftpAuthPassword, string(password))
|
||||||
}
|
},
|
||||||
|
PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
|
||||||
for _, k := range keys {
|
return c.makeCredentialsRequest(conn, remote.SftpAuthPublicKey, string(ssh.MarshalAuthorizedKey(key)))
|
||||||
conf.AddHostKey(k)
|
},
|
||||||
}
|
}
|
||||||
|
conf.AddHostKey(private)
|
||||||
|
|
||||||
listener, err := net.Listen("tcp", c.Listen)
|
listener, err := net.Listen("tcp", c.Listen)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.WithField("listen", c.Listen).Info("sftp server listening for connections")
|
public := string(ssh.MarshalAuthorizedKey(private.PublicKey()))
|
||||||
|
log.WithField("listen", c.Listen).WithField("public_key", strings.Trim(public, "\n")).Info("sftp server listening for connections")
|
||||||
|
|
||||||
for {
|
for {
|
||||||
if conn, _ := listener.Accept(); conn != nil {
|
if conn, _ := listener.Accept(); conn != nil {
|
||||||
go func(conn net.Conn) {
|
go func(conn net.Conn) {
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
c.AcceptInbound(conn, conf)
|
if err := c.AcceptInbound(conn, conf); err != nil {
|
||||||
|
log.WithField("error", err).Error("sftp: failed to accept inbound connection")
|
||||||
|
}
|
||||||
}(conn)
|
}(conn)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handles an inbound connection to the instance and determines if we should serve the
|
// AcceptInbound handles an inbound connection to the instance and determines if we should
|
||||||
// request or not.
|
// serve the request or not.
|
||||||
func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
|
func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) error {
|
||||||
// Before beginning a handshake must be performed on the incoming net.Conn
|
// Before beginning a handshake must be performed on the incoming net.Conn
|
||||||
sconn, chans, reqs, err := ssh.NewServerConn(conn, config)
|
sconn, chans, reqs, err := ssh.NewServerConn(conn, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
defer sconn.Close()
|
defer sconn.Close()
|
||||||
go ssh.DiscardRequests(reqs)
|
go ssh.DiscardRequests(reqs)
|
||||||
@@ -103,7 +114,7 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
|
|||||||
// If its not a session channel we just move on because its not something we
|
// If its not a session channel we just move on because its not something we
|
||||||
// know how to handle at this point.
|
// know how to handle at this point.
|
||||||
if ch.ChannelType() != "session" {
|
if ch.ChannelType() != "session" {
|
||||||
_ = ch.Reject(ssh.UnknownChannelType, "unknown channel type")
|
ch.Reject(ssh.UnknownChannelType, "unknown channel type")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,7 +128,7 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
|
|||||||
// Channels have a type that is dependent on the protocol. For SFTP
|
// Channels have a type that is dependent on the protocol. For SFTP
|
||||||
// this is "subsystem" with a payload that (should) be "sftp". Discard
|
// this is "subsystem" with a payload that (should) be "sftp". Discard
|
||||||
// anything else we receive ("pty", "shell", etc)
|
// anything else we receive ("pty", "shell", etc)
|
||||||
_ = req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
|
req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
|
||||||
}
|
}
|
||||||
}(requests)
|
}(requests)
|
||||||
|
|
||||||
@@ -135,180 +146,61 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
|
|||||||
return s.ID() == uuid
|
return s.ID() == uuid
|
||||||
})
|
})
|
||||||
if srv == nil {
|
if srv == nil {
|
||||||
_ = conn.Close()
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Spin up a SFTP server instance for the authenticated user's server allowing
|
// Spin up a SFTP server instance for the authenticated user's server allowing
|
||||||
// them access to the underlying filesystem.
|
// them access to the underlying filesystem.
|
||||||
handler := sftp.NewRequestServer(channel, NewHandler(sconn, srv).Handlers())
|
handler, err := NewHandler(sconn, srv)
|
||||||
if err := handler.Serve(); err == io.EOF {
|
if err != nil {
|
||||||
_ = handler.Close()
|
return errors.WithStackIf(err)
|
||||||
}
|
}
|
||||||
|
rs := sftp.NewRequestServer(channel, handler.Handlers())
|
||||||
|
if err := rs.Serve(); err == io.EOF {
|
||||||
|
_ = rs.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *SFTPServer) loadPrivateKeys() ([]ssh.Signer, error) {
|
return nil
|
||||||
if _, err := os.Stat(path.Join(c.BasePath, ".sftp/id_rsa")); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := c.generateRSAPrivateKey(); err != nil {
|
// Generates a new ED25519 private key that is used for host authentication when
|
||||||
return nil, err
|
// a user connects to the SFTP server.
|
||||||
}
|
func (c *SFTPServer) generateED25519PrivateKey() error {
|
||||||
}
|
_, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||||
rsaBytes, err := ioutil.ReadFile(path.Join(c.BasePath, ".sftp/id_rsa"))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "sftp/server: could not read private key file")
|
return errors.Wrap(err, "sftp: failed to generate ED25519 private key")
|
||||||
}
|
}
|
||||||
rsaPrivateKey, err := ssh.ParsePrivateKey(rsaBytes)
|
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath()), 0o755); err != nil {
|
||||||
|
return errors.Wrap(err, "sftp: could not create internal sftp data directory")
|
||||||
|
}
|
||||||
|
o, err := os.OpenFile(c.PrivateKeyPath(), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return errors.WithStack(err)
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := os.Stat(path.Join(c.BasePath, ".sftp/id_ecdsa")); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := c.generateECDSAPrivateKey(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ecdsaBytes, err := ioutil.ReadFile(path.Join(c.BasePath, ".sftp/id_ecdsa"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "sftp/server: could not read private key file")
|
|
||||||
}
|
|
||||||
ecdsaPrivateKey, err := ssh.ParsePrivateKey(ecdsaBytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := os.Stat(path.Join(c.BasePath, ".sftp/id_ed25519")); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := c.generateEd25519PrivateKey(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ed25519Bytes, err := ioutil.ReadFile(path.Join(c.BasePath, ".sftp/id_ed25519"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "sftp/server: could not read private key file")
|
|
||||||
}
|
|
||||||
ed25519PrivateKey, err := ssh.ParsePrivateKey(ed25519Bytes)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return []ssh.Signer{
|
|
||||||
rsaPrivateKey,
|
|
||||||
ecdsaPrivateKey,
|
|
||||||
ed25519PrivateKey,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateRSAPrivateKey generates a RSA-4096 private key that will be used by the SFTP server.
|
|
||||||
func (c *SFTPServer) generateRSAPrivateKey() error {
|
|
||||||
key, err := rsa.GenerateKey(rand.Reader, 4096)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("rsa")), 0o755); err != nil {
|
|
||||||
return errors.Wrap(err, "sftp/server: could not create .sftp directory")
|
|
||||||
}
|
|
||||||
o, err := os.OpenFile(c.PrivateKeyPath("rsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
defer o.Close()
|
defer o.Close()
|
||||||
|
|
||||||
if err := pem.Encode(o, &pem.Block{
|
b, err := x509.MarshalPKCS8PrivateKey(priv)
|
||||||
Type: "RSA PRIVATE KEY",
|
if err != nil {
|
||||||
Bytes: x509.MarshalPKCS1PrivateKey(key),
|
return errors.Wrap(err, "sftp: failed to marshal private key into bytes")
|
||||||
}); err != nil {
|
}
|
||||||
return err
|
if err := pem.Encode(o, &pem.Block{Type: "PRIVATE KEY", Bytes: b}); err != nil {
|
||||||
|
return errors.Wrap(err, "sftp: failed to write ED25519 private key to disk")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// generateECDSAPrivateKey generates a ECDSA-P256 private key that will be used by the SFTP server.
|
func (c *SFTPServer) makeCredentialsRequest(conn ssh.ConnMetadata, t remote.SftpAuthRequestType, p string) (*ssh.Permissions, error) {
|
||||||
func (c *SFTPServer) generateECDSAPrivateKey() error {
|
|
||||||
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("ecdsa")), 0o755); err != nil {
|
|
||||||
return errors.Wrap(err, "sftp/server: could not create .sftp directory")
|
|
||||||
}
|
|
||||||
o, err := os.OpenFile(c.PrivateKeyPath("ecdsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer o.Close()
|
|
||||||
|
|
||||||
privBytes, err := x509.MarshalPKCS8PrivateKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := pem.Encode(o, &pem.Block{
|
|
||||||
Type: "PRIVATE KEY",
|
|
||||||
Bytes: privBytes,
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// generateEd25519PrivateKey generates an ed25519 private key that will be used by the SFTP server.
|
|
||||||
func (c *SFTPServer) generateEd25519PrivateKey() error {
|
|
||||||
_, key, err := ed25519.GenerateKey(rand.Reader)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("ed25519")), 0o755); err != nil {
|
|
||||||
return errors.Wrap(err, "sftp/server: could not create .sftp directory")
|
|
||||||
}
|
|
||||||
o, err := os.OpenFile(c.PrivateKeyPath("ed25519"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer o.Close()
|
|
||||||
|
|
||||||
privBytes, err := x509.MarshalPKCS8PrivateKey(key)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := pem.Encode(o, &pem.Block{
|
|
||||||
Type: "PRIVATE KEY",
|
|
||||||
Bytes: privBytes,
|
|
||||||
}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrivateKeyPath returns the path the host private key for this server instance.
|
|
||||||
func (c *SFTPServer) PrivateKeyPath(name string) string {
|
|
||||||
return path.Join(c.BasePath, ".sftp", "id_"+name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A function capable of validating user credentials with the Panel API.
|
|
||||||
func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
|
|
||||||
request := remote.SftpAuthRequest{
|
request := remote.SftpAuthRequest{
|
||||||
|
Type: t,
|
||||||
User: conn.User(),
|
User: conn.User(),
|
||||||
Pass: string(pass),
|
Pass: p,
|
||||||
IP: conn.RemoteAddr().String(),
|
IP: conn.RemoteAddr().String(),
|
||||||
SessionID: conn.SessionID(),
|
SessionID: conn.SessionID(),
|
||||||
ClientVersion: conn.ClientVersion(),
|
ClientVersion: conn.ClientVersion(),
|
||||||
Type: "password",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logger := log.WithFields(log.Fields{"subsystem": "sftp", "username": conn.User(), "ip": conn.RemoteAddr().String()})
|
logger := log.WithFields(log.Fields{"subsystem": "sftp", "method": request.Type, "username": request.User, "ip": request.IP})
|
||||||
logger.Debug("validating credentials for SFTP connection")
|
logger.Debug("validating credentials for SFTP connection")
|
||||||
|
|
||||||
if !validUsernameRegexp.MatchString(request.User) {
|
if !validUsernameRegexp.MatchString(request.User) {
|
||||||
@@ -316,11 +208,6 @@ func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.
|
|||||||
return nil, &remote.SftpInvalidCredentialsError{}
|
return nil, &remote.SftpInvalidCredentialsError{}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(pass) < 1 {
|
|
||||||
logger.Warn("failed to validate user credentials (invalid format)")
|
|
||||||
return nil, &remote.SftpInvalidCredentialsError{}
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := c.manager.Client().ValidateSftpCredentials(context.Background(), request)
|
resp, err := c.manager.Client().ValidateSftpCredentials(context.Background(), request)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if _, ok := err.(*remote.SftpInvalidCredentialsError); ok {
|
if _, ok := err.(*remote.SftpInvalidCredentialsError); ok {
|
||||||
@@ -332,66 +219,19 @@ func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.
|
|||||||
}
|
}
|
||||||
|
|
||||||
logger.WithField("server", resp.Server).Debug("credentials validated and matched to server instance")
|
logger.WithField("server", resp.Server).Debug("credentials validated and matched to server instance")
|
||||||
sshPerm := &ssh.Permissions{
|
permissions := ssh.Permissions{
|
||||||
Extensions: map[string]string{
|
Extensions: map[string]string{
|
||||||
|
"ip": conn.RemoteAddr().String(),
|
||||||
"uuid": resp.Server,
|
"uuid": resp.Server,
|
||||||
"user": conn.User(),
|
"user": resp.User,
|
||||||
"permissions": strings.Join(resp.Permissions, ","),
|
"permissions": strings.Join(resp.Permissions, ","),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
return sshPerm, nil
|
return &permissions, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *SFTPServer) publicKeyCallback(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
|
// PrivateKeyPath returns the path the host private key for this server instance.
|
||||||
request := remote.SftpAuthRequest{
|
func (c *SFTPServer) PrivateKeyPath() string {
|
||||||
User: conn.User(),
|
return path.Join(c.BasePath, ".sftp/id_ed25519")
|
||||||
Pass: "KEKW",
|
|
||||||
IP: conn.RemoteAddr().String(),
|
|
||||||
SessionID: conn.SessionID(),
|
|
||||||
ClientVersion: conn.ClientVersion(),
|
|
||||||
Type: "publicKey",
|
|
||||||
}
|
|
||||||
|
|
||||||
logger := log.WithFields(log.Fields{"subsystem": "sftp", "username": conn.User(), "ip": conn.RemoteAddr().String()})
|
|
||||||
logger.Debug("validating public key for SFTP connection")
|
|
||||||
|
|
||||||
if !validUsernameRegexp.MatchString(request.User) {
|
|
||||||
logger.Warn("failed to validate user credentials (invalid format)")
|
|
||||||
return nil, &remote.SftpInvalidCredentialsError{}
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := c.manager.Client().ValidateSftpCredentials(context.Background(), request)
|
|
||||||
if err != nil {
|
|
||||||
if _, ok := err.(*remote.SftpInvalidCredentialsError); ok {
|
|
||||||
logger.Warn("failed to validate user credentials (invalid username or password)")
|
|
||||||
} else {
|
|
||||||
logger.WithField("error", err).Error("encountered an error while trying to validate user credentials")
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(resp.SSHKeys) < 1 {
|
|
||||||
return nil, &remote.SftpInvalidCredentialsError{}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, k := range resp.SSHKeys {
|
|
||||||
storedPublicKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !bytes.Equal(key.Marshal(), storedPublicKey.Marshal()) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ssh.Permissions{
|
|
||||||
Extensions: map[string]string{
|
|
||||||
"uuid": resp.Server,
|
|
||||||
"user": conn.User(),
|
|
||||||
"permissions": strings.Join(resp.Permissions, ","),
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return nil, &remote.SftpInvalidCredentialsError{}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ type SinkPool struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
|
// NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
|
||||||
// server instance for it's full lifetime.
|
// server instance for its full lifetime.
|
||||||
func NewSinkPool() *SinkPool {
|
func NewSinkPool() *SinkPool {
|
||||||
return &SinkPool{}
|
return &SinkPool{}
|
||||||
}
|
}
|
||||||
|
|||||||
29
system/strings.go
Normal file
29
system/strings.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package system
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)
|
||||||
|
|
||||||
|
const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
|
||||||
|
|
||||||
|
// RandomString generates a random string of alpha-numeric characters using a
|
||||||
|
// pseudo-random number generator. The output of this function IS NOT cryptographically
|
||||||
|
// secure, it is used solely for generating random strings outside a security context.
|
||||||
|
func RandomString(n int) string {
|
||||||
|
var b strings.Builder
|
||||||
|
b.Grow(n)
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
b.WriteByte(characters[rand.Intn(len(characters))])
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimIPSuffix removes the internal port value from an IP address to ensure we're only
|
||||||
|
// ever working directly with the IP address.
|
||||||
|
func TrimIPSuffix(s string) string {
|
||||||
|
return ipTrimRegex.ReplaceAllString(s, "")
|
||||||
|
}
|
||||||
9
wings.go
9
wings.go
@@ -1,9 +1,18 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/cmd"
|
"github.com/pterodactyl/wings/cmd"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
// Since we make use of the math/rand package in the code, especially for generating
|
||||||
|
// non-cryptographically secure random strings we need to seed the RNG. Just make use
|
||||||
|
// of the current time for this.
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
|
||||||
|
// Execute the main binary code.
|
||||||
cmd.Execute()
|
cmd.Execute()
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user