Merge branch 'develop' into v2

Dane Everitt committed on 2021-09-19 11:17:05 -07:00
commit 9e98287172
16 changed files with 248 additions and 103 deletions


@@ -60,7 +60,7 @@ jobs:
 run: go test ./...
 - name: Upload Artifact
 uses: actions/upload-artifact@v2
-if: ${{ matrix.go == '^1.16' && (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') }}
+if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
 with:
 name: wings_${{ matrix.goos }}_${{ matrix.goarch }}
 path: build/wings_${{ matrix.goos }}_${{ matrix.goarch }}


@@ -1,10 +1,18 @@
 # Changelog
+## v1.5.1
+### Added
+* Global configuration option for toggling server crash detection (`system.crash_detection.enabled`)
+* RPM specfile
+
 ## v1.5.0
 ### Fixed
 * Fixes a race condition when setting the application name in the console output for a server.
 * Fixes a server being reinstalled causing the `file_denylist` parameter for an Egg to be ignored until Wings is restarted.
 * Fixes YAML file parser not correctly setting boolean values.
+* Fixes potential issue where the underlying websocket connection is closed but the parent request context is not yet canceled causing a write over a closed connection.
+* Fixes race condition when closing all active websocket connections when a server is deleted.
+* Fixes logic to determine if a server's context is closed out and send a websocket close message to connected clients. Previously this fired off whenever the request itself was closed, and not when the server context was closed.
 ### Added
 * Exposes `8080` in the default Docker setup to better support proxy tools.
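For reference, the new global crash detection toggle lives in the Wings configuration file under `system.crash_detection.enabled` and defaults to `true` (see the config and server sync hunks later in this commit). A minimal sketch of consulting it from code, assuming the configuration has already been loaded; `config.Get()` and the field names come from the hunks below, while the wrapper function itself is illustrative only:

// Sketch only; not part of this commit.
func crashDetectionEnabledGlobally() bool {
	// Maps to system.crash_detection.enabled (yaml:"enabled", default "true").
	// Each server seeds its own crash detection setting from this value when
	// syncing with the Panel, and the Panel-provided settings may override it.
	return config.Get().System.CrashDetection.CrashDetectionEnabled
}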


@@ -1,6 +1,7 @@
 package cmd

 import (
+"context"
 "crypto/tls"
 "errors"
 "fmt"
@@ -209,7 +210,17 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 st = state
 }
-r, err := s.Environment.IsRunning()
+// Use a timed context here to avoid booting issues where Docker hangs for a
+// specific container that would cause Wings to be un-bootable until the entire
+// machine is rebooted. It is much better for us to just have a single failed
+// server instance than an entire offline node.
+//
+// @see https://github.com/pterodactyl/panel/issues/2475
+// @see https://github.com/pterodactyl/panel/issues/3358
+ctx, cancel := context.WithTimeout(cmd.Context(), time.Second * 30)
+defer cancel()
+r, err := s.Environment.IsRunning(ctx)
 // We ignore missing containers because we don't want to actually block booting of wings at this
 // point. If we didn't do this, and you pruned all the images and then started wings you could
 // end up waiting a long period of time for all the images to be re-pulled on Wings boot rather
@@ -238,7 +249,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 s.Log().Info("detected server is running, re-attaching to process...")
 s.Environment.SetState(environment.ProcessRunningState)
-if err := s.Environment.Attach(); err != nil {
+if err := s.Environment.Attach(ctx); err != nil {
 s.Log().WithField("error", err).Warn("failed to attach to running server environment")
 }
 } else {


@@ -48,10 +48,12 @@ var DefaultTLSConfig = &tls.Config{
 CurvePreferences: []tls.CurveID{tls.X25519, tls.CurveP256},
 }

-var mu sync.RWMutex
-var _config *Configuration
-var _jwtAlgo *jwt.HMACSHA
-var _debugViaFlag bool
+var (
+mu sync.RWMutex
+_config *Configuration
+_jwtAlgo *jwt.HMACSHA
+_debugViaFlag bool
+)

 // Locker specific to writing the configuration to the disk, this happens
 // in areas that might already be locked, so we don't want to crash the process.
@@ -181,6 +183,9 @@ type SystemConfiguration struct {
 }

 type CrashDetection struct {
+// CrashDetectionEnabled sets if crash detection is enabled globally for all servers on this node.
+CrashDetectionEnabled bool `default:"true" yaml:"enabled"`
+
 // Determines if Wings should detect a server that stops with a normal exit code of
 // "0" as being crashed if the process stopped without any Wings interaction. E.g.
 // the user did not press the stop button, but the process stopped cleanly.
@@ -375,7 +380,7 @@ func WriteToDisk(c *Configuration) error {
 if err != nil {
 return err
 }
-if err := ioutil.WriteFile(c.path, b, 0600); err != nil {
+if err := ioutil.WriteFile(c.path, b, 0o600); err != nil {
 return err
 }
 return nil
@@ -470,7 +475,7 @@ func FromFile(path string) error {
 func ConfigureDirectories() error {
 root := _config.System.RootDirectory
 log.WithField("path", root).Debug("ensuring root data directory exists")
-if err := os.MkdirAll(root, 0700); err != nil {
+if err := os.MkdirAll(root, 0o700); err != nil {
 return err
 }
@@ -491,17 +496,17 @@ func ConfigureDirectories() error {
 }

 log.WithField("path", _config.System.Data).Debug("ensuring server data directory exists")
-if err := os.MkdirAll(_config.System.Data, 0700); err != nil {
+if err := os.MkdirAll(_config.System.Data, 0o700); err != nil {
 return err
 }

 log.WithField("path", _config.System.ArchiveDirectory).Debug("ensuring archive data directory exists")
-if err := os.MkdirAll(_config.System.ArchiveDirectory, 0700); err != nil {
+if err := os.MkdirAll(_config.System.ArchiveDirectory, 0o700); err != nil {
 return err
 }

 log.WithField("path", _config.System.BackupDirectory).Debug("ensuring backup data directory exists")
-if err := os.MkdirAll(_config.System.BackupDirectory, 0700); err != nil {
+if err := os.MkdirAll(_config.System.BackupDirectory, 0o700); err != nil {
 return err
 }


@@ -45,7 +45,7 @@ func (nw noopWriter) Write(b []byte) (int, error) {
 // Calling this function will poll resources for the container in the background
 // until the provided context is canceled by the caller. Failure to cancel said
 // context will cause background memory leaks as the goroutine will not exit.
-func (e *Environment) Attach() error {
+func (e *Environment) Attach(ctx context.Context) error {
 if e.IsAttached() {
 return nil
 }
@@ -62,14 +62,17 @@ func (e *Environment) Attach() error {
 }

 // Set the stream again with the container.
-if st, err := e.client.ContainerAttach(context.Background(), e.Id, opts); err != nil {
+if st, err := e.client.ContainerAttach(ctx, e.Id, opts); err != nil {
 return err
 } else {
 e.SetStream(&st)
 }

 go func() {
-ctx, cancel := context.WithCancel(context.Background())
+// Don't use the context provided to the function, that'll cause the polling to
+// exit unexpectedly. We want a custom context for this, the one passed to the
+// function is to avoid a hang situation when trying to attach to a container.
+pollCtx, cancel := context.WithCancel(context.Background())
 defer cancel()
 defer e.stream.Close()
 defer func() {
@@ -78,7 +81,7 @@ func (e *Environment) Attach() error {
 }()

 go func() {
-if err := e.pollResources(ctx); err != nil {
+if err := e.pollResources(pollCtx); err != nil {
 if !errors.Is(err, context.Canceled) {
 e.log().WithField("error", err).Error("error during environment resource polling")
 } else {


@@ -128,20 +128,20 @@ func (e *Environment) Exists() (bool, error) {
 return true, nil
 }

-// Determines if the server's docker container is currently running. If there is no container
-// present, an error will be raised (since this shouldn't be a case that ever happens under
-// correctly developed circumstances).
+// IsRunning determines if the server's docker container is currently running.
+// If there is no container present, an error will be raised (since this
+// shouldn't be a case that ever happens under correctly developed
+// circumstances).
 //
-// You can confirm if the instance wasn't found by using client.IsErrNotFound from the Docker
-// API.
+// You can confirm if the instance wasn't found by using client.IsErrNotFound
+// from the Docker API.
 //
 // @see docker/client/errors.go
-func (e *Environment) IsRunning() (bool, error) {
-c, err := e.client.ContainerInspect(context.Background(), e.Id)
+func (e *Environment) IsRunning(ctx context.Context) (bool, error) {
+c, err := e.client.ContainerInspect(ctx, e.Id)
 if err != nil {
 return false, err
 }
 return c.State.Running, nil
 }
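As the comment notes, a missing container surfaces as an error that callers can identify with client.IsErrNotFound. A minimal caller sketch under that assumption (the helper name is hypothetical; client.IsErrNotFound, the Environment field, and the timeout pattern all appear elsewhere in this commit):

// isRunningOrMissing is a hypothetical helper, not part of this commit; it
// treats a missing container as "not running" rather than a hard failure.
// Assumes imports: context, time, github.com/docker/docker/client,
// github.com/pterodactyl/wings/server.
func isRunningOrMissing(s *server.Server) (bool, error) {
	// Bound the Docker inspect call so a hung daemon cannot block the caller.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	running, err := s.Environment.IsRunning(ctx)
	if err != nil {
		if client.IsErrNotFound(err) {
			// No container exists for this server yet.
			return false, nil
		}
		return false, err
	}
	return running, nil
}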


@@ -17,16 +17,17 @@ import (
 "github.com/pterodactyl/wings/remote"
 )

-// Run before the container starts and get the process configuration from the Panel.
-// This is important since we use this to check configuration files as well as ensure
-// we always have the latest version of an egg available for server processes.
+// OnBeforeStart runs before the container starts and gets the process
+// configuration from the Panel. This is important since we use this to check
+// configuration files as well as ensure we always have the latest version of
+// an egg available for server processes.
 //
-// This process will also confirm that the server environment exists and is in a bootable
-// state. This ensures that unexpected container deletion while Wings is running does
-// not result in the server becoming un-bootable.
-func (e *Environment) OnBeforeStart() error {
+// This process will also confirm that the server environment exists and is in
+// a bootable state. This ensures that unexpected container deletion while Wings
+// is running does not result in the server becoming un-bootable.
+func (e *Environment) OnBeforeStart(ctx context.Context) error {
 // Always destroy and re-create the server container to ensure that synced data from the Panel is used.
-if err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
+if err := e.client.ContainerRemove(ctx, e.Id, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
 if !client.IsErrNotFound(err) {
 return errors.WrapIf(err, "environment/docker: failed to remove container during pre-boot")
 }
@@ -46,10 +47,10 @@ func (e *Environment) OnBeforeStart() error {
 return nil
 }

-// Starts the server environment and begins piping output to the event listeners for the
-// console. If a container does not exist, or needs to be rebuilt that will happen in the
-// call to OnBeforeStart().
-func (e *Environment) Start() error {
+// Start starts the server environment and begins piping output to the event
+// listeners for the console. If a container does not exist, or needs to be
+// rebuilt, that will happen in the call to OnBeforeStart().
+func (e *Environment) Start(ctx context.Context) error {
 sawError := false
 // If sawError is set to true there was an error somewhere in the pipeline that
@@ -65,7 +66,7 @@ func (e *Environment) Start() error {
 }
 }()

-if c, err := e.client.ContainerInspect(context.Background(), e.Id); err != nil {
+if c, err := e.client.ContainerInspect(ctx, e.Id); err != nil {
 // Do nothing if the container is not found, we just don't want to continue
 // to the next block of code here. This check was inlined here to guard against
 // a nil-pointer when checking c.State below.
@@ -79,7 +80,7 @@ func (e *Environment) Start() error {
 if c.State.Running {
 e.SetState(environment.ProcessRunningState)
-return e.Attach()
+return e.Attach(ctx)
 }

 // Truncate the log file, so we don't end up outputting a bunch of useless log information
@@ -101,21 +102,23 @@ func (e *Environment) Start() error {
 // Run the before start function and wait for it to finish. This will validate that the container
 // exists on the system, and rebuild the container if that is required for server booting to
 // occur.
-if err := e.OnBeforeStart(); err != nil {
+if err := e.OnBeforeStart(ctx); err != nil {
 return errors.WithStackIf(err)
 }

-ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+// If we cannot start & attach to the container in 30 seconds, something has gone
+// quite sideways and we should stop trying to avoid a hanging situation.
+actx, cancel := context.WithTimeout(ctx, time.Second*30)
 defer cancel()

-if err := e.client.ContainerStart(ctx, e.Id, types.ContainerStartOptions{}); err != nil {
+if err := e.client.ContainerStart(actx, e.Id, types.ContainerStartOptions{}); err != nil {
 return errors.WrapIf(err, "environment/docker: failed to start container")
 }

 // No errors, good to continue through.
 sawError = false
-return e.Attach()
+return e.Attach(actx)
 }

 // Stop stops the container that the server is running in. This will allow up to


@@ -1,6 +1,7 @@
 package environment

 import (
+"context"
 "os"

 "github.com/pterodactyl/wings/events"
@@ -41,9 +42,9 @@ type ProcessEnvironment interface {
 // a basic CLI environment this can probably just return true right away.
 Exists() (bool, error)

-// Determines if the environment is currently active and running a server process
-// for this specific server instance.
-IsRunning() (bool, error)
+// IsRunning determines if the environment is currently active and running
+// a server process for this specific server instance.
+IsRunning(ctx context.Context) (bool, error)

 // Performs an update of server resource limits without actually stopping the server
 // process. This only executes if the environment supports it, otherwise it is
@@ -52,11 +53,11 @@ type ProcessEnvironment interface {
 // Runs before the environment is started. If an error is returned starting will
 // not occur, otherwise proceeds as normal.
-OnBeforeStart() error
+OnBeforeStart(ctx context.Context) error

 // Starts a server instance. If the server instance is not in a state where it
 // can be started an error should be returned.
-Start() error
+Start(ctx context.Context) error

 // Stops a server instance. If the server is already stopped an error should
 // not be returned.
@@ -84,10 +85,10 @@ type ProcessEnvironment interface {
 // server.
 Create() error

-// Attaches to the server console environment and allows piping the output to a
-// websocket or other internal tool to monitor output. Also allows you to later
+// Attach attaches to the server console environment and allows piping the output
+// to a websocket or other internal tool to monitor output. Also allows you to later
 // send data into the environment's stdin.
-Attach() error
+Attach(ctx context.Context) error

 // Sends the provided command to the running server instance.
 SendCommand(string) error
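For any out-of-tree ProcessEnvironment implementation, the practical impact of this change is limited to the four methods that now take a context; a sketch on a hypothetical implementation (every other interface method is unchanged and omitted here):

// stubEnv is a hypothetical type used only to illustrate the updated,
// context-aware signatures; the method bodies are placeholders.
type stubEnv struct{}

func (e *stubEnv) IsRunning(ctx context.Context) (bool, error) { return false, nil }
func (e *stubEnv) OnBeforeStart(ctx context.Context) error     { return nil }
func (e *stubEnv) Start(ctx context.Context) error             { return nil }
func (e *stubEnv) Attach(ctx context.Context) error            { return nil }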


@@ -101,7 +101,7 @@ func postServerPower(c *gin.Context) {
 func postServerCommands(c *gin.Context) {
 s := ExtractServer(c)

-if running, err := s.Environment.IsRunning(); err != nil {
+if running, err := s.Environment.IsRunning(c.Request.Context()); err != nil {
 NewServerError(err, s).Abort(c)
 return
 } else if !running {


@@ -12,6 +12,14 @@ import (
 "github.com/pterodactyl/wings/router/websocket"
 )

+var expectedCloseCodes = []int{
+ws.CloseGoingAway,
+ws.CloseAbnormalClosure,
+ws.CloseNormalClosure,
+ws.CloseNoStatusReceived,
+ws.CloseServiceRestart,
+}
+
 // Upgrades a connection to a websocket and passes events along between.
 func getServerWebsocket(c *gin.Context) {
 manager := middleware.ExtractManager(c)
@@ -24,8 +32,10 @@ func getServerWebsocket(c *gin.Context) {
 defer handler.Connection.Close()

 // Create a context that can be canceled when the user disconnects from this
-// socket that will also cancel listeners running in separate threads.
-ctx, cancel := context.WithCancel(context.Background())
+// socket that will also cancel listeners running in separate threads. If the
+// connection itself is terminated listeners using this context will also be
+// closed.
+ctx, cancel := context.WithCancel(c.Request.Context())
 defer cancel()

 // Track this open connection on the server so that we can close them all programmatically
@@ -33,22 +43,19 @@ func getServerWebsocket(c *gin.Context) {
 s.Websockets().Push(handler.Uuid(), &cancel)
 defer s.Websockets().Remove(handler.Uuid())

-// Listen for the context being canceled and then close the websocket connection. This normally
-// just happens because you're disconnecting from the socket in the browser, however in some
-// cases we close the connections programmatically (e.g. deleting the server) and need to send
-// a close message to the websocket so it disconnects.
-go func(ctx context.Context, c *ws.Conn) {
-ListenerLoop:
-for {
+// If the server is deleted we need to send a close message to the connected client
+// so that they disconnect since there will be no more events sent along. Listen for
+// the request context being closed to break this loop, otherwise this routine will
+// be left hanging in the background.
+go func() {
 select {
 case <-ctx.Done():
+break
+case <-s.Context().Done():
 handler.Connection.WriteControl(ws.CloseMessage, ws.FormatCloseMessage(ws.CloseGoingAway, "server deleted"), time.Now().Add(time.Second*5))
-// A break right here without defining the specific loop would only break the select
-// and not actually break the for loop, thus causing this routine to stick around forever.
-break ListenerLoop
+break
 }
-}
-}(ctx, handler.Connection)
+}()

 go handler.ListenForServerEvents(ctx)
 go handler.ListenForExpiration(ctx)
@@ -58,14 +65,7 @@ func getServerWebsocket(c *gin.Context) {
 _, p, err := handler.Connection.ReadMessage()
 if err != nil {
-if !ws.IsCloseError(
-err,
-ws.CloseNormalClosure,
-ws.CloseGoingAway,
-ws.CloseNoStatusReceived,
-ws.CloseServiceRestart,
-ws.CloseAbnormalClosure,
-) {
+if ws.IsUnexpectedCloseError(err, expectedCloseCodes...) {
 s.Log().WithField("error", err).Warn("error handling websocket message for server")
 }
 break


@@ -8,16 +8,14 @@ import (
 "github.com/pterodactyl/wings/server"
 )

-// Checks the time to expiration on the JWT every 30 seconds until the token has
-// expired. If we are within 3 minutes of the token expiring, send a notice over
-// the socket that it is expiring soon. If it has expired, send that notice as well.
+// ListenForExpiration checks the time to expiration on the JWT every 30 seconds
+// until the token has expired. If we are within 3 minutes of the token expiring,
+// send a notice over the socket that it is expiring soon. If it has expired,
+// send that notice as well.
 func (h *Handler) ListenForExpiration(ctx context.Context) {
 // Make a ticker and completion channel that is used to continuously poll the
 // JWT stored in the session to send events to the socket when it is expiring.
 ticker := time.NewTicker(time.Second * 30)
-// Whenever this function is complete, end the ticker, close out the channel,
-// and then close the websocket connection.
 defer ticker.Stop()

 for {
@@ -51,8 +49,9 @@ var e = []string{
 server.TransferStatusEvent,
 }

-// Listens for different events happening on a server and sends them along
-// to the connected websocket.
+// ListenForServerEvents will listen for different events happening on a server
+// and send them along to the connected websocket client. This function will
+// block until the context provided to it is canceled.
 func (h *Handler) ListenForServerEvents(ctx context.Context) {
 h.server.Log().Debug("listening for server events over websocket")
 callback := func(e events.Event) {
@@ -67,13 +66,10 @@ func (h *Handler) ListenForServerEvents(ctx context.Context) {
 h.server.Events().On(evt, &callback)
 }

-go func(ctx context.Context) {
-select {
-case <-ctx.Done():
-// Once this context is stopped, de-register all of the listeners that have been registered.
+<-ctx.Done()
+// Block until the context is stopped and then de-register all of the event listeners
+// that we registered earlier.
 for _, evt := range e {
 h.server.Events().Off(evt, &callback)
 }
-}
-}(ctx)
 }


@@ -368,7 +368,9 @@ func (h *Handler) HandleInbound(m Message) error {
 }
 case SendServerLogsEvent:
 {
-if running, _ := h.server.Environment.IsRunning(); !running {
+ctx, cancel := context.WithTimeout(context.Background(), time.Second * 5)
+defer cancel()
+if running, _ := h.server.Environment.IsRunning(ctx); !running {
 return nil
 }

rpm/ptero-wings.spec (new file, 114 lines)

@@ -0,0 +1,114 @@
Name: ptero-wings
Version: 1.5.0
Release: 1%{?dist}
Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
BuildArch: x86_64
License: MIT
URL: https://github.com/pterodactyl/wings
Source0: https://github.com/pterodactyl/wings/releases/download/v%{version}/wings_linux_amd64
%if 0%{?rhel} && 0%{?rhel} <= 8
BuildRequires: systemd
%else
BuildRequires: systemd-rpm-macros
%endif
%description
Wings is Pterodactyl's server control plane, built for the rapidly
changing gaming industry and designed to be highly performant and
secure. Wings provides an HTTP API allowing you to interface directly
with running server instances, fetch server logs, generate backups,
and control all aspects of the server lifecycle.
In addition, Wings ships with a built-in SFTP server allowing your
system to remain free of Pterodactyl specific dependencies, and
allowing users to authenticate with the same credentials they would
normally use to access the Panel.
%prep
%build
#nothing required
%install
mkdir -p %{buildroot}%{_bindir}
mkdir -p %{buildroot}%{_unitdir}
cp %{_sourcedir}/wings_linux_amd64 %{buildroot}%{_bindir}/wings
cat > %{buildroot}%{_unitdir}/wings.service << EOF
[Unit]
Description=Pterodactyl Wings Daemon
After=docker.service
Requires=docker.service
PartOf=docker.service
StartLimitIntervalSec=600
[Service]
WorkingDirectory=/etc/pterodactyl
ExecStart=/usr/bin/wings
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
StartLimitInterval=180
StartLimitBurst=30
RestartSec=5s
[Install]
WantedBy=multi-user.target
EOF
%files
%attr(0755, root, root) %{_prefix}/bin/wings
%attr(0644, root, root) %{_unitdir}/wings.service
%post
# Reload systemd
systemctl daemon-reload
# Create the required directory structure
mkdir -p /etc/pterodactyl
mkdir -p /var/lib/pterodactyl/{archives,backups,volumes}
mkdir -p /var/log/pterodactyl/install
%preun
systemctl is-active %{name} >/dev/null 2>&1
if [ $? -eq 0 ]; then
systemctl stop %{name}
fi
systemctl is-enabled %{name} >/dev/null 2>&1
if [ $? -eq 0 ]; then
systemctl disable %{name}
fi
%postun
rm -rf /var/log/pterodactyl
%verifyscript
wings --version
%changelog
* Sun Sep 12 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.0-1
- specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
- Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.0
- Fixes a race condition when setting the application name in the console output for a server.
- Fixes a server being reinstalled causing the file_denylist parameter for an Egg to be ignored until Wings is restarted.
- Fixes YAML file parser not correctly setting boolean values.
- Fixes potential issue where the underlying websocket connection is closed but the parent request context is not yet canceled causing a write over a closed connection.
- Fixes race condition when closing all active websocket connections when a server is deleted.
- Fixes logic to determine if a server's context is closed out and send a websocket close message to connected clients. Previously this fired off whenever the request itself was closed, and not when the server context was closed.
- Exposes 8080 in the wings Dockerfile to better support reverse proxy tools.
- Releases are now built using Go 1.17; the minimum version required to build Wings remains Go 1.16.
- Simplified the logic powering server updates to only pull information from the Panel rather than trying to accept updated values. All parts of Wings needing the most up-to-date server details should call Server#Sync() to fetch the latest stored build information.
- Installer#New() no longer requires passing all of the server data as a byte slice; instead, a new Installer#ServerDetails struct is exposed which can be passed and accepts a UUID and whether the server should be started after the installer finishes.
- Removes complicated (and unused) logic during the server installation process that was a hold-over from legacy Wings architectures.
- Removes the PATCH /api/servers/:server endpoint; if you were previously using this API call, it should be replaced with POST /api/servers/:server/sync.
* Wed Aug 25 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.4.7-1
- specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
- Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.4.7
- SFTP access is now properly denied if a server is suspended.
- Correctly uses start_on_completion and crash_detection_enabled for servers.


@@ -128,7 +128,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
 return err
 }

-return s.Environment.Start()
+return s.Environment.Start(s.Context())
 case PowerActionStop:
 // We're specifically waiting for the process to be stopped here, otherwise the lock is released
 // too soon, and you can rack up all sorts of issues.
@@ -151,7 +151,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
 return err
 }

-return s.Environment.Start()
+return s.Environment.Start(s.Context())
 case PowerActionTerminate:
 return s.Environment.Terminate(os.Kill)
 }


@@ -188,7 +188,9 @@ func (s *Server) Sync() error {
 // can be called from scopes where the server may not be fully initialized,
 // therefore other things like the filesystem and environment may not exist yet.
 func (s *Server) SyncWithConfiguration(cfg remote.ServerConfigurationResponse) error {
-c := Configuration{}
+c := Configuration{
+CrashDetectionEnabled: config.Get().System.CrashDetection.CrashDetectionEnabled,
+}
 if err := json.Unmarshal(cfg.Settings, &c); err != nil {
 return errors.WithStackIf(err)
 }
@@ -196,9 +198,9 @@ func (s *Server) SyncWithConfiguration(cfg remote.ServerConfigurationResponse) error {
 s.cfg.mu.Lock()
 defer s.cfg.mu.Unlock()

-// Lock the new configuration. Since we have the defered Unlock above we need
+// Lock the new configuration. Since we have the deferred Unlock above we need
 // to make sure that the NEW configuration object is already locked since that
-// defer is running on the memory address for "s.cfg.mu" which we're explcitly
+// defer is running on the memory address for "s.cfg.mu" which we're explicitly
 // changing on the next line.
 c.mu.Lock()
@@ -259,7 +261,7 @@ func (s *Server) EnsureDataDirectoryExists() error {
 if _, err := os.Lstat(s.fs.Path()); err != nil {
 if os.IsNotExist(err) {
 s.Log().Debug("server: creating root directory and setting permissions")
-if err := os.MkdirAll(s.fs.Path(), 0700); err != nil {
+if err := os.MkdirAll(s.fs.Path(), 0o700); err != nil {
 return errors.WithStack(err)
 }
 if err := s.fs.Chown("/"); err != nil {


@@ -44,11 +44,11 @@ func (w *WebsocketBag) Remove(u uuid.UUID) {
 w.mu.Unlock()
 }

-// CancelAll cancels all the stored cancel functions which has the effect of disconnecting
-// every listening websocket for the server.
+// CancelAll cancels all the stored cancel functions which has the effect of
+// disconnecting every listening websocket for the server.
 func (w *WebsocketBag) CancelAll() {
 w.mu.Lock()
-w.mu.Unlock()
+defer w.mu.Unlock()

 if w.conns != nil {
 for _, cancel := range w.conns {