Compare commits

...

39 Commits

Author SHA1 Message Date
Dane Everitt
a9c81f37b2 Code cleanup for server mounts; mount more timezone data; closes pterodactyl/panel#2073 2020-08-01 20:24:43 -07:00
Dane Everitt
9d350d845f Merge branch 'develop' of https://github.com/pterodactyl/wings into develop 2020-08-01 16:53:28 -07:00
Dane Everitt
8b0b40e377 Update router_server_files.go 2020-08-01 16:53:14 -07:00
Dane Everitt
fef3b001c1 Merge pull request #44 from pterodactyl/feature/file-uploads
Add a upload file endpoint
2020-08-01 15:35:28 -07:00
Dane Everitt
834bcf251e Avoid race conditions due to stream not being completely detached correctly 2020-08-01 15:34:14 -07:00
Dane Everitt
78c5fd219a Remove use of semaphore while restarting; covered by changed power actions handling 2020-08-01 15:22:39 -07:00
Dane Everitt
177aa8e436 Refactor power handling logic to be more robust and able to handle spam clicking and duplicate power actions 2020-08-01 15:20:39 -07:00
Dane Everitt
ecb2cb05ce Rewrite the file/dir chowing to be less intense on the system and use walker function 2020-07-31 22:06:00 -07:00
Dane Everitt
64df3e168f Replace wg with workerpool 2020-07-31 21:56:44 -07:00
Dane Everitt
881cb84605 Actually set file permissions for servers when booting the daemon 2020-07-31 21:55:30 -07:00
Dane Everitt
03ef52c0db More use of workerpools for loading servers from the API 2020-07-31 21:40:43 -07:00
Dane Everitt
f889a193bf Use NumCPU and not GOMAXPROCS 2020-07-31 21:31:53 -07:00
Dane Everitt
5366d0f652 Use a workerpool for configuration file updates
Co-Authored-By: Jakob <schrej@users.noreply.github.com>
2020-07-31 21:25:57 -07:00
Dane Everitt
5e8425ad6a Code cleanup; use a worker pool for updating file permissions to avoid run-away go-routines
Co-Authored-By: Jakob <schrej@users.noreply.github.com>
2020-07-31 21:14:49 -07:00
Dane Everitt
38efb68e8a Merge branch 'develop' of https://github.com/pterodactyl/wings into develop 2020-07-31 21:02:27 -07:00
Dane Everitt
cf33a2464a Code cleanup for event listening and publishing
Co-Authored-By: Jakob <schrej@users.noreply.github.com>
2020-07-31 21:02:25 -07:00
Dane Everitt
cecc72110c Merge pull request #46 from pterodactyl/feature/docker-repo-auth
Add Docker registry authentication
2020-07-31 20:27:35 -07:00
Matthew Penner
8f1ebdd39f Replace errors.Wrap with errors.WithStack 2020-07-31 21:22:01 -06:00
Matthew Penner
776062107b Force upload file overwrite and remove un-needed TODO comments 2020-07-31 21:15:36 -06:00
Dane Everitt
da26b4c5c7 Merge pull request #41 from pterodactyl/fix/race-1
Fix a possible race condition when Unsubscribing from the EventBus
2020-07-31 20:11:21 -07:00
Dane Everitt
5889d0585b Merge pull request #45 from pterodactyl/fix/disk-usage
Force disk usage calculation when loading servers
2020-07-31 20:06:58 -07:00
Dane Everitt
8af26ac864 Merge pull request #43 from pterodactyl/impl/2194
Add the ability to define additional allowed origins
2020-07-31 20:04:10 -07:00
Dane Everitt
d3843e1d28 Merge pull request #42 from pterodactyl/issue/2200
Fix 500 errors on some file routes when accessing a file that doesn't exist
2020-07-31 20:02:39 -07:00
Matthew Penner
5b999db7f3 Add Docker registry authentication 2020-07-31 18:28:40 -06:00
Matthew Penner
afa6fb200e Force disk usage calculation when loading servers 2020-07-31 17:01:02 -06:00
Matthew Penner
b1940426c3 Merge develop into feature/file-uploads 2020-07-31 16:31:06 -06:00
Matthew Penner
43795a4be3 Document config options 2020-07-31 16:21:27 -06:00
Matthew Penner
b811d2474e Add the ability to define additional allowed origins 2020-07-31 16:19:09 -06:00
Matthew Penner
e85b1cecb7 Fix 500 errors on file routes when accessing a file that doesn't exist 2020-07-31 16:01:32 -06:00
Matthew Penner
5036077152 Hopefully fix a possible race condition when Unsubscribing from the EventBus while an event is being Published 2020-07-29 23:18:11 -06:00
Dane Everitt
373dbd355e Better handling of subscribers to avoid a slice panic 2020-07-29 21:56:22 -07:00
Dane Everitt
7f9ec4402a Add emitters for install started/stopped 2020-07-29 21:39:27 -07:00
Dane Everitt
f0d6f67c6b Fix memory leak with websocket not removing unused listeners 2020-07-29 21:39:17 -07:00
Dane Everitt
0b761320cc Fix error handling to be more accurate in the stacks 2020-07-29 20:54:26 -07:00
Dane Everitt
db0dc17937 Fix exception when writing install logs 2020-07-29 20:54:15 -07:00
Dane Everitt
79ee259874 correctly return server resource stats; closes pterodactyl/panel#2183 2020-07-29 20:34:30 -07:00
Dane Everitt
4d8f06a3e0 Use brute 2020-07-19 19:16:01 -07:00
Dane Everitt
f567c2c15c Use the right files 2020-07-19 18:40:35 -07:00
Matthew Penner
7a6397bf17 Add basic file upload support 2020-07-12 16:43:25 -06:00
26 changed files with 629 additions and 405 deletions

View File

@@ -23,7 +23,7 @@ jobs:
run: go test ./... run: go test ./...
- name: Compress binary and make it executable - name: Compress binary and make it executable
run: upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64 run: upx --brute build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
- name: Extract changelog - name: Extract changelog
env: env:
@@ -32,7 +32,6 @@ jobs:
sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG
echo ::set-output name=version_name::`sed -nr "s/^## (${REF:10} .*)$/\1/p" CHANGELOG.md` echo ::set-output name=version_name::`sed -nr "s/^## (${REF:10} .*)$/\1/p" CHANGELOG.md`
- name: Create checksum and add to changelog - name: Create checksum and add to changelog
run: | run: |
SUM=`cd build && sha256sum wings_linux_amd64` SUM=`cd build && sha256sum wings_linux_amd64`
@@ -48,8 +47,8 @@ jobs:
git config --local user.name "Pterodactyl CI" git config --local user.name "Pterodactyl CI"
git checkout -b $BRANCH git checkout -b $BRANCH
git push -u origin $BRANCH git push -u origin $BRANCH
sed -i "s/ Version = \".*\"/ Version = \"${REF:11}\"/" config/app.php sed -i "s/ Version = \".*\"/ Version = \"${REF:11}\"/" system/const.go
git add config/app.php git add system/const.go
git commit -m "bump version for release" git commit -m "bump version for release"
git push git push

View File

@@ -3,6 +3,7 @@ package cmd
import ( import (
"crypto/tls" "crypto/tls"
"fmt" "fmt"
"github.com/gammazero/workerpool"
"net/http" "net/http"
"os" "os"
"path" "path"
@@ -21,7 +22,6 @@ import (
"github.com/pterodactyl/wings/server" "github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/sftp" "github.com/pterodactyl/wings/sftp"
"github.com/pterodactyl/wings/system" "github.com/pterodactyl/wings/system"
"github.com/remeh/sizedwaitgroup"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"go.uber.org/zap" "go.uber.org/zap"
) )
@@ -146,13 +146,6 @@ func rootCmdRun(*cobra.Command, []string) {
}).Info("configured system user successfully") }).Info("configured system user successfully")
} }
log.Info("beginning file permission setting on server data directories")
if err := c.EnsureFilePermissions(); err != nil {
log.WithField("error", err).Error("failed to properly chown data directories")
} else {
log.Info("finished ensuring file permissions")
}
if err := server.LoadDirectory(); err != nil { if err := server.LoadDirectory(); err != nil {
log.WithField("error", err).Fatal("failed to load server configurations") log.WithField("error", err).Fatal("failed to load server configurations")
return return
@@ -172,19 +165,27 @@ func rootCmdRun(*cobra.Command, []string) {
log.WithField("server", s.Id()).Info("loaded configuration for server") log.WithField("server", s.Id()).Info("loaded configuration for server")
} }
// Create a new WaitGroup that limits us to 4 servers being bootstrapped at a time if !c.System.SetPermissionsOnBoot {
log.Warn("server file permission checking is currently disabled on boot!")
}
// Create a new workerpool that limits us to 4 servers being bootstrapped at a time
// on Wings. This allows us to ensure the environment exists, write configurations, // on Wings. This allows us to ensure the environment exists, write configurations,
// and reboot processes without causing a slow-down due to sequential booting. // and reboot processes without causing a slow-down due to sequential booting.
wg := sizedwaitgroup.New(4) pool := workerpool.New(4)
for _, serv := range server.GetServers().All() { for _, serv := range server.GetServers().All() {
wg.Add() s := serv
go func(s *server.Server) { pool.Submit(func() {
defer wg.Done() if c.System.SetPermissionsOnBoot {
s.Log().Info("chowning server data directory")
if err := s.Filesystem.Chown("/"); err != nil {
s.Log().WithField("error", err).Warn("error during server data directory chown")
}
}
s.Log().Info("ensuring server environment exists") s.Log().Info("ensuring server environment exists")
// Create a server environment if none exists currently. This allows us to recover from Docker // Create a server environment if none exists currently. This allows us to recover from Docker
// being reinstalled on the host system for example. // being reinstalled on the host system for example.
if err := s.Environment.Create(); err != nil { if err := s.Environment.Create(); err != nil {
@@ -204,7 +205,7 @@ func rootCmdRun(*cobra.Command, []string) {
// is that it was running, but we see that the container process is not currently running. // is that it was running, but we see that the container process is not currently running.
if r || (!r && s.IsRunning()) { if r || (!r && s.IsRunning()) {
s.Log().Info("detected server is running, re-attaching to process...") s.Log().Info("detected server is running, re-attaching to process...")
if err := s.Environment.Start(); err != nil { if err := s.HandlePowerAction(server.PowerActionStart); err != nil {
s.Log().WithField("error", errors.WithStack(err)).Warn("failed to properly start server detected as already running") s.Log().WithField("error", errors.WithStack(err)).Warn("failed to properly start server detected as already running")
} }
@@ -214,11 +215,11 @@ func rootCmdRun(*cobra.Command, []string) {
// Addresses potentially invalid data in the stored file that can cause Wings to lose // Addresses potentially invalid data in the stored file that can cause Wings to lose
// track of what the actual server state is. // track of what the actual server state is.
s.SetState(server.ProcessOfflineState) s.SetState(server.ProcessOfflineState)
}(serv) })
} }
// Wait until all of the servers are ready to go before we fire up the HTTP server. // Wait until all of the servers are ready to go before we fire up the HTTP server.
wg.Wait() pool.StopWait()
// Initalize SFTP. // Initalize SFTP.
sftp.Initialize(c) sftp.Initialize(c)

View File

@@ -1,19 +1,16 @@
package config package config
import ( import (
"errors"
"fmt" "fmt"
"github.com/apex/log"
"github.com/cobaugh/osrelease" "github.com/cobaugh/osrelease"
"github.com/creasty/defaults" "github.com/creasty/defaults"
"github.com/gbrlsnchs/jwt/v3" "github.com/gbrlsnchs/jwt/v3"
"github.com/pkg/errors"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
"io/ioutil" "io/ioutil"
"os" "os"
"os/exec" "os/exec"
"os/user" "os/user"
"path"
"regexp"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -81,8 +78,14 @@ type Configuration struct {
// to collect data and send events. // to collect data and send events.
PanelLocation string `json:"remote" yaml:"remote"` PanelLocation string `json:"remote" yaml:"remote"`
// AllowedMounts . // AllowedMounts is a list of allowed host-system mount points.
// This is required to have the "Server Mounts" feature work properly.
AllowedMounts []string `json:"allowed_mounts" yaml:"allowed_mounts"` AllowedMounts []string `json:"allowed_mounts" yaml:"allowed_mounts"`
// AllowedOrigins is a list of allowed request origins.
// The Panel URL is automatically allowed, this is only needed for adding
// additional origins.
AllowedOrigins []string `json:"allowed_origins" yaml:"allowed_origins"`
} }
// Defines the configuration of the internal SFTP server. // Defines the configuration of the internal SFTP server.
@@ -148,7 +151,7 @@ func ReadConfiguration(path string) (*Configuration, error) {
return c, nil return c, nil
} }
var Mutex sync.RWMutex var mu sync.RWMutex
var _config *Configuration var _config *Configuration
var _jwtAlgo *jwt.HMACSHA var _jwtAlgo *jwt.HMACSHA
@@ -158,14 +161,14 @@ var _debugViaFlag bool
// anything trying to set a different configuration value, or read the configuration // anything trying to set a different configuration value, or read the configuration
// will be paused until it is complete. // will be paused until it is complete.
func Set(c *Configuration) { func Set(c *Configuration) {
Mutex.Lock() mu.Lock()
if _config == nil || _config.AuthenticationToken != c.AuthenticationToken { if _config == nil || _config.AuthenticationToken != c.AuthenticationToken {
_jwtAlgo = jwt.NewHS256([]byte(c.AuthenticationToken)) _jwtAlgo = jwt.NewHS256([]byte(c.AuthenticationToken))
} }
_config = c _config = c
Mutex.Unlock() mu.Unlock()
} }
func SetDebugViaFlag(d bool) { func SetDebugViaFlag(d bool) {
@@ -175,16 +178,16 @@ func SetDebugViaFlag(d bool) {
// Get the global configuration instance. This is a read-safe operation that will block // Get the global configuration instance. This is a read-safe operation that will block
// if the configuration is presently being modified. // if the configuration is presently being modified.
func Get() *Configuration { func Get() *Configuration {
Mutex.RLock() mu.RLock()
defer Mutex.RUnlock() defer mu.RUnlock()
return _config return _config
} }
// Returns the in-memory JWT algorithm. // Returns the in-memory JWT algorithm.
func GetJwtAlgorithm() *jwt.HMACSHA { func GetJwtAlgorithm() *jwt.HMACSHA {
Mutex.RLock() mu.RLock()
defer Mutex.RUnlock() defer mu.RUnlock()
return _jwtAlgo return _jwtAlgo
} }
@@ -193,7 +196,7 @@ func GetJwtAlgorithm() *jwt.HMACSHA {
func NewFromPath(path string) (*Configuration, error) { func NewFromPath(path string) (*Configuration, error) {
c := new(Configuration) c := new(Configuration)
if err := defaults.Set(c); err != nil { if err := defaults.Set(c); err != nil {
return c, err return c, errors.WithStack(err)
} }
c.unsafeSetPath(path) c.unsafeSetPath(path)
@@ -231,12 +234,12 @@ func (c *Configuration) EnsurePterodactylUser() (*user.User, error) {
if err == nil { if err == nil {
return u, c.setSystemUser(u) return u, c.setSystemUser(u)
} else if _, ok := err.(user.UnknownUserError); !ok { } else if _, ok := err.(user.UnknownUserError); !ok {
return nil, err return nil, errors.WithStack(err)
} }
sysName, err := getSystemName() sysName, err := getSystemName()
if err != nil { if err != nil {
return nil, err return nil, errors.WithStack(err)
} }
var command = fmt.Sprintf("useradd --system --no-create-home --shell /bin/false %s", c.System.Username) var command = fmt.Sprintf("useradd --system --no-create-home --shell /bin/false %s", c.System.Username)
@@ -249,17 +252,17 @@ func (c *Configuration) EnsurePterodactylUser() (*user.User, error) {
// We have to create the group first on Alpine, so do that here before continuing on // We have to create the group first on Alpine, so do that here before continuing on
// to the user creation process. // to the user creation process.
if _, err := exec.Command("addgroup", "-S", c.System.Username).Output(); err != nil { if _, err := exec.Command("addgroup", "-S", c.System.Username).Output(); err != nil {
return nil, err return nil, errors.WithStack(err)
} }
} }
split := strings.Split(command, " ") split := strings.Split(command, " ")
if _, err := exec.Command(split[0], split[1:]...).Output(); err != nil { if _, err := exec.Command(split[0], split[1:]...).Output(); err != nil {
return nil, err return nil, errors.WithStack(err)
} }
if u, err := user.Lookup(c.System.Username); err != nil { if u, err := user.Lookup(c.System.Username); err != nil {
return nil, err return nil, errors.WithStack(err)
} else { } else {
return u, c.setSystemUser(u) return u, c.setSystemUser(u)
} }
@@ -280,58 +283,6 @@ func (c *Configuration) setSystemUser(u *user.User) error {
return c.WriteToDisk() return c.WriteToDisk()
} }
// Ensures that the configured data directory has the correct permissions assigned to
// all of the files and folders within.
func (c *Configuration) EnsureFilePermissions() error {
// Don't run this unless it is configured to be run. On large system this can often slow
// things down dramatically during the boot process.
if !c.System.SetPermissionsOnBoot {
return nil
}
r := regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$")
files, err := ioutil.ReadDir(c.System.Data)
if err != nil {
return err
}
su, err := user.Lookup(c.System.Username)
if err != nil {
return err
}
wg := new(sync.WaitGroup)
for _, file := range files {
wg.Add(1)
// Asynchronously run through the list of files and folders in the data directory. If
// the item is not a folder, or is not a folder that matches the expected UUIDv4 format
// skip over it.
//
// If we do have a positive match, run a chown against the directory.
go func(f os.FileInfo) {
defer wg.Done()
if !f.IsDir() || !r.MatchString(f.Name()) {
return
}
uid, _ := strconv.Atoi(su.Uid)
gid, _ := strconv.Atoi(su.Gid)
if err := os.Chown(path.Join(c.System.Data, f.Name()), uid, gid); err != nil {
log.WithField("error", err).WithField("directory", f.Name()).Warn("failed to chown server directory")
}
}(file)
}
wg.Wait()
return nil
}
// Writes the configuration to the disk as a blocking operation by obtaining an exclusive // Writes the configuration to the disk as a blocking operation by obtaining an exclusive
// lock on the file. This prevents something else from writing at the exact same time and // lock on the file. This prevents something else from writing at the exact same time and
// leading to bad data conditions. // leading to bad data conditions.
@@ -353,11 +304,11 @@ func (c *Configuration) WriteToDisk() error {
b, err := yaml.Marshal(&ccopy) b, err := yaml.Marshal(&ccopy)
if err != nil { if err != nil {
return err return errors.WithStack(err)
} }
if err := ioutil.WriteFile(c.GetPath(), b, 0644); err != nil { if err := ioutil.WriteFile(c.GetPath(), b, 0644); err != nil {
return err return errors.WithStack(err)
} }
return nil return nil
@@ -367,7 +318,7 @@ func (c *Configuration) WriteToDisk() error {
func getSystemName() (string, error) { func getSystemName() (string, error) {
// use osrelease to get release version and ID // use osrelease to get release version and ID
if release, err := osrelease.Read(); err != nil { if release, err := osrelease.Read(); err != nil {
return "", err return "", errors.WithStack(err)
} else { } else {
return release["ID"], nil return release["ID"], nil
} }

View File

@@ -1,5 +1,12 @@
package config package config
import (
"encoding/base64"
"encoding/json"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)
type dockerNetworkInterfaces struct { type dockerNetworkInterfaces struct {
V4 struct { V4 struct {
Subnet string `default:"172.18.0.0/16"` Subnet string `default:"172.18.0.0/16"`
@@ -53,4 +60,28 @@ type DockerConfiguration struct {
// Defines the location of the timezone file on the host system that should // Defines the location of the timezone file on the host system that should
// be mounted into the created containers so that they all use the same time. // be mounted into the created containers so that they all use the same time.
TimezonePath string `default:"/etc/timezone" json:"timezone_path" yaml:"timezone_path"` TimezonePath string `default:"/etc/timezone" json:"timezone_path" yaml:"timezone_path"`
// Registries .
Registries map[string]RegistryConfiguration `json:"registries" yaml:"registries"`
}
// RegistryConfiguration .
type RegistryConfiguration struct {
Username string `yaml:"username"`
Password string `yaml:"password"`
}
// Base64 .
func (c RegistryConfiguration) Base64() (string, error) {
authConfig := types.AuthConfig{
Username: c.Username,
Password: c.Password,
}
b, err := json.Marshal(authConfig)
if err != nil {
return "", errors.WithStack(err)
}
return base64.URLEncoding.EncodeToString(b), nil
} }

View File

@@ -81,7 +81,7 @@ func (h *Handler) HandleLog(e *log.Entry) error {
} }
func getErrorStack(err error, i bool) errors.StackTrace { func getErrorStack(err error, i bool) errors.StackTrace {
e, ok := errors.Cause(err).(tracer) e, ok := err.(tracer)
if !ok { if !ok {
if i { if i {
// Just abort out of this and return a stacktrace leading up to this point. It isn't perfect // Just abort out of this and return a stacktrace leading up to this point. It isn't perfect
@@ -89,7 +89,7 @@ func getErrorStack(err error, i bool) errors.StackTrace {
return errors.Wrap(err, "failed to generate stacktrace for caught error").(tracer).StackTrace() return errors.Wrap(err, "failed to generate stacktrace for caught error").(tracer).StackTrace()
} }
return getErrorStack(errors.New(err.Error()), true) return getErrorStack(errors.Wrap(err, err.Error()), true)
} }
st := e.StackTrace() st := e.StackTrace()

View File

@@ -11,8 +11,22 @@ import (
// Set the access request control headers on all of the requests. // Set the access request control headers on all of the requests.
func SetAccessControlHeaders(c *gin.Context) { func SetAccessControlHeaders(c *gin.Context) {
c.Header("Access-Control-Allow-Origin", config.Get().PanelLocation)
c.Header("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization") c.Header("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
o := c.GetHeader("Origin")
if o != config.Get().PanelLocation {
for _, origin := range config.Get().AllowedOrigins {
if o != origin {
continue
}
c.Header("Access-Control-Allow-Origin", origin)
c.Next()
return
}
}
c.Header("Access-Control-Allow-Origin", config.Get().PanelLocation)
c.Next() c.Next()
} }

View File

@@ -35,6 +35,7 @@ func Configure() *gin.Engine {
// These routes use signed URLs to validate access to the resource being requested. // These routes use signed URLs to validate access to the resource being requested.
router.GET("/download/backup", getDownloadBackup) router.GET("/download/backup", getDownloadBackup)
router.GET("/download/file", getDownloadFile) router.GET("/download/file", getDownloadFile)
router.POST("/upload/file", postServerUploadFiles)
// This route is special it sits above all of the other requests because we are // This route is special it sits above all of the other requests because we are
// using a JWT to authorize access to it, therefore it needs to be publicly // using a JWT to authorize access to it, therefore it needs to be publicly

View File

@@ -2,6 +2,7 @@ package router
import ( import (
"bytes" "bytes"
"context"
"github.com/apex/log" "github.com/apex/log"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -13,7 +14,9 @@ import (
// Returns a single server from the collection of servers. // Returns a single server from the collection of servers.
func getServer(c *gin.Context) { func getServer(c *gin.Context) {
c.JSON(http.StatusOK, GetServer(c.Param("server"))) s := GetServer(c.Param("server"))
c.JSON(http.StatusOK, s.Proc())
} }
// Returns the logs for a given server instance. // Returns the logs for a given server instance.
@@ -45,13 +48,15 @@ func getServerLogs(c *gin.Context) {
func postServerPower(c *gin.Context) { func postServerPower(c *gin.Context) {
s := GetServer(c.Param("server")) s := GetServer(c.Param("server"))
var data server.PowerAction var data struct{
// BindJSON sends 400 if the request fails, all we need to do is return Action server.PowerAction `json:"action"`
}
if err := c.BindJSON(&data); err != nil { if err := c.BindJSON(&data); err != nil {
return return
} }
if !data.IsValid() { if !data.Action.IsValid() {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{ c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "The power action provided was not valid, should be one of \"stop\", \"start\", \"restart\", \"kill\"", "error": "The power action provided was not valid, should be one of \"stop\", \"start\", \"restart\", \"kill\"",
}) })
@@ -64,7 +69,7 @@ func postServerPower(c *gin.Context) {
// //
// We don't really care about any of the other actions at this point, they'll all result // We don't really care about any of the other actions at this point, they'll all result
// in the process being stopped, which should have happened anyways if the server is suspended. // in the process being stopped, which should have happened anyways if the server is suspended.
if (data.Action == "start" || data.Action == "restart") && s.IsSuspended() { if (data.Action == server.PowerActionStart || data.Action == server.PowerActionRestart) && s.IsSuspended() {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot start or restart a server that is suspended.", "error": "Cannot start or restart a server that is suspended.",
}) })
@@ -74,11 +79,16 @@ func postServerPower(c *gin.Context) {
// Pass the actual heavy processing off to a seperate thread to handle so that // Pass the actual heavy processing off to a seperate thread to handle so that
// we can immediately return a response from the server. Some of these actions // we can immediately return a response from the server. Some of these actions
// can take quite some time, especially stopping or restarting. // can take quite some time, especially stopping or restarting.
go func(server *server.Server) { go func(s *server.Server) {
if err := server.HandlePowerAction(data); err != nil { if err := s.HandlePowerAction(data.Action, 30); err != nil {
server.Log().WithFields(log.Fields{"action": data, "error": err}). if errors.Is(err, context.DeadlineExceeded) {
s.Log().WithField("action", data.Action).
Warn("could not acquire a lock while attempting to perform a power action")
} else {
s.Log().WithFields(log.Fields{"action": data, "error": err}).
Error("encountered error processing a server power action in the background") Error("encountered error processing a server power action in the background")
} }
}
}(s) }(s)
c.Status(http.StatusAccepted) c.Status(http.StatusAccepted)

View File

@@ -4,12 +4,16 @@ import (
"bufio" "bufio"
"context" "context"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server" "github.com/pterodactyl/wings/server"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"mime/multipart"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
"path" "path"
"path/filepath"
"strconv" "strconv"
"strings" "strings"
) )
@@ -129,7 +133,17 @@ func putServerRenameFiles(c *gin.Context) {
case <-ctx.Done(): case <-ctx.Done():
return ctx.Err() return ctx.Err()
default: default:
return s.Filesystem.Rename(pf, pt) if err := s.Filesystem.Rename(pf, pt); err != nil {
// Return nil if the error is an is not exists.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return err
}
return nil
} }
}) })
} }
@@ -155,6 +169,13 @@ func postServerCopyFile(c *gin.Context) {
} }
if err := s.Filesystem.Copy(data.Location); err != nil { if err := s.Filesystem.Copy(data.Location); err != nil {
// Check if the file does not exist.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
c.Status(http.StatusNotFound)
return
}
TrackedServerError(err, s).AbortWithServerError(c) TrackedServerError(err, s).AbortWithServerError(c)
return return
} }
@@ -177,7 +198,7 @@ func postServerDeleteFiles(c *gin.Context) {
if len(data.Files) == 0 { if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{ c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files were specififed for deletion.", "error": "No files were specified for deletion.",
}) })
return return
} }
@@ -311,9 +332,86 @@ func postServerDecompressFiles(c *gin.Context) {
} }
if err := s.Filesystem.DecompressFile(data.RootPath, data.File); err != nil { if err := s.Filesystem.DecompressFile(data.RootPath, data.File); err != nil {
// Check if the file does not exist.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
c.Status(http.StatusNotFound)
return
}
TrackedServerError(err, s).AbortWithServerError(c) TrackedServerError(err, s).AbortWithServerError(c)
return return
} }
c.Status(http.StatusNoContent) c.Status(http.StatusNoContent)
} }
func postServerUploadFiles(c *gin.Context) {
token := tokens.UploadPayload{}
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
TrackedError(err).AbortWithServerError(c)
return
}
s := GetServer(token.ServerUuid)
if s == nil || !token.IsUniqueRequest() {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested resource was not found on this server.",
})
return
}
if !s.Filesystem.HasSpaceAvailable() {
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
"error": "This server does not have enough available disk space to accept any file uploads.",
})
return
}
form, err := c.MultipartForm()
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Failed to get multipart form.",
})
return
}
headers, ok := form.File["files"]
if !ok {
c.AbortWithStatusJSON(http.StatusNotModified, gin.H{
"error": "No files were attached to the request.",
})
return
}
directory := c.Query("directory")
for _, header := range headers {
p, err := s.Filesystem.SafePath(filepath.Join(directory, header.Filename))
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
// We run this in a different method so I can use defer without any of
// the consequences caused by calling it in a loop.
if err := handleFileUpload(p, s, header); err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
}
}
func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader) error {
file, err := header.Open()
if err != nil {
return errors.WithStack(err)
}
defer file.Close()
if err := s.Filesystem.Writefile(p, file); err != nil {
return errors.WithStack(err)
}
return nil
}

View File

@@ -6,6 +6,7 @@ import (
type BackupPayload struct { type BackupPayload struct {
jwt.Payload jwt.Payload
ServerUuid string `json:"server_uuid"` ServerUuid string `json:"server_uuid"`
BackupUuid string `json:"backup_uuid"` BackupUuid string `json:"backup_uuid"`
UniqueId string `json:"unique_id"` UniqueId string `json:"unique_id"`

25
router/tokens/upload.go Normal file
View File

@@ -0,0 +1,25 @@
package tokens
import (
"github.com/gbrlsnchs/jwt/v3"
)
type UploadPayload struct {
jwt.Payload
ServerUuid string `json:"server_uuid"`
UniqueId string `json:"unique_id"`
}
// Returns the JWT payload.
func (p *UploadPayload) GetPayload() *jwt.Payload {
return &p.Payload
}
// Determines if this JWT is valid for the given request cycle. If the
// unique ID passed in the token has already been seen before this will
// return false. This allows us to use this JWT as a one-time token that
// validates all of the request.
func (p *UploadPayload) IsUniqueRequest() bool {
return getTokenStore().IsValidToken(p.UniqueId)
}

View File

@@ -26,9 +26,9 @@ func (h *Handler) ListenForExpiration(ctx context.Context) {
jwt := h.GetJwt() jwt := h.GetJwt()
if jwt != nil { if jwt != nil {
if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 0 { if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 0 {
h.SendJson(&Message{Event: TokenExpiredEvent}) _ = h.SendJson(&Message{Event: TokenExpiredEvent})
} else if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 180 { } else if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 180 {
h.SendJson(&Message{Event: TokenExpiringEvent}) _ = h.SendJson(&Message{Event: TokenExpiringEvent})
} }
} }
} }
@@ -43,6 +43,8 @@ func (h *Handler) ListenForServerEvents(ctx context.Context) {
server.StatusEvent, server.StatusEvent,
server.ConsoleOutputEvent, server.ConsoleOutputEvent,
server.InstallOutputEvent, server.InstallOutputEvent,
server.InstallStartedEvent,
server.InstallCompletedEvent,
server.DaemonMessageEvent, server.DaemonMessageEvent,
server.BackupCompletedEvent, server.BackupCompletedEvent,
} }
@@ -52,6 +54,7 @@ func (h *Handler) ListenForServerEvents(ctx context.Context) {
h.server.Events().Subscribe(event, eventChannel) h.server.Events().Subscribe(event, eventChannel)
} }
for d := range eventChannel {
select { select {
case <-ctx.Done(): case <-ctx.Done():
for _, event := range events { for _, event := range events {
@@ -60,9 +63,7 @@ func (h *Handler) ListenForServerEvents(ctx context.Context) {
close(eventChannel) close(eventChannel)
default: default:
// Listen for different events emitted by the server and respond to them appropriately. _ = h.SendJson(&Message{
for d := range eventChannel {
h.SendJson(&Message{
Event: d.Topic, Event: d.Topic,
Args: []string{d.Data}, Args: []string{d.Data},
}) })

View File

@@ -1,6 +1,7 @@
package websocket package websocket
import ( import (
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/apex/log" "github.com/apex/log"
@@ -12,7 +13,6 @@ import (
"github.com/pterodactyl/wings/router/tokens" "github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server" "github.com/pterodactyl/wings/server"
"net/http" "net/http"
"os"
"strings" "strings"
"sync" "sync"
"time" "time"
@@ -57,7 +57,20 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
// Ensure that the websocket request is originating from the Panel itself, // Ensure that the websocket request is originating from the Panel itself,
// and not some other location. // and not some other location.
CheckOrigin: func(r *http.Request) bool { CheckOrigin: func(r *http.Request) bool {
return r.Header.Get("Origin") == config.Get().PanelLocation o := r.Header.Get("Origin")
if o == config.Get().PanelLocation {
return true
}
for _, origin := range config.Get().AllowedOrigins {
if o != origin {
continue
}
return true
}
return false
}, },
} }
@@ -137,7 +150,7 @@ func (h *Handler) TokenValid() error {
// Sends an error back to the connected websocket instance by checking the permissions // Sends an error back to the connected websocket instance by checking the permissions
// of the token. If the user has the "receive-errors" grant we will send back the actual // of the token. If the user has the "receive-errors" grant we will send back the actual
// error message, otherwise we just send back a standard error message. // error message, otherwise we just send back a standard error message.
func (h *Handler) SendErrorJson(msg Message, err error) error { func (h *Handler) SendErrorJson(msg Message, err error, shouldLog ...bool) error {
j := h.GetJwt() j := h.GetJwt()
message := "an unexpected error was encountered while handling this request" message := "an unexpected error was encountered while handling this request"
@@ -150,10 +163,12 @@ func (h *Handler) SendErrorJson(msg Message, err error) error {
wsm := Message{Event: ErrorEvent} wsm := Message{Event: ErrorEvent}
wsm.Args = []string{m} wsm.Args = []string{m}
if len(shouldLog) == 0 || (len(shouldLog) == 1 && shouldLog[0] == true) {
if !server.IsSuspendedError(err) { if !server.IsSuspendedError(err) {
h.server.Log().WithFields(log.Fields{"event": msg.Event, "error_identifier": u.String(), "error": err}). h.server.Log().WithFields(log.Fields{"event": msg.Event, "error_identifier": u.String(), "error": err}).
Error("failed to handle websocket process; an error was encountered processing an event") Error("failed to handle websocket process; an error was encountered processing an event")
} }
}
return h.unsafeSendJson(wsm) return h.unsafeSendJson(wsm)
} }
@@ -258,37 +273,34 @@ func (h *Handler) HandleInbound(m Message) error {
} }
case SetStateEvent: case SetStateEvent:
{ {
switch strings.Join(m.Args, "") { action := server.PowerAction(strings.Join(m.Args, ""))
case "start":
if h.GetJwt().HasPermission(PermissionSendPowerStart) { actions := make(map[server.PowerAction]string)
return h.server.Environment.Start() actions[server.PowerActionStart] = PermissionSendPowerStart
actions[server.PowerActionStop] = PermissionSendPowerStop
actions[server.PowerActionRestart] = PermissionSendPowerRestart
actions[server.PowerActionTerminate] = PermissionSendPowerStop
// Check that they have permission to perform this action if it is needed.
if permission, exists := actions[action]; exists {
if !h.GetJwt().HasPermission(permission) {
return nil
} }
break
case "stop":
if h.GetJwt().HasPermission(PermissionSendPowerStop) {
return h.server.Environment.Stop()
} }
break
case "restart": err := h.server.HandlePowerAction(action)
if h.GetJwt().HasPermission(PermissionSendPowerRestart) { if errors.Is(err, context.DeadlineExceeded) {
// If the server is alreay restarting don't do anything. Perhaps we send back an event m, _ := h.GetErrorMessage("another power action is currently being processed for this server, please try again later")
// in the future for this? For now no reason to knowingly trigger an error by trying to
// restart a process already restarting. h.SendJson(&Message{
if h.server.Environment.IsRestarting() { Event: ErrorEvent,
Args: []string{m},
})
return nil return nil
} }
return h.server.Environment.Restart() return err
}
break
case "kill":
if h.GetJwt().HasPermission(PermissionSendPowerStop) {
return h.server.Environment.Terminate(os.Kill)
}
break
}
return nil
} }
case SendServerLogsEvent: case SendServerLogsEvent:
{ {

View File

@@ -1,34 +1,32 @@
package server package server
import ( import (
"github.com/pterodactyl/wings/parser" "github.com/gammazero/workerpool"
"sync" "runtime"
) )
// Parent function that will update all of the defined configuration files for a server // Parent function that will update all of the defined configuration files for a server
// automatically to ensure that they always use the specified values. // automatically to ensure that they always use the specified values.
func (s *Server) UpdateConfigurationFiles() { func (s *Server) UpdateConfigurationFiles() {
wg := new(sync.WaitGroup) pool := workerpool.New(runtime.NumCPU())
files := s.ProcessConfiguration().ConfigurationFiles files := s.ProcessConfiguration().ConfigurationFiles
for _, v := range files { for _, cf := range files {
wg.Add(1) f := cf
go func(f parser.ConfigurationFile, server *Server) { pool.Submit(func() {
defer wg.Done() p, err := s.Filesystem.SafePath(f.FileName)
p, err := server.Filesystem.SafePath(f.FileName)
if err != nil { if err != nil {
server.Log().WithField("error", err).Error("failed to generate safe path for configuration file") s.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
return return
} }
if err := f.Parse(p, false); err != nil { if err := f.Parse(p, false); err != nil {
server.Log().WithField("error", err).Error("failed to parse and update server configuration file") s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
} }
}(v, s) })
} }
wg.Wait() pool.StopWait()
} }

View File

@@ -82,5 +82,5 @@ func (s *Server) handleServerCrash() error {
s.crasher.SetLastCrash(time.Now()) s.crasher.SetLastCrash(time.Now())
return s.Environment.Start() return s.HandlePowerAction(PowerActionStart)
} }

View File

@@ -36,7 +36,6 @@ type Environment interface {
// unnecessary double/triple/quad looping issues if multiple people press restart or spam the // unnecessary double/triple/quad looping issues if multiple people press restart or spam the
// button to restart. // button to restart.
Restart() error Restart() error
IsRestarting() bool
// Waits for a server instance to stop gracefully. If the server is still detected // Waits for a server instance to stop gracefully. If the server is still detected
// as running after seconds, an error will be returned, or the server will be terminated // as running after seconds, an error will be returned, or the server will be terminated

View File

@@ -16,7 +16,6 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pterodactyl/wings/api" "github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"golang.org/x/sync/semaphore"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
@@ -35,28 +34,19 @@ type DockerEnvironment struct {
// The Docker client being used for this instance. // The Docker client being used for this instance.
Client *client.Client Client *client.Client
// Tracks if we are currently attached to the server container. This allows us to attach
// once and then just use that attachment to stream logs out of the server and also stream
// commands back into it without constantly attaching and detaching.
attached bool
// Controls the hijacked response stream which exists only when we're attached to // Controls the hijacked response stream which exists only when we're attached to
// the running container instance. // the running container instance.
stream types.HijackedResponse stream *types.HijackedResponse
// Holds the stats stream used by the polling commands so that we can easily close // Holds the stats stream used by the polling commands so that we can easily close
// it out. // it out.
stats io.ReadCloser stats io.ReadCloser
// Locks when we're performing a restart to avoid trying to restart a process that is already
// being restarted.
restartSem *semaphore.Weighted
} }
// Set if this process is currently attached to the process. // Set if this process is currently attached to the process.
func (d *DockerEnvironment) SetAttached(a bool) { func (d *DockerEnvironment) SetStream(s *types.HijackedResponse) {
d.Lock() d.Lock()
d.attached = a d.stream = s
d.Unlock() d.Unlock()
} }
@@ -65,7 +55,7 @@ func (d *DockerEnvironment) IsAttached() bool {
d.RLock() d.RLock()
defer d.RUnlock() defer d.RUnlock()
return d.attached return d.stream != nil
} }
// Creates a new base Docker environment. A server must still be attached to it. // Creates a new base Docker environment. A server must still be attached to it.
@@ -313,7 +303,7 @@ func (d *DockerEnvironment) Stop() error {
// If the container does not exist just mark the process as stopped and return without // If the container does not exist just mark the process as stopped and return without
// an error. // an error.
if client.IsErrNotFound(err) { if client.IsErrNotFound(err) {
d.SetAttached(false) d.SetStream(nil)
d.Server.SetState(ProcessOfflineState) d.Server.SetState(ProcessOfflineState)
return nil return nil
@@ -325,60 +315,19 @@ func (d *DockerEnvironment) Stop() error {
return nil return nil
} }
// Try to acquire a lock to restart the server. If one cannot be obtained within 5 seconds return
// an error to the caller. You should ideally be checking IsRestarting() before calling this function
// to avoid unnecessary delays since you can respond immediately from that.
func (d *DockerEnvironment) acquireRestartLock() error {
if d.restartSem == nil {
d.restartSem = semaphore.NewWeighted(1)
}
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
return d.restartSem.Acquire(ctx, 1)
}
// Restarts the server process by waiting for the process to gracefully stop and then triggering a // Restarts the server process by waiting for the process to gracefully stop and then triggering a
// start command. This will return an error if there is already a restart process executing for the // start command. This will return an error if there is already a restart process executing for the
// server. The lock is released when the process is stopped and a start has begun. // server. The lock is released when the process is stopped and a start has begun.
func (d *DockerEnvironment) Restart() error { func (d *DockerEnvironment) Restart() error {
d.Server.Log().Debug("attempting to acquire restart lock...")
if err := d.acquireRestartLock(); err != nil {
d.Server.Log().Warn("failed to acquire restart lock; already acquired by a different process")
return err
}
d.Server.Log().Debug("acquired restart lock")
err := d.WaitForStop(60, false) err := d.WaitForStop(60, false)
if err != nil { if err != nil {
d.restartSem.Release(1)
return err return err
} }
// Release the restart lock, it is now safe for someone to attempt restarting the server again.
d.restartSem.Release(1)
// Start the process. // Start the process.
return d.Start() return d.Start()
} }
// Check if the server is currently running the restart process by checking if there is a semaphore
// allocated, and if so, if we can aquire a lock on it.
func (d *DockerEnvironment) IsRestarting() bool {
if d.restartSem == nil {
return false
}
if d.restartSem.TryAcquire(1) {
d.restartSem.Release(1)
return false
}
return true
}
// Attempts to gracefully stop a server using the defined stop command. If the server // Attempts to gracefully stop a server using the defined stop command. If the server
// does not stop after seconds have passed, an error will be returned, or the instance // does not stop after seconds have passed, an error will be returned, or the instance
// will be terminated forcefully depending on the value of the second argument. // will be terminated forcefully depending on the value of the second argument.
@@ -469,7 +418,7 @@ func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
// //
// However, someone reported an error in Discord about this scenario happening, // However, someone reported an error in Discord about this scenario happening,
// so I guess this should prevent it? They didn't tell me how they caused it though // so I guess this should prevent it? They didn't tell me how they caused it though
// so thats a mystery that will have to go unsolved. // so that's a mystery that will have to go unsolved.
// //
// @see https://github.com/pterodactyl/panel/issues/2003 // @see https://github.com/pterodactyl/panel/issues/2003
if client.IsErrNotFound(err) { if client.IsErrNotFound(err) {
@@ -495,23 +444,24 @@ func (d *DockerEnvironment) Attach() error {
return errors.WithStack(err) return errors.WithStack(err)
} }
var err error opts := types.ContainerAttachOptions{
d.stream, err = d.Client.ContainerAttach(context.Background(), d.Server.Id(), types.ContainerAttachOptions{
Stdin: true, Stdin: true,
Stdout: true, Stdout: true,
Stderr: true, Stderr: true,
Stream: true, Stream: true,
}) }
if err != nil { // Set the stream again with the container.
if st, err := d.Client.ContainerAttach(context.Background(), d.Server.Id(), opts); err != nil {
return errors.WithStack(err) return errors.WithStack(err)
} else {
d.SetStream(&st)
} }
console := Console{ console := Console{
Server: d.Server, Server: d.Server,
} }
d.SetAttached(true)
go func() { go func() {
if err := d.EnableResourcePolling(); err != nil { if err := d.EnableResourcePolling(); err != nil {
d.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to enable resource polling on server") d.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to enable resource polling on server")
@@ -522,7 +472,7 @@ func (d *DockerEnvironment) Attach() error {
defer d.stream.Close() defer d.stream.Close()
defer func() { defer func() {
d.Server.SetState(ProcessOfflineState) d.Server.SetState(ProcessOfflineState)
d.SetAttached(false) d.SetStream(nil)
}() }()
io.Copy(console, d.stream.Reader) io.Copy(console, d.stream.Reader)
@@ -654,7 +604,33 @@ func (d *DockerEnvironment) ensureImageExists() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
defer cancel() defer cancel()
out, err := d.Client.ImagePull(ctx, d.Image(), types.ImagePullOptions{All: false}) image := d.Image()
// Get a registry auth configuration from the config.
var registryAuth *config.RegistryConfiguration
for registry, c := range config.Get().Docker.Registries {
if !strings.HasPrefix(image, registry) {
continue
}
log.WithField("registry", registry).Debug("using authentication for repository")
registryAuth = &c
break
}
// Get the ImagePullOptions.
imagePullOptions := types.ImagePullOptions{All: false}
if registryAuth != nil {
b64, err := registryAuth.Base64()
if err != nil {
log.WithError(err).Error("failed to get registry auth credentials")
}
// b64 is a string so if there is an error it will just be empty, not nil.
imagePullOptions.RegistryAuth = b64
}
out, err := d.Client.ImagePull(ctx, image, imagePullOptions)
if err != nil { if err != nil {
images, ierr := d.Client.ImageList(ctx, types.ImageListOptions{}) images, ierr := d.Client.ImageList(ctx, types.ImageListOptions{})
if ierr != nil { if ierr != nil {
@@ -740,49 +716,25 @@ func (d *DockerEnvironment) Create() error {
}, },
} }
mounts := []mount.Mount{ mounts, err := d.getContainerMounts()
{ if err != nil {
Target: "/home/container", return errors.WithMessage(err, "could not build container mount points slice")
Source: d.Server.Filesystem.Path(),
Type: mount.TypeBind,
ReadOnly: false,
},
} }
var mounted bool customMounts, err := d.getCustomMounts()
for _, m := range d.Server.Config().Mounts { if err != nil {
mounted = false return errors.WithMessage(err, "could not build custom container mount points slice")
source := filepath.Clean(m.Source)
target := filepath.Clean(m.Target)
for _, allowed := range config.Get().AllowedMounts {
if !strings.HasPrefix(source, allowed) {
continue
} }
mounts = append(mounts, mount.Mount{ if len(customMounts) > 0 {
Type: mount.TypeBind, mounts = append(mounts, customMounts...)
Source: source, for _, m := range customMounts {
Target: target, d.Server.Log().WithFields(log.Fields{
ReadOnly: m.ReadOnly, "source_path": m.Source,
}) "target_path": m.Target,
mounted = true
break
}
logger := log.WithFields(log.Fields{
"server": d.Server.Id(),
"source_path": source,
"target_path": target,
"read_only": m.ReadOnly, "read_only": m.ReadOnly,
}) }).Debug("attaching custom server mount point to container")
if mounted {
logger.Debug("attaching mount to server's container")
} else {
logger.Warn("skipping mount because it isn't allowed")
} }
} }
@@ -833,9 +785,96 @@ func (d *DockerEnvironment) Create() error {
return nil return nil
} }
// Returns the default container mounts for the server instance. This always
// includes a writable bind of the server's data directory at /home/container,
// plus read-only binds of /etc/localtime and /etc/timezone when those files
// exist on the host, so processes in the container report the correct time.
func (d *DockerEnvironment) getContainerMounts() ([]mount.Mount, error) {
	mounts := []mount.Mount{{
		Target:   "/home/container",
		Source:   d.Server.Filesystem.Path(),
		Type:     mount.TypeBind,
		ReadOnly: false,
	}}

	// Attach each timezone file read-only if it is present on the host. A
	// missing file is skipped silently; any other stat failure is returned.
	for _, tz := range []string{"/etc/localtime", "/etc/timezone"} {
		if _, err := os.Stat(tz); err != nil {
			if !os.IsNotExist(err) {
				return nil, err
			}
			continue
		}

		mounts = append(mounts, mount.Mount{
			Target:   tz,
			Source:   tz,
			Type:     mount.TypeBind,
			ReadOnly: true,
		})
	}

	return mounts, nil
}
// Returns the custom mounts for a given server after verifying that each one
// falls within the node's list of allowed mount points. Disallowed mounts are
// skipped with a warning rather than treated as an error.
func (d *DockerEnvironment) getCustomMounts() ([]mount.Mount, error) {
	var mounts []mount.Mount

	// TODO: probably need to handle things trying to mount directories that do not exist.
	for _, m := range d.Server.Config().Mounts {
		source := filepath.Clean(m.Source)
		target := filepath.Clean(m.Target)

		logger := d.Server.Log().WithFields(log.Fields{
			"source_path": source,
			"target_path": target,
			"read_only":   m.ReadOnly,
		})

		// Only attach the mount when its source path sits under one of the
		// node's allowed mount prefixes.
		allowed := false
		for _, prefix := range config.Get().AllowedMounts {
			if strings.HasPrefix(source, prefix) {
				allowed = true
				break
			}
		}

		if !allowed {
			logger.Warn("skipping custom server mount, not in list of allowed mount points")
			continue
		}

		mounts = append(mounts, mount.Mount{
			Source:   source,
			Target:   target,
			Type:     mount.TypeBind,
			ReadOnly: m.ReadOnly,
		})
	}

	return mounts, nil
}
// Sends the specified command to the stdin of the running container instance. There is no // Sends the specified command to the stdin of the running container instance. There is no
// confirmation that this data is sent successfully, only that it gets pushed into the stdin. // confirmation that this data is sent successfully, only that it gets pushed into the stdin.
func (d *DockerEnvironment) SendCommand(c string) error { func (d *DockerEnvironment) SendCommand(c string) error {
d.RLock()
defer d.RUnlock()
if !d.IsAttached() { if !d.IsAttached() {
return errors.New("attempting to send command to non-attached instance") return errors.New("attempting to send command to non-attached instance")
} }
@@ -928,7 +967,7 @@ func (d *DockerEnvironment) portBindings() nat.PortMap {
for ip, ports := range d.Server.Config().Allocations.Mappings { for ip, ports := range d.Server.Config().Allocations.Mappings {
for _, port := range ports { for _, port := range ports {
// Skip over invalid ports. // Skip over invalid ports.
if port < 0 || port > 65535 { if port < 1 || port > 65535 {
continue continue
} }

View File

@@ -11,6 +11,8 @@ import (
const ( const (
DaemonMessageEvent = "daemon message" DaemonMessageEvent = "daemon message"
InstallOutputEvent = "install output" InstallOutputEvent = "install output"
InstallStartedEvent = "install started"
InstallCompletedEvent = "install completed"
ConsoleOutputEvent = "console output" ConsoleOutputEvent = "console output"
StatusEvent = "status" StatusEvent = "status"
StatsEvent = "stats" StatsEvent = "stats"
@@ -25,14 +27,17 @@ type Event struct {
type EventBus struct { type EventBus struct {
sync.RWMutex sync.RWMutex
subscribers map[string][]chan Event subscribers map[string]map[chan Event]struct{}
} }
// Returns the server's emitter instance. // Returns the server's emitter instance.
func (s *Server) Events() *EventBus { func (s *Server) Events() *EventBus {
s.emitterLock.Lock()
defer s.emitterLock.Unlock()
if s.emitter == nil { if s.emitter == nil {
s.emitter = &EventBus{ s.emitter = &EventBus{
subscribers: map[string][]chan Event{}, subscribers: make(map[string]map[chan Event]struct{}),
} }
} }
@@ -41,9 +46,6 @@ func (s *Server) Events() *EventBus {
// Publish data to a given topic. // Publish data to a given topic.
func (e *EventBus) Publish(topic string, data string) { func (e *EventBus) Publish(topic string, data string) {
e.RLock()
defer e.RUnlock()
t := topic t := topic
// Some of our topics for the socket support passing a more specific namespace, // Some of our topics for the socket support passing a more specific namespace,
// such as "backup completed:1234" to indicate which specific backup was completed. // such as "backup completed:1234" to indicate which specific backup was completed.
@@ -58,13 +60,19 @@ func (e *EventBus) Publish(topic string, data string) {
} }
} }
// Acquire a read lock and loop over all of the channels registered for the topic. This
// avoids a panic crash if the process tries to unregister the channel while this routine
// is running.
go func() {
e.RLock()
defer e.RUnlock()
if ch, ok := e.subscribers[t]; ok { if ch, ok := e.subscribers[t]; ok {
go func(data Event, cs []chan Event) { for channel := range ch {
for _, channel := range cs { channel <- Event{Data: data, Topic: topic}
channel <- data
} }
}(Event{Data: data, Topic: topic}, ch)
} }
}()
} }
func (e *EventBus) PublishJson(topic string, data interface{}) error { func (e *EventBus) PublishJson(topic string, data interface{}) error {
@@ -83,24 +91,25 @@ func (e *EventBus) Subscribe(topic string, ch chan Event) {
e.Lock() e.Lock()
defer e.Unlock() defer e.Unlock()
if p, ok := e.subscribers[topic]; ok { if _, exists := e.subscribers[topic]; !exists {
e.subscribers[topic] = append(p, ch) e.subscribers[topic] = make(map[chan Event]struct{})
} else { }
e.subscribers[topic] = append([]chan Event{}, ch)
// Only set the channel if there is not currently a matching one for this topic. This
// avoids registering two identical listeners for the same topic and causing pain in
// the unsubscribe functionality as well.
if _, exists := e.subscribers[topic][ch]; !exists {
e.subscribers[topic][ch] = struct{}{}
} }
} }
// Unsubscribe a channel from a topic. // Unsubscribe a channel from a given topic.
func (e *EventBus) Unsubscribe(topic string, ch chan Event) { func (e *EventBus) Unsubscribe(topic string, ch chan Event) {
e.Lock() e.Lock()
defer e.Unlock() defer e.Unlock()
if _, ok := e.subscribers[topic]; ok { if _, exists := e.subscribers[topic][ch]; exists {
for i := range e.subscribers[topic] { delete(e.subscribers[topic], ch)
if ch == e.subscribers[topic][i] {
e.subscribers[topic] = append(e.subscribers[topic][:i], e.subscribers[topic][i+1:]...)
}
}
} }
} }
@@ -112,9 +121,6 @@ func (e *EventBus) UnsubscribeAll() {
e.Lock() e.Lock()
defer e.Unlock() defer e.Unlock()
// Loop over all of the subscribers and just remove all of the events // Reset the entire struct into an empty map.
// for them. e.subscribers = make(map[string]map[chan Event]struct{})
for t := range e.subscribers {
e.subscribers[t] = make([]chan Event, 0)
}
} }

View File

@@ -114,10 +114,10 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
} }
// Generate a path to the file by cleaning it up and appending the root server path to it. This // Generate a path to the file by cleaning it up and appending the root server path to it. This
// DOES NOT gaurantee that the file resolves within the server data directory. You'll want to use // DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
// the fs.unsafeIsInDataDirectory(p) function to confirm. // the fs.unsafeIsInDataDirectory(p) function to confirm.
func (fs *Filesystem) unsafeFilePath(p string) string { func (fs *Filesystem) unsafeFilePath(p string) string {
// Calling filpath.Clean on the joined directory will resolve it to the absolute path, // Calling filepath.Clean on the joined directory will resolve it to the absolute path,
// removing any ../ type of resolution arguments, and leaving us with a direct path link. // removing any ../ type of resolution arguments, and leaving us with a direct path link.
// //
// This will also trim the existing root path off the beginning of the path passed to // This will also trim the existing root path off the beginning of the path passed to
@@ -424,7 +424,7 @@ func (fs *Filesystem) unsafeStat(p string) (*Stat, error) {
return st, nil return st, nil
} }
// Creates a new directory (name) at a specificied path (p) for the server. // Creates a new directory (name) at a specified path (p) for the server.
func (fs *Filesystem) CreateDirectory(name string, p string) error { func (fs *Filesystem) CreateDirectory(name string, p string) error {
cleaned, err := fs.SafePath(path.Join(p, name)) cleaned, err := fs.SafePath(path.Join(p, name))
if err != nil { if err != nil {
@@ -463,72 +463,48 @@ func (fs *Filesystem) Rename(from string, to string) error {
return os.Rename(cleanedFrom, cleanedTo) return os.Rename(cleanedFrom, cleanedTo)
} }
// Recursively iterates over a directory and sets the permissions on all of the // Recursively iterates over a file or directory and sets the permissions on all of the
// underlying files. // underlying files. Iterate over all of the files and directories. If it is a file just
// go ahead and perform the chown operation. Otherwise dig deeper into the directory until
// we've run out of directories to dig into.
func (fs *Filesystem) Chown(path string) error { func (fs *Filesystem) Chown(path string) error {
cleaned, err := fs.SafePath(path) cleaned, err := fs.SafePath(path)
if err != nil { if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
} }
if s, err := os.Stat(cleaned); err != nil { uid := config.Get().System.User.Uid
gid := config.Get().System.User.Gid
// Start by just chowning the initial path that we received.
if err := os.Chown(cleaned, uid, gid); err != nil {
return errors.WithStack(err) return errors.WithStack(err)
} else if !s.IsDir() {
return os.Chown(cleaned, config.Get().System.User.Uid, config.Get().System.User.Gid)
} }
return fs.chownDirectory(cleaned) // If this is not a directory we can now return from the function, there is nothing
// left that we need to do.
if st, _ := os.Stat(cleaned); !st.IsDir() {
return nil
} }
// Iterate over all of the files and directories. If it is a file just go ahead and perform // If this was a directory, begin walking over its contents recursively and ensure that all
// the chown operation. Otherwise dig deeper into the directory until we've run out of // of the subfiles and directories get their permissions updated as well.
// directories to dig into. return fs.Walk(cleaned, func(path string, f os.FileInfo, err error) error {
func (fs *Filesystem) chownDirectory(path string) error {
var wg sync.WaitGroup
cleaned, err := fs.SafePath(path)
if err != nil { if err != nil {
return errors.WithStack(err) return fs.handleWalkerError(err, f)
} }
// Chown the directory itself.
os.Chown(cleaned, config.Get().System.User.Uid, config.Get().System.User.Gid)
files, err := ioutil.ReadDir(cleaned)
if err != nil {
return errors.WithStack(err)
}
for _, f := range files {
// Do not attempt to chmod a symlink. Go's os.Chown function will affect the symlink // Do not attempt to chmod a symlink. Go's os.Chown function will affect the symlink
// so if it points to a location outside the data directory the user would be able to // so if it points to a location outside the data directory the user would be able to
// (un)intentionally modify that files permissions. // (un)intentionally modify that files permissions.
if f.Mode()&os.ModeSymlink != 0 { if f.Mode()&os.ModeSymlink != 0 {
continue
}
p, err := fs.SafeJoin(cleaned, f)
if err != nil {
return err
}
if f.IsDir() {
wg.Add(1)
go func(p string) {
defer wg.Done()
fs.chownDirectory(p)
}(p)
} else {
os.Chown(p, config.Get().System.User.Uid, config.Get().System.User.Gid)
}
}
wg.Wait()
return nil return nil
} }
return os.Chown(path, uid, gid)
})
}
// Copies a given file to the same location and appends a suffix to the file to indicate that // Copies a given file to the same location and appends a suffix to the file to indicate that
// it has been copied. // it has been copied.
// //
@@ -540,7 +516,7 @@ func (fs *Filesystem) Copy(p string) error {
} }
if s, err := os.Stat(cleaned); err != nil { if s, err := os.Stat(cleaned); err != nil {
return err return errors.WithStack(err)
} else if s.IsDir() || !s.Mode().IsRegular() { } else if s.IsDir() || !s.Mode().IsRegular() {
// If this is a directory or not a regular file, just throw a not-exist error // If this is a directory or not a regular file, just throw a not-exist error
// since anything calling this function should understand what that means. // since anything calling this function should understand what that means.

View File

@@ -73,6 +73,11 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
return errors.WithStack(err) return errors.WithStack(err)
} }
// Make sure the file exists basically.
if _, err := os.Stat(source); err != nil {
return errors.WithStack(err)
}
// Walk over all of the files spinning up an additional go-routine for each file we've encountered // Walk over all of the files spinning up an additional go-routine for each file we've encountered
// and then extract that file from the archive and write it to the disk. If any part of this process // and then extract that file from the archive and write it to the disk. If any part of this process
// encounters an error the entire process will be stopped. // encounters an error the entire process will be stopped.

View File

@@ -39,7 +39,7 @@ func newPooledWalker(fs *Filesystem) *PooledFileWalker {
// Create a worker pool that is the same size as the number of processors available on the // Create a worker pool that is the same size as the number of processors available on the
// system. Going much higher doesn't provide much of a performance boost, and is only more // system. Going much higher doesn't provide much of a performance boost, and is only more
// likely to lead to resource overloading anyways. // likely to lead to resource overloading anyways.
pool: workerpool.New(runtime.GOMAXPROCS(0)), pool: workerpool.New(runtime.NumCPU()),
} }
} }

View File

@@ -36,6 +36,9 @@ func (s *Server) Install(sync bool) error {
} }
} }
// Send the start event so the Panel can automatically update.
s.Events().Publish(InstallStartedEvent, "")
err := s.internalInstall() err := s.internalInstall()
s.Log().Debug("notifying panel of server install state") s.Log().Debug("notifying panel of server install state")
@@ -52,6 +55,10 @@ func (s *Server) Install(sync bool) error {
l.Warn("failed to notify panel of server install state") l.Warn("failed to notify panel of server install state")
} }
// Push an event to the websocket so we can auto-refresh the information in the panel once
// the install is completed.
s.Events().Publish(InstallCompletedEvent, "")
return err return err
} }
@@ -369,7 +376,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
| |
| Details | Details
| ------------------------------ | ------------------------------
Server UUID: {{.Server.Id()}} Server UUID: {{.Server.Id}}
Container Image: {{.Script.ContainerImage}} Container Image: {{.Script.ContainerImage}}
Container Entrypoint: {{.Script.Entrypoint}} Container Entrypoint: {{.Script.Entrypoint}}

View File

@@ -3,10 +3,12 @@ package server
import ( import (
"github.com/apex/log" "github.com/apex/log"
"github.com/creasty/defaults" "github.com/creasty/defaults"
"github.com/gammazero/workerpool"
"github.com/patrickmn/go-cache" "github.com/patrickmn/go-cache"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pterodactyl/wings/api" "github.com/pterodactyl/wings/api"
"github.com/remeh/sizedwaitgroup" "os"
"runtime"
"time" "time"
) )
@@ -23,16 +25,6 @@ func LoadDirectory() error {
return errors.New("cannot call LoadDirectory with a non-nil collection") return errors.New("cannot call LoadDirectory with a non-nil collection")
} }
// We could theoretically use a standard wait group here, however doing
// that introduces the potential to crash the program due to too many
// open files. This wouldn't happen on a small setup, but once the daemon is
// handling many servers you run that risk.
//
// For now just process 10 files at a time, that should be plenty fast to
// read and parse the YAML. We should probably make this configurable down
// the road to help big instances scale better.
wg := sizedwaitgroup.New(10)
configs, rerr, err := api.NewRequester().GetAllServerConfigurations() configs, rerr, err := api.NewRequester().GetAllServerConfigurations()
if err != nil || rerr != nil { if err != nil || rerr != nil {
if err != nil { if err != nil {
@@ -49,12 +41,13 @@ func LoadDirectory() error {
} }
log.WithField("total_configs", len(configs)).Debug("looping over received configurations from API") log.WithField("total_configs", len(configs)).Debug("looping over received configurations from API")
pool := workerpool.New(runtime.NumCPU())
for uuid, data := range configs { for uuid, data := range configs {
wg.Add() uuid := uuid
data := data
go func(uuid string, data *api.ServerConfigurationResponse) {
defer wg.Done()
pool.Submit(func() {
log.WithField("uuid", uuid).Debug("creating server object from configuration") log.WithField("uuid", uuid).Debug("creating server object from configuration")
s, err := FromConfiguration(data) s, err := FromConfiguration(data)
if err != nil { if err != nil {
@@ -68,12 +61,12 @@ func LoadDirectory() error {
} }
servers.Add(s) servers.Add(s)
}(uuid, data) })
} }
// Wait until we've processed all of the configuration files in the directory // Wait until we've processed all of the configuration files in the directory
// before continuing. // before continuing.
wg.Wait() pool.StopWait()
return nil return nil
} }
@@ -111,6 +104,11 @@ func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
Server: s, Server: s,
} }
// If the server's data directory exists, force disk usage calculation.
if _, err := os.Stat(s.Filesystem.Path()); err == nil {
go s.Filesystem.HasSpaceAvailable()
}
// Forces the configuration to be synced with the panel. // Forces the configuration to be synced with the panel.
if err := s.SyncWithConfiguration(data); err != nil { if err := s.SyncWithConfiguration(data); err != nil {
return nil, err return nil, err

View File

@@ -1,12 +1,80 @@
package server package server
type PowerAction struct { import (
Action string `json:"action"` "context"
"github.com/pkg/errors"
"golang.org/x/sync/semaphore"
"os"
"time"
)
type PowerAction string
// The power actions that can be performed for a given server. This taps into the given server
// environment and performs them in a way that prevents a race condition from occurring. For
// example, sending two "start" actions back to back will not process the second action until
// the first action has been completed.
//
// This utilizes a workerpool with a limit of one worker so that all of the actions execute
// in a sync manner.
const (
PowerActionStart = "start"
PowerActionStop = "stop"
PowerActionRestart = "restart"
PowerActionTerminate = "kill"
)
// Checks if the power action being received is valid.
func (pa PowerAction) IsValid() bool {
return pa == PowerActionStart ||
pa == PowerActionStop ||
pa == PowerActionTerminate ||
pa == PowerActionRestart
} }
func (pr *PowerAction) IsValid() bool { // Helper function that can receive a power action and then process the actions that need
return pr.Action == "start" || // to occur for it. This guards against someone calling Start() twice at the same time, or
pr.Action == "stop" || // trying to restart while another restart process is currently running.
pr.Action == "kill" || //
pr.Action == "restart" // However, the code design for the daemon does depend on the user correctly calling this
// function rather than making direct calls to the start/stop/restart functions on the
// environment struct.
func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error {
if s.powerLock == nil {
s.powerLock = semaphore.NewWeighted(1)
}
// Determines if we should wait for the lock or not. If a value greater than 0 is passed
// into this function we will wait that long for a lock to be acquired.
if len(waitSeconds) > 0 && waitSeconds[0] != 0 {
ctx, _ := context.WithTimeout(context.Background(), time.Second*time.Duration(waitSeconds[0]))
// Attempt to acquire a lock on the power action lock for up to 30 seconds. If more
// time than that passes an error will be propagated back up the chain and this
// request will be aborted.
if err := s.powerLock.Acquire(ctx, 1); err != nil {
return errors.WithMessage(err, "could not acquire lock on power state")
}
} else {
// If no wait duration was provided we will attempt to immediately acquire the lock
// and bail out with a context deadline error if it is not acquired immediately.
if ok := s.powerLock.TryAcquire(1); !ok {
return errors.WithMessage(context.DeadlineExceeded, "could not acquire lock on power state")
}
}
// Release the lock once the process being requested has finished executing.
defer s.powerLock.Release(1)
switch action {
case PowerActionStart:
return s.Environment.Start()
case PowerActionStop:
return s.Environment.Stop()
case PowerActionRestart:
return s.Environment.Restart()
case PowerActionTerminate:
return s.Environment.Terminate(os.Kill)
}
return errors.New("attempting to handle unknown power action")
} }

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pterodactyl/wings/api" "github.com/pterodactyl/wings/api"
"golang.org/x/sync/semaphore" "golang.org/x/sync/semaphore"
"os"
"strings" "strings"
"sync" "sync"
"time" "time"
@@ -19,6 +18,8 @@ type Server struct {
// Internal mutex used to block actions that need to occur sequentially, such as // Internal mutex used to block actions that need to occur sequentially, such as
// writing the configuration to the disk. // writing the configuration to the disk.
sync.RWMutex sync.RWMutex
emitterLock sync.Mutex
powerLock *semaphore.Weighted
// Maintains the configuration for the server. This is the data that gets returned by the Panel // Maintains the configuration for the server. This is the data that gets returned by the Panel
// such as build settings and container images. // such as build settings and container images.
@@ -157,23 +158,6 @@ func (s *Server) GetProcessConfiguration() (*api.ServerConfigurationResponse, *a
return api.NewRequester().GetServerConfiguration(s.Id()) return api.NewRequester().GetServerConfiguration(s.Id())
} }
// Helper function that can receieve a power action and then process the
// actions that need to occur for it.
func (s *Server) HandlePowerAction(action PowerAction) error {
switch action.Action {
case "start":
return s.Environment.Start()
case "restart":
return s.Environment.Restart()
case "stop":
return s.Environment.Stop()
case "kill":
return s.Environment.Terminate(os.Kill)
default:
return errors.New("an invalid power action was provided")
}
}
// Checks if the server is marked as being suspended or not on the system. // Checks if the server is marked as being suspended or not on the system.
func (s *Server) IsSuspended() bool { func (s *Server) IsSuspended() bool {
return s.Config().Suspended return s.Config().Suspended

View File

@@ -30,8 +30,8 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
// Grab a copy of the configuration to work on. // Grab a copy of the configuration to work on.
c := *s.Config() c := *s.Config()
// Lock our copy of the configuration since the defered unlock will end up acting upon this // Lock our copy of the configuration since the deferred unlock will end up acting upon this
// new memory address rather than the old one. If we don't lock this, the defered unlock will // new memory address rather than the old one. If we don't lock this, the deferred unlock will
// cause a panic when it goes to run. However, since we only update s.cfg at the end, if there // cause a panic when it goes to run. However, since we only update s.cfg at the end, if there
// is an error before that point we'll still properly unlock the original configuration for the // is an error before that point we'll still properly unlock the original configuration for the
// server. // server.