Compare commits
39 Commits
v1.0.0-bet ... v1.0.0-bet
| Author | SHA1 | Date |
|---|---|---|
|  | a9c81f37b2 |  |
|  | 9d350d845f |  |
|  | 8b0b40e377 |  |
|  | fef3b001c1 |  |
|  | 834bcf251e |  |
|  | 78c5fd219a |  |
|  | 177aa8e436 |  |
|  | ecb2cb05ce |  |
|  | 64df3e168f |  |
|  | 881cb84605 |  |
|  | 03ef52c0db |  |
|  | f889a193bf |  |
|  | 5366d0f652 |  |
|  | 5e8425ad6a |  |
|  | 38efb68e8a |  |
|  | cf33a2464a |  |
|  | cecc72110c |  |
|  | 8f1ebdd39f |  |
|  | 776062107b |  |
|  | da26b4c5c7 |  |
|  | 5889d0585b |  |
|  | 8af26ac864 |  |
|  | d3843e1d28 |  |
|  | 5b999db7f3 |  |
|  | afa6fb200e |  |
|  | b1940426c3 |  |
|  | 43795a4be3 |  |
|  | b811d2474e |  |
|  | e85b1cecb7 |  |
|  | 5036077152 |  |
|  | 373dbd355e |  |
|  | 7f9ec4402a |  |
|  | f0d6f67c6b |  |
|  | 0b761320cc |  |
|  | db0dc17937 |  |
|  | 79ee259874 |  |
|  | 4d8f06a3e0 |  |
|  | f567c2c15c |  |
|  | 7a6397bf17 |  |
.github/workflows/release.yml (vendored): 7 changes
@@ -23,7 +23,7 @@ jobs:
run: go test ./...

- name: Compress binary and make it executable
run: upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
run: upx --brute build/wings_linux_amd64 && chmod +x build/wings_linux_amd64

- name: Extract changelog
env:
@@ -32,7 +32,6 @@ jobs:
sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG
echo ::set-output name=version_name::`sed -nr "s/^## (${REF:10} .*)$/\1/p" CHANGELOG.md`

- name: Create checksum and add to changelog
run: |
SUM=`cd build && sha256sum wings_linux_amd64`
@@ -48,8 +47,8 @@ jobs:
git config --local user.name "Pterodactyl CI"
git checkout -b $BRANCH
git push -u origin $BRANCH
sed -i "s/ Version = \".*\"/ Version = \"${REF:11}\"/" config/app.php
git add config/app.php
sed -i "s/ Version = \".*\"/ Version = \"${REF:11}\"/" system/const.go
git add system/const.go
git commit -m "bump version for release"
git push

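As context for the two sed invocations above: GITHUB_REF arrives as "refs/tags/<tag>", so ${REF:10} strips the leading "refs/tags/" (10 characters) and ${REF:11} additionally drops the "v" prefix. A minimal standalone Go sketch of what the changelog extraction does (invocation and file name assumed, not part of this workflow):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    func main() {
        // Assumed invocation: go run . v1.0.0-beta.1 (mirrors ${REF:10}).
        version := os.Args[1]

        f, err := os.Open("CHANGELOG.md")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        in := false
        s := bufio.NewScanner(f)
        for s.Scan() {
            line := s.Text()
            if strings.HasPrefix(line, "## ") {
                // Start printing at the "## <version>" heading, stop at the next "## ".
                in = strings.HasPrefix(line, "## "+version)
                continue
            }
            if in {
                fmt.Println(line)
            }
        }
    }
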
cmd/root.go: 35 changes
@@ -3,6 +3,7 @@ package cmd
import (
"crypto/tls"
"fmt"
"github.com/gammazero/workerpool"
"net/http"
"os"
"path"
@@ -21,7 +22,6 @@ import (
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/sftp"
"github.com/pterodactyl/wings/system"
"github.com/remeh/sizedwaitgroup"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
@@ -146,13 +146,6 @@ func rootCmdRun(*cobra.Command, []string) {
}).Info("configured system user successfully")
}

log.Info("beginning file permission setting on server data directories")
if err := c.EnsureFilePermissions(); err != nil {
log.WithField("error", err).Error("failed to properly chown data directories")
} else {
log.Info("finished ensuring file permissions")
}

if err := server.LoadDirectory(); err != nil {
log.WithField("error", err).Fatal("failed to load server configurations")
return
@@ -172,19 +165,27 @@ func rootCmdRun(*cobra.Command, []string) {
log.WithField("server", s.Id()).Info("loaded configuration for server")
}

// Create a new WaitGroup that limits us to 4 servers being bootstrapped at a time
if !c.System.SetPermissionsOnBoot {
log.Warn("server file permission checking is currently disabled on boot!")
}

// Create a new workerpool that limits us to 4 servers being bootstrapped at a time
// on Wings. This allows us to ensure the environment exists, write configurations,
// and reboot processes without causing a slow-down due to sequential booting.
wg := sizedwaitgroup.New(4)
pool := workerpool.New(4)

for _, serv := range server.GetServers().All() {
wg.Add()
s := serv

go func(s *server.Server) {
defer wg.Done()
pool.Submit(func() {
if c.System.SetPermissionsOnBoot {
s.Log().Info("chowning server data directory")
if err := s.Filesystem.Chown("/"); err != nil {
s.Log().WithField("error", err).Warn("error during server data directory chown")
}
}

s.Log().Info("ensuring server environment exists")

// Create a server environment if none exists currently. This allows us to recover from Docker
// being reinstalled on the host system for example.
if err := s.Environment.Create(); err != nil {
@@ -204,7 +205,7 @@ func rootCmdRun(*cobra.Command, []string) {
// is that it was running, but we see that the container process is not currently running.
if r || (!r && s.IsRunning()) {
s.Log().Info("detected server is running, re-attaching to process...")
if err := s.Environment.Start(); err != nil {
if err := s.HandlePowerAction(server.PowerActionStart); err != nil {
s.Log().WithField("error", errors.WithStack(err)).Warn("failed to properly start server detected as already running")
}

@@ -214,11 +215,11 @@ func rootCmdRun(*cobra.Command, []string) {
// Addresses potentially invalid data in the stored file that can cause Wings to lose
// track of what the actual server state is.
s.SetState(server.ProcessOfflineState)
}(serv)
})
}

// Wait until all of the servers are ready to go before we fire up the HTTP server.
wg.Wait()
pool.StopWait()

// Initalize SFTP.
sftp.Initialize(c)

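A condensed sketch of the sizedwaitgroup-to-workerpool swap made in this hunk: workerpool.New(4) caps concurrency at four tasks, Submit queues work, and StopWait blocks until the queue drains, replacing the wg.Add/wg.Done/wg.Wait triplet. (Standalone example, not wings code.)

    package main

    import (
        "fmt"
        "time"

        "github.com/gammazero/workerpool"
    )

    func main() {
        pool := workerpool.New(4) // at most four tasks run at once
        for i := 0; i < 10; i++ {
            i := i // capture the loop variable, mirroring "s := serv" above
            pool.Submit(func() {
                time.Sleep(100 * time.Millisecond)
                fmt.Println("booted server", i)
            })
        }
        pool.StopWait() // equivalent to the old wg.Wait()
    }
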
@@ -1,19 +1,16 @@
package config

import (
"errors"
"fmt"
"github.com/apex/log"
"github.com/cobaugh/osrelease"
"github.com/creasty/defaults"
"github.com/gbrlsnchs/jwt/v3"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path"
"regexp"
"strconv"
"strings"
"sync"
@@ -81,8 +78,14 @@ type Configuration struct {
// to collect data and send events.
PanelLocation string `json:"remote" yaml:"remote"`

// AllowedMounts .
// AllowedMounts is a list of allowed host-system mount points.
// This is required to have the "Server Mounts" feature work properly.
AllowedMounts []string `json:"allowed_mounts" yaml:"allowed_mounts"`

// AllowedOrigins is a list of allowed request origins.
// The Panel URL is automatically allowed, this is only needed for adding
// additional origins.
AllowedOrigins []string `json:"allowed_origins" yaml:"allowed_origins"`
}

// Defines the configuration of the internal SFTP server.
@@ -148,7 +151,7 @@ func ReadConfiguration(path string) (*Configuration, error) {
return c, nil
}

var Mutex sync.RWMutex
var mu sync.RWMutex

var _config *Configuration
var _jwtAlgo *jwt.HMACSHA
@@ -158,14 +161,14 @@ var _debugViaFlag bool
// anything trying to set a different configuration value, or read the configuration
// will be paused until it is complete.
func Set(c *Configuration) {
Mutex.Lock()
mu.Lock()

if _config == nil || _config.AuthenticationToken != c.AuthenticationToken {
_jwtAlgo = jwt.NewHS256([]byte(c.AuthenticationToken))
}

_config = c
Mutex.Unlock()
mu.Unlock()
}

func SetDebugViaFlag(d bool) {
@@ -175,16 +178,16 @@ func SetDebugViaFlag(d bool) {
// Get the global configuration instance. This is a read-safe operation that will block
// if the configuration is presently being modified.
func Get() *Configuration {
Mutex.RLock()
defer Mutex.RUnlock()
mu.RLock()
defer mu.RUnlock()

return _config
}

// Returns the in-memory JWT algorithm.
func GetJwtAlgorithm() *jwt.HMACSHA {
Mutex.RLock()
defer Mutex.RUnlock()
mu.RLock()
defer mu.RUnlock()

return _jwtAlgo
}
@@ -193,7 +196,7 @@ func GetJwtAlgorithm() *jwt.HMACSHA {
func NewFromPath(path string) (*Configuration, error) {
c := new(Configuration)
if err := defaults.Set(c); err != nil {
return c, err
return c, errors.WithStack(err)
}

c.unsafeSetPath(path)
@@ -231,12 +234,12 @@ func (c *Configuration) EnsurePterodactylUser() (*user.User, error) {
if err == nil {
return u, c.setSystemUser(u)
} else if _, ok := err.(user.UnknownUserError); !ok {
return nil, err
return nil, errors.WithStack(err)
}

sysName, err := getSystemName()
if err != nil {
return nil, err
return nil, errors.WithStack(err)
}

var command = fmt.Sprintf("useradd --system --no-create-home --shell /bin/false %s", c.System.Username)
@@ -249,17 +252,17 @@ func (c *Configuration) EnsurePterodactylUser() (*user.User, error) {
// We have to create the group first on Alpine, so do that here before continuing on
// to the user creation process.
if _, err := exec.Command("addgroup", "-S", c.System.Username).Output(); err != nil {
return nil, err
return nil, errors.WithStack(err)
}
}

split := strings.Split(command, " ")
if _, err := exec.Command(split[0], split[1:]...).Output(); err != nil {
return nil, err
return nil, errors.WithStack(err)
}

if u, err := user.Lookup(c.System.Username); err != nil {
return nil, err
return nil, errors.WithStack(err)
} else {
return u, c.setSystemUser(u)
}
@@ -280,58 +283,6 @@ func (c *Configuration) setSystemUser(u *user.User) error {
return c.WriteToDisk()
}

// Ensures that the configured data directory has the correct permissions assigned to
// all of the files and folders within.
func (c *Configuration) EnsureFilePermissions() error {
// Don't run this unless it is configured to be run. On large system this can often slow
// things down dramatically during the boot process.
if !c.System.SetPermissionsOnBoot {
return nil
}

r := regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$")

files, err := ioutil.ReadDir(c.System.Data)
if err != nil {
return err
}

su, err := user.Lookup(c.System.Username)
if err != nil {
return err
}

wg := new(sync.WaitGroup)

for _, file := range files {
wg.Add(1)

// Asynchronously run through the list of files and folders in the data directory. If
// the item is not a folder, or is not a folder that matches the expected UUIDv4 format
// skip over it.
//
// If we do have a positive match, run a chown against the directory.
go func(f os.FileInfo) {
defer wg.Done()

if !f.IsDir() || !r.MatchString(f.Name()) {
return
}

uid, _ := strconv.Atoi(su.Uid)
gid, _ := strconv.Atoi(su.Gid)

if err := os.Chown(path.Join(c.System.Data, f.Name()), uid, gid); err != nil {
log.WithField("error", err).WithField("directory", f.Name()).Warn("failed to chown server directory")
}
}(file)
}

wg.Wait()

return nil
}

// Writes the configuration to the disk as a blocking operation by obtaining an exclusive
// lock on the file. This prevents something else from writing at the exact same time and
// leading to bad data conditions.
@@ -353,11 +304,11 @@ func (c *Configuration) WriteToDisk() error {

b, err := yaml.Marshal(&ccopy)
if err != nil {
return err
return errors.WithStack(err)
}

if err := ioutil.WriteFile(c.GetPath(), b, 0644); err != nil {
return err
return errors.WithStack(err)
}

return nil
@@ -367,7 +318,7 @@ func (c *Configuration) WriteToDisk() error {
func getSystemName() (string, error) {
// use osrelease to get release version and ID
if release, err := osrelease.Read(); err != nil {
return "", err
return "", errors.WithStack(err)
} else {
return release["ID"], nil
}

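The mutex rename above is behavioral as well as cosmetic: the exported Mutex let any importing package lock the configuration, while the unexported mu confines locking to this package. A minimal standalone sketch of the resulting read-mostly singleton (stub Configuration type assumed, not the real struct):

    package main

    import (
        "fmt"
        "sync"
    )

    // Configuration stands in for the real wings config struct.
    type Configuration struct{ PanelLocation string }

    var (
        mu      sync.RWMutex // unexported: only this package can take the lock
        _config *Configuration
    )

    func Set(c *Configuration) {
        mu.Lock()
        _config = c
        mu.Unlock()
    }

    func Get() *Configuration {
        mu.RLock()
        defer mu.RUnlock()
        return _config
    }

    func main() {
        Set(&Configuration{PanelLocation: "https://panel.example.com"})
        fmt.Println(Get().PanelLocation)
    }
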
@@ -1,5 +1,12 @@
package config

import (
"encoding/base64"
"encoding/json"
"github.com/docker/docker/api/types"
"github.com/pkg/errors"
)

type dockerNetworkInterfaces struct {
V4 struct {
Subnet string `default:"172.18.0.0/16"`
@@ -53,4 +60,28 @@ type DockerConfiguration struct {
// Defines the location of the timezone file on the host system that should
// be mounted into the created containers so that they all use the same time.
TimezonePath string `default:"/etc/timezone" json:"timezone_path" yaml:"timezone_path"`

// Registries .
Registries map[string]RegistryConfiguration `json:"registries" yaml:"registries"`
}

// RegistryConfiguration .
type RegistryConfiguration struct {
Username string `yaml:"username"`
Password string `yaml:"password"`
}

// Base64 .
func (c RegistryConfiguration) Base64() (string, error) {
authConfig := types.AuthConfig{
Username: c.Username,
Password: c.Password,
}

b, err := json.Marshal(authConfig)
if err != nil {
return "", errors.WithStack(err)
}

return base64.URLEncoding.EncodeToString(b), nil
}

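Hypothetical usage of the new RegistryConfiguration.Base64 helper with the Docker SDK (registry URL and credentials invented for illustration; wings wires this up in ensureImageExists later in this diff):

    package main

    import (
        "context"
        "io"
        "os"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
        "github.com/pterodactyl/wings/config"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv)
        if err != nil {
            panic(err)
        }

        // In wings these credentials come from config.Get().Docker.Registries,
        // keyed by registry host; the values here are invented.
        auth := config.RegistryConfiguration{Username: "user", Password: "secret"}

        b64, err := auth.Base64()
        if err != nil {
            panic(err)
        }

        out, err := cli.ImagePull(context.Background(),
            "registry.example.com/org/image:tag",
            types.ImagePullOptions{All: false, RegistryAuth: b64})
        if err != nil {
            panic(err)
        }
        defer out.Close()

        io.Copy(os.Stdout, out) // stream pull progress to stdout
    }
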
@@ -81,7 +81,7 @@ func (h *Handler) HandleLog(e *log.Entry) error {
}

func getErrorStack(err error, i bool) errors.StackTrace {
e, ok := errors.Cause(err).(tracer)
e, ok := err.(tracer)
if !ok {
if i {
// Just abort out of this and return a stacktrace leading up to this point. It isn't perfect
@@ -89,7 +89,7 @@ func getErrorStack(err error, i bool) errors.StackTrace {
return errors.Wrap(err, "failed to generate stacktrace for caught error").(tracer).StackTrace()
}

return getErrorStack(errors.New(err.Error()), true)
return getErrorStack(errors.Wrap(err, err.Error()), true)
}

st := e.StackTrace()

@@ -11,8 +11,22 @@ import (

// Set the access request control headers on all of the requests.
func SetAccessControlHeaders(c *gin.Context) {
c.Header("Access-Control-Allow-Origin", config.Get().PanelLocation)
c.Header("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")

o := c.GetHeader("Origin")
if o != config.Get().PanelLocation {
for _, origin := range config.Get().AllowedOrigins {
if o != origin {
continue
}

c.Header("Access-Control-Allow-Origin", origin)
c.Next()
return
}
}

c.Header("Access-Control-Allow-Origin", config.Get().PanelLocation)
c.Next()
}

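A small standalone mirror of the middleware's decision logic, useful for seeing the fallback behavior: the Panel URL always passes, any other origin must appear in allowed_origins, and unmatched origins fall back to the Panel URL header. (Sketch only, with invented URLs; the real handler sets gin headers.)

    package main

    import "fmt"

    // allowOrigin mirrors SetAccessControlHeaders above: it returns the value
    // for Access-Control-Allow-Origin and whether the origin matched.
    func allowOrigin(origin, panel string, allowed []string) (string, bool) {
        if origin == panel {
            return panel, true
        }
        for _, o := range allowed {
            if o == origin {
                return o, true
            }
        }
        return panel, false // header falls back to the panel URL
    }

    func main() {
        allowed := []string{"https://admin.example.com"}
        fmt.Println(allowOrigin("https://admin.example.com", "https://panel.example.com", allowed))
        fmt.Println(allowOrigin("https://evil.example.com", "https://panel.example.com", allowed))
    }
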
@@ -35,6 +35,7 @@ func Configure() *gin.Engine {
// These routes use signed URLs to validate access to the resource being requested.
router.GET("/download/backup", getDownloadBackup)
router.GET("/download/file", getDownloadFile)
router.POST("/upload/file", postServerUploadFiles)

// This route is special it sits above all of the other requests because we are
// using a JWT to authorize access to it, therefore it needs to be publicly

@@ -2,6 +2,7 @@ package router

import (
"bytes"
"context"
"github.com/apex/log"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
@@ -13,7 +14,9 @@ import (

// Returns a single server from the collection of servers.
func getServer(c *gin.Context) {
c.JSON(http.StatusOK, GetServer(c.Param("server")))
s := GetServer(c.Param("server"))

c.JSON(http.StatusOK, s.Proc())
}

// Returns the logs for a given server instance.
@@ -45,13 +48,15 @@ func getServerLogs(c *gin.Context) {
func postServerPower(c *gin.Context) {
s := GetServer(c.Param("server"))

var data server.PowerAction
// BindJSON sends 400 if the request fails, all we need to do is return
var data struct{
Action server.PowerAction `json:"action"`
}

if err := c.BindJSON(&data); err != nil {
return
}

if !data.IsValid() {
if !data.Action.IsValid() {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "The power action provided was not valid, should be one of \"stop\", \"start\", \"restart\", \"kill\"",
})
@@ -64,7 +69,7 @@ func postServerPower(c *gin.Context) {
//
// We don't really care about any of the other actions at this point, they'll all result
// in the process being stopped, which should have happened anyways if the server is suspended.
if (data.Action == "start" || data.Action == "restart") && s.IsSuspended() {
if (data.Action == server.PowerActionStart || data.Action == server.PowerActionRestart) && s.IsSuspended() {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot start or restart a server that is suspended.",
})
@@ -74,10 +79,15 @@ func postServerPower(c *gin.Context) {
// Pass the actual heavy processing off to a seperate thread to handle so that
// we can immediately return a response from the server. Some of these actions
// can take quite some time, especially stopping or restarting.
go func(server *server.Server) {
if err := server.HandlePowerAction(data); err != nil {
server.Log().WithFields(log.Fields{"action": data, "error": err}).
Error("encountered error processing a server power action in the background")
go func(s *server.Server) {
if err := s.HandlePowerAction(data.Action, 30); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
s.Log().WithField("action", data.Action).
Warn("could not acquire a lock while attempting to perform a power action")
} else {
s.Log().WithFields(log.Fields{"action": data, "error": err}).
Error("encountered error processing a server power action in the background")
}
}
}(s)

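The context.DeadlineExceeded branch above implies HandlePowerAction now waits (here 30 seconds) on a per-server lock. A sketch of that pattern using golang.org/x/sync/semaphore, which is how a timed-out acquisition surfaces as DeadlineExceeded (the actual wings internals may differ):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "golang.org/x/sync/semaphore"
    )

    func handlePowerAction(sem *semaphore.Weighted, waitSeconds int) error {
        ctx, cancel := context.WithTimeout(context.Background(), time.Duration(waitSeconds)*time.Second)
        defer cancel()

        if err := sem.Acquire(ctx, 1); err != nil {
            return err // context.DeadlineExceeded when another action holds the lock
        }
        defer sem.Release(1)

        // ... perform the start/stop/restart work here ...
        return nil
    }

    func main() {
        sem := semaphore.NewWeighted(1)
        _ = sem.Acquire(context.Background(), 1) // simulate an action already in flight

        err := handlePowerAction(sem, 1)
        fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
    }
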
@@ -4,12 +4,16 @@ import (
"bufio"
"context"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server"
"golang.org/x/sync/errgroup"
"mime/multipart"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
)
@@ -129,7 +133,17 @@ func putServerRenameFiles(c *gin.Context) {
case <-ctx.Done():
return ctx.Err()
default:
return s.Filesystem.Rename(pf, pt)
if err := s.Filesystem.Rename(pf, pt); err != nil {
// Return nil if the error is an is not exists.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
return nil
}

return err
}

return nil
}
})
}
@@ -155,6 +169,13 @@ func postServerCopyFile(c *gin.Context) {
}

if err := s.Filesystem.Copy(data.Location); err != nil {
// Check if the file does not exist.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
c.Status(http.StatusNotFound)
return
}

TrackedServerError(err, s).AbortWithServerError(c)
return
}
@@ -177,7 +198,7 @@ func postServerDeleteFiles(c *gin.Context) {

if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files were specififed for deletion.",
"error": "No files were specified for deletion.",
})
return
}
@@ -311,9 +332,86 @@ func postServerDecompressFiles(c *gin.Context) {
}

if err := s.Filesystem.DecompressFile(data.RootPath, data.File); err != nil {
// Check if the file does not exist.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
c.Status(http.StatusNotFound)
return
}

TrackedServerError(err, s).AbortWithServerError(c)
return
}

c.Status(http.StatusNoContent)
}

func postServerUploadFiles(c *gin.Context) {
token := tokens.UploadPayload{}
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
TrackedError(err).AbortWithServerError(c)
return
}

s := GetServer(token.ServerUuid)
if s == nil || !token.IsUniqueRequest() {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested resource was not found on this server.",
})
return
}

if !s.Filesystem.HasSpaceAvailable() {
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
"error": "This server does not have enough available disk space to accept any file uploads.",
})
return
}

form, err := c.MultipartForm()
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Failed to get multipart form.",
})
return
}

headers, ok := form.File["files"]
if !ok {
c.AbortWithStatusJSON(http.StatusNotModified, gin.H{
"error": "No files were attached to the request.",
})
return
}

directory := c.Query("directory")

for _, header := range headers {
p, err := s.Filesystem.SafePath(filepath.Join(directory, header.Filename))
if err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}

// We run this in a different method so I can use defer without any of
// the consequences caused by calling it in a loop.
if err := handleFileUpload(p, s, header); err != nil {
c.AbortWithError(http.StatusInternalServerError, err)
return
}
}
}

func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader) error {
file, err := header.Open()
if err != nil {
return errors.WithStack(err)
}
defer file.Close()

if err := s.Filesystem.Writefile(p, file); err != nil {
return errors.WithStack(err)
}

return nil
}

@@ -6,6 +6,7 @@ import (

type BackupPayload struct {
jwt.Payload

ServerUuid string `json:"server_uuid"`
BackupUuid string `json:"backup_uuid"`
UniqueId string `json:"unique_id"`
@@ -22,4 +23,4 @@ func (p *BackupPayload) GetPayload() *jwt.Payload {
// validates all of the request.
func (p *BackupPayload) IsUniqueRequest() bool {
return getTokenStore().IsValidToken(p.UniqueId)
}
}

router/tokens/upload.go (new file): 25 changes
@@ -0,0 +1,25 @@
package tokens

import (
"github.com/gbrlsnchs/jwt/v3"
)

type UploadPayload struct {
jwt.Payload

ServerUuid string `json:"server_uuid"`
UniqueId string `json:"unique_id"`
}

// Returns the JWT payload.
func (p *UploadPayload) GetPayload() *jwt.Payload {
return &p.Payload
}

// Determines if this JWT is valid for the given request cycle. If the
// unique ID passed in the token has already been seen before this will
// return false. This allows us to use this JWT as a one-time token that
// validates all of the request.
func (p *UploadPayload) IsUniqueRequest() bool {
return getTokenStore().IsValidToken(p.UniqueId)
}

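A minimal sketch of the one-time-token semantics behind IsUniqueRequest: the first sighting of a JWT's unique ID is accepted and recorded, any replay is rejected. (The real getTokenStore() in wings also expires entries; this in-memory version is illustrative only.)

    package main

    import (
        "fmt"
        "sync"
    )

    type tokenStore struct {
        mu   sync.Mutex
        seen map[string]struct{}
    }

    func (t *tokenStore) IsValidToken(id string) bool {
        t.mu.Lock()
        defer t.mu.Unlock()
        if _, ok := t.seen[id]; ok {
            return false // replayed token
        }
        t.seen[id] = struct{}{}
        return true
    }

    func main() {
        s := &tokenStore{seen: make(map[string]struct{})}
        fmt.Println(s.IsValidToken("abc")) // true
        fmt.Println(s.IsValidToken("abc")) // false
    }
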
@@ -26,9 +26,9 @@ func (h *Handler) ListenForExpiration(ctx context.Context) {
jwt := h.GetJwt()
if jwt != nil {
if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 0 {
h.SendJson(&Message{Event: TokenExpiredEvent})
_ = h.SendJson(&Message{Event: TokenExpiredEvent})
} else if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 180 {
h.SendJson(&Message{Event: TokenExpiringEvent})
_ = h.SendJson(&Message{Event: TokenExpiringEvent})
}
}
}
@@ -43,6 +43,8 @@ func (h *Handler) ListenForServerEvents(ctx context.Context) {
server.StatusEvent,
server.ConsoleOutputEvent,
server.InstallOutputEvent,
server.InstallStartedEvent,
server.InstallCompletedEvent,
server.DaemonMessageEvent,
server.BackupCompletedEvent,
}
@@ -52,17 +54,16 @@ func (h *Handler) ListenForServerEvents(ctx context.Context) {
h.server.Events().Subscribe(event, eventChannel)
}

select {
case <-ctx.Done():
for _, event := range events {
h.server.Events().Unsubscribe(event, eventChannel)
}
for d := range eventChannel {
select {
case <-ctx.Done():
for _, event := range events {
h.server.Events().Unsubscribe(event, eventChannel)
}

close(eventChannel)
default:
// Listen for different events emitted by the server and respond to them appropriately.
for d := range eventChannel {
h.SendJson(&Message{
close(eventChannel)
default:
_ = h.SendJson(&Message{
Event: d.Topic,
Args: []string{d.Data},
})

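Condensed sketch of the restructured listener loop above: range over the event channel and, for each message, check for context cancellation before forwarding. (Unsubscribe and channel-close handling trimmed for brevity; invented event names.)

    package main

    import (
        "context"
        "fmt"
    )

    func listen(ctx context.Context, events <-chan string) {
        for d := range events {
            select {
            case <-ctx.Done():
                return // wings unsubscribes from all topics and closes the channel here
            default:
                fmt.Println("send:", d) // stands in for h.SendJson(&Message{...})
            }
        }
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        ch := make(chan string, 2)
        ch <- "status"
        ch <- "console output"
        close(ch)

        listen(ctx, ch)
    }
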
@@ -1,6 +1,7 @@
package websocket

import (
"context"
"encoding/json"
"fmt"
"github.com/apex/log"
@@ -12,7 +13,6 @@ import (
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server"
"net/http"
"os"
"strings"
"sync"
"time"
@@ -57,7 +57,20 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
// Ensure that the websocket request is originating from the Panel itself,
// and not some other location.
CheckOrigin: func(r *http.Request) bool {
return r.Header.Get("Origin") == config.Get().PanelLocation
o := r.Header.Get("Origin")
if o == config.Get().PanelLocation {
return true
}

for _, origin := range config.Get().AllowedOrigins {
if o != origin {
continue
}

return true
}

return false
},
}

@@ -137,7 +150,7 @@ func (h *Handler) TokenValid() error {
// Sends an error back to the connected websocket instance by checking the permissions
// of the token. If the user has the "receive-errors" grant we will send back the actual
// error message, otherwise we just send back a standard error message.
func (h *Handler) SendErrorJson(msg Message, err error) error {
func (h *Handler) SendErrorJson(msg Message, err error, shouldLog ...bool) error {
j := h.GetJwt()

message := "an unexpected error was encountered while handling this request"
@@ -150,9 +163,11 @@ func (h *Handler) SendErrorJson(msg Message, err error) error {
wsm := Message{Event: ErrorEvent}
wsm.Args = []string{m}

if !server.IsSuspendedError(err) {
h.server.Log().WithFields(log.Fields{"event": msg.Event, "error_identifier": u.String(), "error": err}).
Error("failed to handle websocket process; an error was encountered processing an event")
if len(shouldLog) == 0 || (len(shouldLog) == 1 && shouldLog[0] == true) {
if !server.IsSuspendedError(err) {
h.server.Log().WithFields(log.Fields{"event": msg.Event, "error_identifier": u.String(), "error": err}).
Error("failed to handle websocket process; an error was encountered processing an event")
}
}

return h.unsafeSendJson(wsm)
@@ -258,37 +273,34 @@ func (h *Handler) HandleInbound(m Message) error {
}
case SetStateEvent:
{
switch strings.Join(m.Args, "") {
case "start":
if h.GetJwt().HasPermission(PermissionSendPowerStart) {
return h.server.Environment.Start()
}
break
case "stop":
if h.GetJwt().HasPermission(PermissionSendPowerStop) {
return h.server.Environment.Stop()
}
break
case "restart":
if h.GetJwt().HasPermission(PermissionSendPowerRestart) {
// If the server is alreay restarting don't do anything. Perhaps we send back an event
// in the future for this? For now no reason to knowingly trigger an error by trying to
// restart a process already restarting.
if h.server.Environment.IsRestarting() {
return nil
}
action := server.PowerAction(strings.Join(m.Args, ""))

return h.server.Environment.Restart()
actions := make(map[server.PowerAction]string)
actions[server.PowerActionStart] = PermissionSendPowerStart
actions[server.PowerActionStop] = PermissionSendPowerStop
actions[server.PowerActionRestart] = PermissionSendPowerRestart
actions[server.PowerActionTerminate] = PermissionSendPowerStop

// Check that they have permission to perform this action if it is needed.
if permission, exists := actions[action]; exists {
if !h.GetJwt().HasPermission(permission) {
return nil
}
break
case "kill":
if h.GetJwt().HasPermission(PermissionSendPowerStop) {
return h.server.Environment.Terminate(os.Kill)
}
break
}

return nil
err := h.server.HandlePowerAction(action)
if errors.Is(err, context.DeadlineExceeded) {
m, _ := h.GetErrorMessage("another power action is currently being processed for this server, please try again later")

h.SendJson(&Message{
Event: ErrorEvent,
Args: []string{m},
})

return nil
}

return err
}
case SendServerLogsEvent:
{

@@ -1,34 +1,32 @@
package server

import (
"github.com/pterodactyl/wings/parser"
"sync"
"github.com/gammazero/workerpool"
"runtime"
)

// Parent function that will update all of the defined configuration files for a server
// automatically to ensure that they always use the specified values.
func (s *Server) UpdateConfigurationFiles() {
wg := new(sync.WaitGroup)
pool := workerpool.New(runtime.NumCPU())

files := s.ProcessConfiguration().ConfigurationFiles
for _, v := range files {
wg.Add(1)
for _, cf := range files {
f := cf

go func(f parser.ConfigurationFile, server *Server) {
defer wg.Done()

p, err := server.Filesystem.SafePath(f.FileName)
pool.Submit(func() {
p, err := s.Filesystem.SafePath(f.FileName)
if err != nil {
server.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
s.Log().WithField("error", err).Error("failed to generate safe path for configuration file")

return
}

if err := f.Parse(p, false); err != nil {
server.Log().WithField("error", err).Error("failed to parse and update server configuration file")
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
}
}(v, s)
})
}

wg.Wait()
pool.StopWait()
}

@@ -82,5 +82,5 @@ func (s *Server) handleServerCrash() error {

s.crasher.SetLastCrash(time.Now())

return s.Environment.Start()
return s.HandlePowerAction(PowerActionStart)
}

@@ -36,7 +36,6 @@ type Environment interface {
// unnecessary double/triple/quad looping issues if multiple people press restart or spam the
// button to restart.
Restart() error
IsRestarting() bool

// Waits for a server instance to stop gracefully. If the server is still detected
// as running after seconds, an error will be returned, or the server will be terminated

@@ -16,7 +16,6 @@ import (
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"golang.org/x/sync/semaphore"
"io"
"os"
"path/filepath"
@@ -35,28 +34,19 @@ type DockerEnvironment struct {
// The Docker client being used for this instance.
Client *client.Client

// Tracks if we are currently attached to the server container. This allows us to attach
// once and then just use that attachment to stream logs out of the server and also stream
// commands back into it without constantly attaching and detaching.
attached bool

// Controls the hijacked response stream which exists only when we're attached to
// the running container instance.
stream types.HijackedResponse
stream *types.HijackedResponse

// Holds the stats stream used by the polling commands so that we can easily close
// it out.
stats io.ReadCloser

// Locks when we're performing a restart to avoid trying to restart a process that is already
// being restarted.
restartSem *semaphore.Weighted
}

// Set if this process is currently attached to the process.
func (d *DockerEnvironment) SetAttached(a bool) {
func (d *DockerEnvironment) SetStream(s *types.HijackedResponse) {
d.Lock()
d.attached = a
d.stream = s
d.Unlock()
}

@@ -65,7 +55,7 @@ func (d *DockerEnvironment) IsAttached() bool {
d.RLock()
defer d.RUnlock()

return d.attached
return d.stream != nil
}

// Creates a new base Docker environment. A server must still be attached to it.
@@ -313,7 +303,7 @@ func (d *DockerEnvironment) Stop() error {
// If the container does not exist just mark the process as stopped and return without
// an error.
if client.IsErrNotFound(err) {
d.SetAttached(false)
d.SetStream(nil)
d.Server.SetState(ProcessOfflineState)

return nil
@@ -325,60 +315,19 @@ func (d *DockerEnvironment) Stop() error {
return nil
}

// Try to acquire a lock to restart the server. If one cannot be obtained within 5 seconds return
// an error to the caller. You should ideally be checking IsRestarting() before calling this function
// to avoid unnecessary delays since you can respond immediately from that.
func (d *DockerEnvironment) acquireRestartLock() error {
if d.restartSem == nil {
d.restartSem = semaphore.NewWeighted(1)
}

ctx, _ := context.WithTimeout(context.Background(), time.Second*5)

return d.restartSem.Acquire(ctx, 1)
}

// Restarts the server process by waiting for the process to gracefully stop and then triggering a
// start command. This will return an error if there is already a restart process executing for the
// server. The lock is released when the process is stopped and a start has begun.
func (d *DockerEnvironment) Restart() error {
d.Server.Log().Debug("attempting to acquire restart lock...")
if err := d.acquireRestartLock(); err != nil {
d.Server.Log().Warn("failed to acquire restart lock; already acquired by a different process")
return err
}

d.Server.Log().Debug("acquired restart lock")

err := d.WaitForStop(60, false)
if err != nil {
d.restartSem.Release(1)
return err
}

// Release the restart lock, it is now safe for someone to attempt restarting the server again.
d.restartSem.Release(1)

// Start the process.
return d.Start()
}

// Check if the server is currently running the restart process by checking if there is a semaphore
// allocated, and if so, if we can aquire a lock on it.
func (d *DockerEnvironment) IsRestarting() bool {
if d.restartSem == nil {
return false
}

if d.restartSem.TryAcquire(1) {
d.restartSem.Release(1)

return false
}

return true
}

// Attempts to gracefully stop a server using the defined stop command. If the server
// does not stop after seconds have passed, an error will be returned, or the instance
// will be terminated forcefully depending on the value of the second argument.
@@ -469,7 +418,7 @@ func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
//
// However, someone reported an error in Discord about this scenario happening,
// so I guess this should prevent it? They didn't tell me how they caused it though
// so thats a mystery that will have to go unsolved.
// so that's a mystery that will have to go unsolved.
//
// @see https://github.com/pterodactyl/panel/issues/2003
if client.IsErrNotFound(err) {
@@ -495,23 +444,24 @@ func (d *DockerEnvironment) Attach() error {
return errors.WithStack(err)
}

var err error
d.stream, err = d.Client.ContainerAttach(context.Background(), d.Server.Id(), types.ContainerAttachOptions{
opts := types.ContainerAttachOptions{
Stdin: true,
Stdout: true,
Stderr: true,
Stream: true,
})
}

if err != nil {
// Set the stream again with the container.
if st, err := d.Client.ContainerAttach(context.Background(), d.Server.Id(), opts); err != nil {
return errors.WithStack(err)
} else {
d.SetStream(&st)
}

console := Console{
Server: d.Server,
}

d.SetAttached(true)
go func() {
if err := d.EnableResourcePolling(); err != nil {
d.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to enable resource polling on server")
@@ -522,7 +472,7 @@ func (d *DockerEnvironment) Attach() error {
defer d.stream.Close()
defer func() {
d.Server.SetState(ProcessOfflineState)
d.SetAttached(false)
d.SetStream(nil)
}()

io.Copy(console, d.stream.Reader)
@@ -654,7 +604,33 @@ func (d *DockerEnvironment) ensureImageExists() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
defer cancel()

out, err := d.Client.ImagePull(ctx, d.Image(), types.ImagePullOptions{All: false})
image := d.Image()

// Get a registry auth configuration from the config.
var registryAuth *config.RegistryConfiguration
for registry, c := range config.Get().Docker.Registries {
if !strings.HasPrefix(image, registry) {
continue
}

log.WithField("registry", registry).Debug("using authentication for repository")
registryAuth = &c
break
}

// Get the ImagePullOptions.
imagePullOptions := types.ImagePullOptions{All: false}
if registryAuth != nil {
b64, err := registryAuth.Base64()
if err != nil {
log.WithError(err).Error("failed to get registry auth credentials")
}

// b64 is a string so if there is an error it will just be empty, not nil.
imagePullOptions.RegistryAuth = b64
}

out, err := d.Client.ImagePull(ctx, image, imagePullOptions)
if err != nil {
images, ierr := d.Client.ImageList(ctx, types.ImageListOptions{})
if ierr != nil {
@@ -740,49 +716,25 @@ func (d *DockerEnvironment) Create() error {
},
}

mounts := []mount.Mount{
{
Target: "/home/container",
Source: d.Server.Filesystem.Path(),
Type: mount.TypeBind,
ReadOnly: false,
},
mounts, err := d.getContainerMounts()
if err != nil {
return errors.WithMessage(err, "could not build container mount points slice")
}

var mounted bool
for _, m := range d.Server.Config().Mounts {
mounted = false
source := filepath.Clean(m.Source)
target := filepath.Clean(m.Target)
customMounts, err := d.getCustomMounts()
if err != nil {
return errors.WithMessage(err, "could not build custom container mount points slice")
}

for _, allowed := range config.Get().AllowedMounts {
if !strings.HasPrefix(source, allowed) {
continue
}
if len(customMounts) > 0 {
mounts = append(mounts, customMounts...)

mounts = append(mounts, mount.Mount{
Type: mount.TypeBind,

Source: source,
Target: target,
ReadOnly: m.ReadOnly,
})

mounted = true
break
}

logger := log.WithFields(log.Fields{
"server": d.Server.Id(),
"source_path": source,
"target_path": target,
"read_only": m.ReadOnly,
})

if mounted {
logger.Debug("attaching mount to server's container")
} else {
logger.Warn("skipping mount because it isn't allowed")
for _, m := range customMounts {
d.Server.Log().WithFields(log.Fields{
"source_path": m.Source,
"target_path": m.Target,
"read_only": m.ReadOnly,
}).Debug("attaching custom server mount point to container")
}
}

@@ -833,9 +785,96 @@ func (d *DockerEnvironment) Create() error {
return nil
}

// Returns the default container mounts for the server instance. This includes the data directory
// for the server as well as any timezone related files if they exist on the host system so that
// servers running within the container will use the correct time.
func (d *DockerEnvironment) getContainerMounts() ([]mount.Mount, error) {
var m []mount.Mount

m = append(m, mount.Mount{
Target: "/home/container",
Source: d.Server.Filesystem.Path(),
Type: mount.TypeBind,
ReadOnly: false,
})

// Try to mount in /etc/localtime and /etc/timezone if they exist on the host system.
if _, err := os.Stat("/etc/localtime"); err != nil {
if !os.IsNotExist(err) {
return nil, err
}
} else {
m = append(m, mount.Mount{
Target: "/etc/localtime",
Source: "/etc/localtime",
Type: mount.TypeBind,
ReadOnly: true,
})
}

if _, err := os.Stat("/etc/timezone"); err != nil {
if !os.IsNotExist(err) {
return nil, err
}
} else {
m = append(m, mount.Mount{
Target: "/etc/timezone",
Source: "/etc/timezone",
Type: mount.TypeBind,
ReadOnly: true,
})
}

return m, nil
}

// Returns the custom mounts for a given server after verifying that they are within a list of
// allowed mount points for the node.
func (d *DockerEnvironment) getCustomMounts() ([]mount.Mount, error) {
var mounts []mount.Mount

// TODO: probably need to handle things trying to mount directories that do not exist.
for _, m := range d.Server.Config().Mounts {
source := filepath.Clean(m.Source)
target := filepath.Clean(m.Target)

logger := d.Server.Log().WithFields(log.Fields{
"source_path": source,
"target_path": target,
"read_only": m.ReadOnly,
})

mounted := false
for _, allowed := range config.Get().AllowedMounts {
if !strings.HasPrefix(source, allowed) {
continue
}

mounted = true
mounts = append(mounts, mount.Mount{
Source: source,
Target: target,
Type: mount.TypeBind,
ReadOnly: m.ReadOnly,
})

break
}

if !mounted {
logger.Warn("skipping custom server mount, not in list of allowed mount points")
}
}

return mounts, nil
}

// Sends the specified command to the stdin of the running container instance. There is no
// confirmation that this data is sent successfully, only that it gets pushed into the stdin.
func (d *DockerEnvironment) SendCommand(c string) error {
d.RLock()
defer d.RUnlock()

if !d.IsAttached() {
return errors.New("attempting to send command to non-attached instance")
}
@@ -928,7 +967,7 @@ func (d *DockerEnvironment) portBindings() nat.PortMap {
for ip, ports := range d.Server.Config().Allocations.Mappings {
for _, port := range ports {
// Skip over invalid ports.
if port < 0 || port > 65535 {
if port < 1 || port > 65535 {
continue
}

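Sketch of the allow-list check inside getCustomMounts, plus a hardening note: a bare strings.HasPrefix on cleaned paths lets /mnt/shared also match /mnt/shared-other. Appending a path separator to the allowed root (shown below as a hypothetical tightening, not what this diff does) closes that off.

    package main

    import (
        "fmt"
        "path/filepath"
        "strings"
    )

    // isAllowed mirrors the prefix check above, with a trailing-separator guard
    // added (hypothetical hardening, not part of this diff).
    func isAllowed(source string, allowed []string) bool {
        source = filepath.Clean(source)
        for _, a := range allowed {
            a = filepath.Clean(a)
            if source == a || strings.HasPrefix(source, a+string(filepath.Separator)) {
                return true
            }
        }
        return false
    }

    func main() {
        allowed := []string{"/mnt/shared"}
        fmt.Println(isAllowed("/mnt/shared/maps", allowed))  // true
        fmt.Println(isAllowed("/mnt/shared-other", allowed)) // false here, true with a bare HasPrefix
    }
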
@@ -9,12 +9,14 @@ import (
// Defines all of the possible output events for a server.
// noinspection GoNameStartsWithPackageName
const (
DaemonMessageEvent = "daemon message"
InstallOutputEvent = "install output"
ConsoleOutputEvent = "console output"
StatusEvent = "status"
StatsEvent = "stats"
BackupCompletedEvent = "backup completed"
DaemonMessageEvent = "daemon message"
InstallOutputEvent = "install output"
InstallStartedEvent = "install started"
InstallCompletedEvent = "install completed"
ConsoleOutputEvent = "console output"
StatusEvent = "status"
StatsEvent = "stats"
BackupCompletedEvent = "backup completed"
)

type Event struct {
@@ -25,14 +27,17 @@ type Event struct {
type EventBus struct {
sync.RWMutex

subscribers map[string][]chan Event
subscribers map[string]map[chan Event]struct{}
}

// Returns the server's emitter instance.
func (s *Server) Events() *EventBus {
s.emitterLock.Lock()
defer s.emitterLock.Unlock()

if s.emitter == nil {
s.emitter = &EventBus{
subscribers: map[string][]chan Event{},
subscribers: make(map[string]map[chan Event]struct{}),
}
}

@@ -41,9 +46,6 @@ func (s *Server) Events() *EventBus {

// Publish data to a given topic.
func (e *EventBus) Publish(topic string, data string) {
e.RLock()
defer e.RUnlock()

t := topic
// Some of our topics for the socket support passing a more specific namespace,
// such as "backup completed:1234" to indicate which specific backup was completed.
@@ -58,13 +60,19 @@ func (e *EventBus) Publish(topic string, data string) {
}
}

if ch, ok := e.subscribers[t]; ok {
go func(data Event, cs []chan Event) {
for _, channel := range cs {
channel <- data
// Acquire a read lock and loop over all of the channels registered for the topic. This
// avoids a panic crash if the process tries to unregister the channel while this routine
// is running.
go func() {
e.RLock()
defer e.RUnlock()

if ch, ok := e.subscribers[t]; ok {
for channel := range ch {
channel <- Event{Data: data, Topic: topic}
}
}(Event{Data: data, Topic: topic}, ch)
}
}
}()
}

func (e *EventBus) PublishJson(topic string, data interface{}) error {
@@ -83,24 +91,25 @@ func (e *EventBus) Subscribe(topic string, ch chan Event) {
e.Lock()
defer e.Unlock()

if p, ok := e.subscribers[topic]; ok {
e.subscribers[topic] = append(p, ch)
} else {
e.subscribers[topic] = append([]chan Event{}, ch)
if _, exists := e.subscribers[topic]; !exists {
e.subscribers[topic] = make(map[chan Event]struct{})
}

// Only set the channel if there is not currently a matching one for this topic. This
// avoids registering two identical listeners for the same topic and causing pain in
// the unsubscribe functionality as well.
if _, exists := e.subscribers[topic][ch]; !exists {
e.subscribers[topic][ch] = struct{}{}
}
}

// Unsubscribe a channel from a topic.
// Unsubscribe a channel from a given topic.
func (e *EventBus) Unsubscribe(topic string, ch chan Event) {
e.Lock()
defer e.Unlock()

if _, ok := e.subscribers[topic]; ok {
for i := range e.subscribers[topic] {
if ch == e.subscribers[topic][i] {
e.subscribers[topic] = append(e.subscribers[topic][:i], e.subscribers[topic][i+1:]...)
}
}
if _, exists := e.subscribers[topic][ch]; exists {
delete(e.subscribers[topic], ch)
}
}

@@ -112,9 +121,6 @@ func (e *EventBus) UnsubscribeAll() {
e.Lock()
defer e.Unlock()

// Loop over all of the subscribers and just remove all of the events
// for them.
for t := range e.subscribers {
e.subscribers[t] = make([]chan Event, 0)
}
// Reset the entire struct into an empty map.
e.subscribers = make(map[string]map[chan Event]struct{})
}

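Minimal standalone sketch of the new map[chan Event]struct{} subscriber set: keying on the channel makes Subscribe idempotent and Unsubscribe an O(1) delete instead of the old slice scan (which, as the removed code shows, also mutated the slice while iterating over it).

    package main

    import (
        "fmt"
        "sync"
    )

    type Event struct{ Topic, Data string }

    type EventBus struct {
        sync.RWMutex
        subscribers map[string]map[chan Event]struct{}
    }

    func (e *EventBus) Subscribe(topic string, ch chan Event) {
        e.Lock()
        defer e.Unlock()
        if _, ok := e.subscribers[topic]; !ok {
            e.subscribers[topic] = make(map[chan Event]struct{})
        }
        e.subscribers[topic][ch] = struct{}{} // set semantics: re-adding is a no-op
    }

    func (e *EventBus) Unsubscribe(topic string, ch chan Event) {
        e.Lock()
        defer e.Unlock()
        delete(e.subscribers[topic], ch) // O(1), safe even if never subscribed
    }

    func (e *EventBus) Publish(topic, data string) {
        e.RLock()
        defer e.RUnlock()
        for ch := range e.subscribers[topic] {
            ch <- Event{Topic: topic, Data: data}
        }
    }

    func main() {
        bus := &EventBus{subscribers: make(map[string]map[chan Event]struct{})}
        ch := make(chan Event, 1)
        bus.Subscribe("status", ch)
        bus.Subscribe("status", ch) // no duplicate listener registered
        bus.Publish("status", "running")
        fmt.Println(<-ch)
        bus.Unsubscribe("status", ch)
    }
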
@@ -40,8 +40,8 @@ func IsPathResolutionError(err error) bool {
}

type Filesystem struct {
Server *Server
cacheDiskMu sync.Mutex
Server *Server
cacheDiskMu sync.Mutex
}

// Returns the root path that contains all of a server's data.
@@ -114,10 +114,10 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
}

// Generate a path to the file by cleaning it up and appending the root server path to it. This
// DOES NOT gaurantee that the file resolves within the server data directory. You'll want to use
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
// the fs.unsafeIsInDataDirectory(p) function to confirm.
func (fs *Filesystem) unsafeFilePath(p string) string {
// Calling filpath.Clean on the joined directory will resolve it to the absolute path,
// Calling filepath.Clean on the joined directory will resolve it to the absolute path,
// removing any ../ type of resolution arguments, and leaving us with a direct path link.
//
// This will also trim the existing root path off the beginning of the path passed to
@@ -424,7 +424,7 @@ func (fs *Filesystem) unsafeStat(p string) (*Stat, error) {
return st, nil
}

// Creates a new directory (name) at a specificied path (p) for the server.
// Creates a new directory (name) at a specified path (p) for the server.
func (fs *Filesystem) CreateDirectory(name string, p string) error {
cleaned, err := fs.SafePath(path.Join(p, name))
if err != nil {
@@ -463,70 +463,46 @@ func (fs *Filesystem) Rename(from string, to string) error {
return os.Rename(cleanedFrom, cleanedTo)
}

// Recursively iterates over a directory and sets the permissions on all of the
// underlying files.
// Recursively iterates over a file or directory and sets the permissions on all of the
// underlying files. Iterate over all of the files and directories. If it is a file just
// go ahead and perform the chown operation. Otherwise dig deeper into the directory until
// we've run out of directories to dig into.
func (fs *Filesystem) Chown(path string) error {
cleaned, err := fs.SafePath(path)
if err != nil {
return errors.WithStack(err)
}

if s, err := os.Stat(cleaned); err != nil {
return errors.WithStack(err)
} else if !s.IsDir() {
return os.Chown(cleaned, config.Get().System.User.Uid, config.Get().System.User.Gid)
}
uid := config.Get().System.User.Uid
gid := config.Get().System.User.Gid

return fs.chownDirectory(cleaned)
}

// Iterate over all of the files and directories. If it is a file just go ahead and perform
// the chown operation. Otherwise dig deeper into the directory until we've run out of
// directories to dig into.
func (fs *Filesystem) chownDirectory(path string) error {
var wg sync.WaitGroup

cleaned, err := fs.SafePath(path)
if err != nil {
// Start by just chowning the initial path that we received.
if err := os.Chown(cleaned, uid, gid); err != nil {
return errors.WithStack(err)
}

// Chown the directory itself.
os.Chown(cleaned, config.Get().System.User.Uid, config.Get().System.User.Gid)

files, err := ioutil.ReadDir(cleaned)
if err != nil {
return errors.WithStack(err)
// If this is not a directory we can now return from the function, there is nothing
// left that we need to do.
if st, _ := os.Stat(cleaned); !st.IsDir() {
return nil
}

for _, f := range files {
// If this was a directory, begin walking over its contents recursively and ensure that all
// of the subfiles and directories get their permissions updated as well.
return fs.Walk(cleaned, func(path string, f os.FileInfo, err error) error {
if err != nil {
return fs.handleWalkerError(err, f)
}

// Do not attempt to chmod a symlink. Go's os.Chown function will affect the symlink
// so if it points to a location outside the data directory the user would be able to
// (un)intentionally modify that files permissions.
if f.Mode()&os.ModeSymlink != 0 {
continue
return nil
}

p, err := fs.SafeJoin(cleaned, f)
if err != nil {
return err
}

if f.IsDir() {
wg.Add(1)

go func(p string) {
defer wg.Done()
fs.chownDirectory(p)
}(p)
} else {
os.Chown(p, config.Get().System.User.Uid, config.Get().System.User.Gid)
}
}

wg.Wait()

return nil
return os.Chown(path, uid, gid)
})
}

// Copies a given file to the same location and appends a suffix to the file to indicate that
@@ -540,7 +516,7 @@ func (fs *Filesystem) Copy(p string) error {
}

if s, err := os.Stat(cleaned); err != nil {
return err
return errors.WithStack(err)
} else if s.IsDir() || !s.Mode().IsRegular() {
// If this is a directory or not a regular file, just throw a not-exist error
// since anything calling this function should understand what that means.

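Standalone approximation of the rewritten Chown using the standard library's filepath.Walk in place of wings' pooled fs.Walk: chown the root, return early for plain files, then walk and chown everything below while skipping symlinks (filepath.Walk reports children via Lstat, so the ModeSymlink check works as intended).

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func chownRecursive(root string, uid, gid int) error {
        if err := os.Chown(root, uid, gid); err != nil {
            return err
        }

        st, err := os.Stat(root)
        if err != nil {
            return err
        }
        if !st.IsDir() {
            return nil // a plain file was already handled above
        }

        return filepath.Walk(root, func(p string, f os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            if f.Mode()&os.ModeSymlink != 0 {
                return nil // never chown through a symlink
            }
            return os.Chown(p, uid, gid)
        })
    }

    func main() {
        // Hypothetical path and IDs, for illustration only.
        if err := chownRecursive("/tmp/example-server", 1000, 1000); err != nil {
            fmt.Println("chown failed:", err)
        }
    }
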
@@ -73,6 +73,11 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
 		return errors.WithStack(err)
 	}

+	// Make sure the file exists basically.
+	if _, err := os.Stat(source); err != nil {
+		return errors.WithStack(err)
+	}
+
 	// Walk over all of the files spinning up an additional go-routine for each file we've encountered
 	// and then extract that file from the archive and write it to the disk. If any part of this process
 	// encounters an error the entire process will be stopped.
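The comment describes a fan-out where each archive entry is extracted on its own goroutine and the first error aborts the run. A hedged sketch of that shape using golang.org/x/sync/errgroup; the entry list and writeToDisk callback are stand-ins, since the actual extraction code is not part of this hunk:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// extractAll runs writeToDisk for every entry on its own goroutine.
// g.Wait() returns the first non-nil error from any of them.
func extractAll(entries []string, writeToDisk func(string) error) error {
	var g errgroup.Group
	for _, e := range entries {
		e := e // capture the loop variable for the closure (pre-Go 1.22)
		g.Go(func() error {
			return writeToDisk(e)
		})
	}
	return g.Wait()
}

func main() {
	err := extractAll([]string{"a.txt", "b.txt"}, func(name string) error {
		fmt.Println("extracting", name)
		return nil
	})
	fmt.Println("done:", err)
}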
@@ -39,7 +39,7 @@ func newPooledWalker(fs *Filesystem) *PooledFileWalker {
 		// Create a worker pool that is the same size as the number of processors available on the
 		// system. Going much higher doesn't provide much of a performance boost, and is only more
 		// likely to lead to resource overloading anyways.
-		pool: workerpool.New(runtime.GOMAXPROCS(0)),
+		pool: workerpool.New(runtime.NumCPU()),
 	}
 }
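For context, the gammazero/workerpool API used in this change is small: New sizes the pool, Submit queues a task, and StopWait blocks until the queue drains. A minimal usage sketch (the task body is illustrative):

package main

import (
	"fmt"
	"runtime"

	"github.com/gammazero/workerpool"
)

func main() {
	// One worker per logical CPU, matching the line added above.
	pool := workerpool.New(runtime.NumCPU())

	for i := 0; i < 10; i++ {
		i := i // capture for the closure
		pool.Submit(func() {
			fmt.Println("processing item", i)
		})
	}

	// Block until every submitted task has run, then stop the pool.
	pool.StopWait()
}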
@@ -36,6 +36,9 @@ func (s *Server) Install(sync bool) error {
 		}
 	}

+	// Send the start event so the Panel can automatically update.
+	s.Events().Publish(InstallStartedEvent, "")
+
 	err := s.internalInstall()

 	s.Log().Debug("notifying panel of server install state")
@@ -52,6 +55,10 @@ func (s *Server) Install(sync bool) error {
 		l.Warn("failed to notify panel of server install state")
 	}

+	// Push an event to the websocket so we can auto-refresh the information in the panel once
+	// the install is completed.
+	s.Events().Publish(InstallCompletedEvent, "")
+
 	return err
 }
@@ -369,7 +376,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {

 | Details
 | ------------------------------
-Server UUID: {{.Server.Id()}}
+Server UUID: {{.Server.Id}}
 Container Image: {{.Script.ContainerImage}}
 Container Entrypoint: {{.Script.Entrypoint}}
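The {{.Server.Id()}} → {{.Server.Id}} fix matters because Go's text/template invokes a niladic method by bare name; the parenthesized form fails to parse. A small sketch of the rule, with an illustrative type standing in for the real server struct:

package main

import (
	"os"
	"text/template"
)

type Server struct{ id string }

// Id takes no arguments, so templates invoke it as {{.Id}}; writing
// {{.Id()}} is a template parse error.
func (s Server) Id() string { return s.id }

func main() {
	t := template.Must(template.New("t").Parse("Server UUID: {{.Id}}\n"))
	_ = t.Execute(os.Stdout, Server{id: "abc-123"})
}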
@@ -3,10 +3,12 @@ package server
 import (
 	"github.com/apex/log"
 	"github.com/creasty/defaults"
+	"github.com/gammazero/workerpool"
 	"github.com/patrickmn/go-cache"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/api"
-	"github.com/remeh/sizedwaitgroup"
 	"os"
+	"runtime"
 	"time"
 )
@@ -23,16 +25,6 @@ func LoadDirectory() error {
 		return errors.New("cannot call LoadDirectory with a non-nil collection")
 	}

-	// We could theoretically use a standard wait group here, however doing
-	// that introduces the potential to crash the program due to too many
-	// open files. This wouldn't happen on a small setup, but once the daemon is
-	// handling many servers you run that risk.
-	//
-	// For now just process 10 files at a time, that should be plenty fast to
-	// read and parse the YAML. We should probably make this configurable down
-	// the road to help big instances scale better.
-	wg := sizedwaitgroup.New(10)
-
 	configs, rerr, err := api.NewRequester().GetAllServerConfigurations()
 	if err != nil || rerr != nil {
 		if err != nil {
@@ -49,12 +41,13 @@ func LoadDirectory() error {
 	}

 	log.WithField("total_configs", len(configs)).Debug("looping over received configurations from API")

+	pool := workerpool.New(runtime.NumCPU())
 	for uuid, data := range configs {
-		wg.Add()
-
-		go func(uuid string, data *api.ServerConfigurationResponse) {
-			defer wg.Done()
+		uuid := uuid
+		data := data

+		pool.Submit(func() {
 			log.WithField("uuid", uuid).Debug("creating server object from configuration")
 			s, err := FromConfiguration(data)
 			if err != nil {
@@ -68,12 +61,12 @@ func LoadDirectory() error {
 			}

 			servers.Add(s)
-		}(uuid, data)
+		})
 	}

 	// Wait until we've processed all of the configuration files in the directory
 	// before continuing.
-	wg.Wait()
+	pool.StopWait()

 	return nil
 }
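The uuid := uuid and data := data re-declarations exist because, before Go 1.22, a range loop reuses its iteration variables, so every closure handed to the pool would otherwise see the final iteration's values. A minimal demonstration of the pitfall's fix:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, name := range []string{"a", "b", "c"} {
		name := name // re-declare so each closure captures its own copy
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Without the re-declaration above, every goroutine would be
			// likely to print "c", the loop's final value (pre-Go 1.22).
			fmt.Println(name)
		}()
	}
	wg.Wait()
}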
@@ -108,7 +101,12 @@ func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
 		Server: s,
 	}
 	s.Filesystem = Filesystem{
-		Server: s,
+		Server: s,
 	}

+	// If the server's data directory exists, force disk usage calculation.
+	if _, err := os.Stat(s.Filesystem.Path()); err == nil {
+		go s.Filesystem.HasSpaceAvailable()
+	}
+
 	// Forces the configuration to be synced with the panel.
@@ -1,12 +1,80 @@
 package server

-type PowerAction struct {
-	Action string `json:"action"`
-}
-
-func (pr *PowerAction) IsValid() bool {
-	return pr.Action == "start" ||
-		pr.Action == "stop" ||
-		pr.Action == "kill" ||
-		pr.Action == "restart"
-}
+import (
+	"context"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/semaphore"
+	"os"
+	"time"
+)
+
+type PowerAction string
+
+// The power actions that can be performed for a given server. This taps into the given server
+// environment and performs them in a way that prevents a race condition from occurring. For
+// example, sending two "start" actions back to back will not process the second action until
+// the first action has been completed.
+//
+// This utilizes a workerpool with a limit of one worker so that all of the actions execute
+// in a sync manner.
+const (
+	PowerActionStart     = "start"
+	PowerActionStop      = "stop"
+	PowerActionRestart   = "restart"
+	PowerActionTerminate = "kill"
+)
+
+// Checks if the power action being received is valid.
+func (pa PowerAction) IsValid() bool {
+	return pa == PowerActionStart ||
+		pa == PowerActionStop ||
+		pa == PowerActionTerminate ||
+		pa == PowerActionRestart
+}
+
+// Helper function that can receive a power action and then process the actions that need
+// to occur for it. This guards against someone calling Start() twice at the same time, or
+// trying to restart while another restart process is currently running.
+//
+// However, the code design for the daemon does depend on the user correctly calling this
+// function rather than making direct calls to the start/stop/restart functions on the
+// environment struct.
+func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error {
+	if s.powerLock == nil {
+		s.powerLock = semaphore.NewWeighted(1)
+	}
+
+	// Determines if we should wait for the lock or not. If a value greater than 0 is passed
+	// into this function we will wait that long for a lock to be acquired.
+	if len(waitSeconds) > 0 && waitSeconds[0] != 0 {
+		ctx, _ := context.WithTimeout(context.Background(), time.Second*time.Duration(waitSeconds[0]))
+		// Attempt to acquire a lock on the power action lock for up to 30 seconds. If more
+		// time than that passes an error will be propagated back up the chain and this
+		// request will be aborted.
+		if err := s.powerLock.Acquire(ctx, 1); err != nil {
+			return errors.WithMessage(err, "could not acquire lock on power state")
+		}
+	} else {
+		// If no wait duration was provided we will attempt to immediately acquire the lock
+		// and bail out with a context deadline error if it is not acquired immediately.
+		if ok := s.powerLock.TryAcquire(1); !ok {
+			return errors.WithMessage(context.DeadlineExceeded, "could not acquire lock on power state")
+		}
+	}
+
+	// Release the lock once the process being requested has finished executing.
+	defer s.powerLock.Release(1)
+
+	switch action {
+	case PowerActionStart:
+		return s.Environment.Start()
+	case PowerActionStop:
+		return s.Environment.Stop()
+	case PowerActionRestart:
+		return s.Environment.Restart()
+	case PowerActionTerminate:
+		return s.Environment.Terminate(os.Kill)
+	}
+
+	return errors.New("attempting to handle unknown power action")
+}
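The power lock introduced above is a weighted semaphore of size one used as a try-lock with an optional timed wait. A standalone sketch of that pattern with golang.org/x/sync/semaphore; unlike the diff, it keeps the cancel function from context.WithTimeout, which the diff discards (something go vet will flag):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	lock := semaphore.NewWeighted(1)

	// Non-blocking attempt, mirroring the no-wait branch above: fail
	// immediately if another power action holds the lock.
	if !lock.TryAcquire(1) {
		fmt.Println("another action is already running")
		return
	}
	lock.Release(1)

	// Timed attempt, mirroring the waitSeconds branch: wait up to the
	// deadline, then give up with a context error.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := lock.Acquire(ctx, 1); err != nil {
		fmt.Println("could not acquire lock:", err)
		return
	}
	defer lock.Release(1)

	fmt.Println("lock held; run the power action here")
}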
@@ -8,7 +8,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/api"
+	"golang.org/x/sync/semaphore"
-	"os"
 	"strings"
 	"sync"
 	"time"
@@ -19,6 +18,8 @@ type Server struct {
 	// Internal mutex used to block actions that need to occur sequentially, such as
 	// writing the configuration to the disk.
 	sync.RWMutex
+	emitterLock sync.Mutex
+	powerLock   *semaphore.Weighted

 	// Maintains the configuration for the server. This is the data that gets returned by the Panel
 	// such as build settings and container images.
@@ -157,23 +158,6 @@ func (s *Server) GetProcessConfiguration() (*api.ServerConfigurationResponse, *a
 	return api.NewRequester().GetServerConfiguration(s.Id())
 }

-// Helper function that can receieve a power action and then process the
-// actions that need to occur for it.
-func (s *Server) HandlePowerAction(action PowerAction) error {
-	switch action.Action {
-	case "start":
-		return s.Environment.Start()
-	case "restart":
-		return s.Environment.Restart()
-	case "stop":
-		return s.Environment.Stop()
-	case "kill":
-		return s.Environment.Terminate(os.Kill)
-	default:
-		return errors.New("an invalid power action was provided")
-	}
-}
-
 // Checks if the server is marked as being suspended or not on the system.
 func (s *Server) IsSuspended() bool {
 	return s.Config().Suspended
@@ -30,8 +30,8 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
 	// Grab a copy of the configuration to work on.
 	c := *s.Config()

-	// Lock our copy of the configuration since the defered unlock will end up acting upon this
-	// new memory address rather than the old one. If we don't lock this, the defered unlock will
+	// Lock our copy of the configuration since the deferred unlock will end up acting upon this
+	// new memory address rather than the old one. If we don't lock this, the deferred unlock will
 	// cause a panic when it goes to run. However, since we only update s.cfg at the end, if there
 	// is an error before that point we'll still properly unlock the original configuration for the
 	// server.
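The comment corrected here points at a real hazard: copying a struct copies its mutex, and a deferred Unlock acts on whichever mutex value it was bound to, panicking if that mutex was never locked. A minimal illustration with a stand-in type (not wings' actual configuration struct):

package main

import (
	"fmt"
	"sync"
)

type Configuration struct {
	sync.Mutex
	Name string
}

func update(cfg *Configuration) {
	// Copy the configuration; c now carries its own mutex, independent of
	// cfg's and currently unlocked.
	c := *cfg

	// Lock the copy before deferring its unlock. Deferring c.Unlock while
	// only cfg was ever locked would panic at function exit with
	// "sync: unlock of unlocked mutex".
	c.Lock()
	defer c.Unlock()

	c.Name = "updated"
	// ... validate, then swap the copy back into place under cfg's lock.
}

func main() {
	cfg := &Configuration{Name: "initial"}
	update(cfg)
	fmt.Println(cfg.Name)
}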