Compare commits
55 commits · v1.0.0-rc. … v1.0.0-rc.
| SHA1 |
|---|
| 033e8e7573 |
| aa78071543 |
| 48aeeff818 |
| 864c37f17c |
| c7405aebe5 |
| 9ff2d53466 |
| 6ba49df485 |
| 6b25ac3665 |
| 783832fc71 |
| 815539b3da |
| 6ba1b75696 |
| ce76b9339e |
| 6ba15e9884 |
| f2a6d6b3c5 |
| 0295603943 |
| ce2659fdd7 |
| be49e08f4f |
| 3ee76ea2bc |
| d7fbf29cc1 |
| d02e37620d |
| 53bd0d57ad |
| b779c98717 |
| 4ac19bd29d |
| 8407ea21da |
| fa6f56caa8 |
| 5a62f83ec8 |
| 8bcb3d7c62 |
| b2eebcaf6d |
| 45bcb9cd68 |
| e1ff4db330 |
| 606143b3ad |
| 57221bdd30 |
| 8f6494b092 |
| c415abf971 |
| e10844d32c |
| 0cd8dc2b5f |
| a31e805c5a |
| cff705f807 |
| c19fc25882 |
| fff9a89ebb |
| 891e5baa27 |
| 001bbfad1b |
| 5bead443ad |
| 77cf57d1ea |
| d743d8cfeb |
| a81146d730 |
| d50f9a83b6 |
| 7ba32aca84 |
| b9f6e17a7d |
| d99225c0fb |
| 490f874128 |
| 70afbbfc68 |
| e09cc3d2dd |
| b6008108ac |
| 1d22e84f21 |
.github/workflows/build-test.yml (4 changes, vendored)

@@ -14,10 +14,10 @@ jobs:
        - uses: actions/checkout@v2
        - uses: actions/setup-go@v2
          with:
-           go-version: '^1.15'
+           go-version: '1.15.2'

        - name: Build
-         run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -ldflags "-X github.com/pterodactyl/wings/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_linux_amd64 -v wings.go
+         run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_linux_amd64 -v wings.go

        - name: Test
          run: go test ./...
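The workflow fix above merges two `-ldflags` flags into one. When `go build` is given `-ldflags` more than once it keeps only the last value, so the original invocation silently dropped `-s -w`. A minimal sketch of the version-stamping pattern, using a hypothetical `main.Version` variable rather than the repository's `system.Version`:

```go
package main

import "fmt"

// Version is overridden at build time through the linker, e.g.:
//
//	go build -ldflags "-s -w -X main.Version=v1.0.0" -o app .
//
// Passing -ldflags twice would keep only the second value, losing -s -w.
var Version = "dev"

func main() {
	fmt.Println("running version:", Version)
}
```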
.github/workflows/release.yml (4 changes, vendored)

@@ -12,12 +12,12 @@ jobs:
        - uses: actions/checkout@v2
        - uses: actions/setup-go@v2
          with:
-           go-version: '^1.15'
+           go-version: '1.15.2'

        - name: Build
          env:
            REF: ${{ github.ref }}
-         run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -ldflags "-X github.com/pterodactyl/wings/system.Version=${REF:11}" -o build/wings_linux_amd64 -v wings.go
+         run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" -o build/wings_linux_amd64 -v wings.go

        - name: Test
          run: go test ./...
Dockerfile

@@ -2,7 +2,7 @@
  # Pterodactyl Panel Dockerfile
  # ----------------------------------

- FROM golang:1.14-alpine
+ FROM golang:1.15-alpine
  COPY . /go/wings/
  WORKDIR /go/wings/
  RUN apk add --no-cache upx \
@@ -11,4 +11,4 @@ RUN apk add --no-cache upx \

  FROM alpine:latest
  COPY --from=0 /go/wings/wings /usr/bin/
- CMD ["wings","--config", "/var/lib/pterodactyl/config.yml"]
+ CMD ["wings","--config", "/etc/pterodactyl/config.yml"]
README.md

@@ -1,6 +1,7 @@
  [](https://pterodactyl.io)

  [](https://pterodactyl.io/discord)
+ [](https://goreportcard.com/report/github.com/pterodactyl/wings)

  # Pterodactyl Wings
  Wings is Pterodactyl's server control plane, built for the rapidly changing gaming industry and designed to be
api/api.go (12 changes)

@@ -137,6 +137,7 @@ func IsRequestError(err error) bool {
  }

  type RequestError struct {
+     response *http.Response
      Code   string `json:"code"`
      Status string `json:"status"`
      Detail string `json:"detail"`
@@ -144,7 +145,7 @@ type RequestError struct {

  // Returns the error response in a string form that can be more easily consumed.
  func (re *RequestError) Error() string {
-     return fmt.Sprintf("Error response from Panel: %s: %s (HTTP/%s)", re.Code, re.Detail, re.Status)
+     return fmt.Sprintf("Error response from Panel: %s: %s (HTTP/%d)", re.Code, re.Detail, re.response.StatusCode)
  }

  func (re *RequestError) String() string {
@@ -165,9 +166,12 @@ func (r *PanelRequest) Error() *RequestError {
      bag := RequestErrorBag{}
      json.Unmarshal(body, &bag)

-     if len(bag.Errors) == 0 {
-         return new(RequestError)
+     e := new(RequestError)
+     if len(bag.Errors) > 0 {
+         e = &bag.Errors[0]
      }

-     return &bag.Errors[0]
+     e.response = r.Response
+
+     return e
  }
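The change above makes the error type carry the underlying *http.Response so that formatting can print the real numeric status code rather than the Panel-supplied status string. A minimal sketch of the same pattern, using hypothetical names rather than the repository's actual types:

```go
package main

import (
	"fmt"
	"net/http"
)

// apiError wraps a Panel-style error payload together with the HTTP
// response that produced it, so formatting can use the real status code.
type apiError struct {
	response *http.Response // set by the client, never decoded from JSON
	Code     string         `json:"code"`
	Detail   string         `json:"detail"`
}

func (e *apiError) Error() string {
	return fmt.Sprintf("error response from Panel: %s: %s (HTTP/%d)",
		e.Code, e.Detail, e.response.StatusCode)
}

func main() {
	err := &apiError{
		response: &http.Response{StatusCode: 403},
		Code:     "ForbiddenException",
		Detail:   "This action cannot be performed.",
	}
	fmt.Println(err)
}
```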
@@ -41,7 +41,7 @@ var validUsernameRegexp = regexp.MustCompile(`^(?i)(.+)\.([a-z0-9]{8})$`)

  func (r *PanelRequest) ValidateSftpCredentials(request SftpAuthRequest) (*SftpAuthResponse, error) {
      // If the username doesn't meet the expected format that the Panel would even recognize just go ahead
-     // and bail out of the process here to avoid accidentially brute forcing the panel if a bot decides
+     // and bail out of the process here to avoid accidentally brute forcing the panel if a bot decides
      // to connect to spam username attempts.
      if !validUsernameRegexp.MatchString(request.User) {
          log.WithFields(log.Fields{
@@ -68,6 +68,12 @@ func (r *PanelRequest) ValidateSftpCredentials(request SftpAuthRequest) (*SftpAu

      if r.HasError() {
          if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 {
              log.WithFields(log.Fields{
                  "subsystem": "sftp",
                  "username":  request.User,
                  "ip":        request.IP,
              }).Warn(r.Error().String())

              return nil, new(sftpInvalidCredentialsError)
          }
@@ -66,7 +66,7 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
      Name: "ReviewBeforeUpload",
      Prompt: &survey.Confirm{
          Message: "Do you want to review the collected data before uploading to hastebin.com?",
-         Help: "The data, especially the logs, might contain sensitive information, so you should review it. You will be asked again if you want to uplaod.",
+         Help: "The data, especially the logs, might contain sensitive information, so you should review it. You will be asked again if you want to upload.",
          Default: true,
      },
  },
@@ -82,7 +82,7 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
      _ = dockerInfo

      output := &strings.Builder{}
-     fmt.Fprintln(output, "Pterodactly Wings - Diagnostics Report")
+     fmt.Fprintln(output, "Pterodactyl Wings - Diagnostics Report")
      printHeader(output, "Versions")
      fmt.Fprintln(output, "wings:", system.Version)
      if dockerErr == nil {
@@ -210,7 +210,7 @@ func uploadToHastebin(hbUrl, content string) (string, error) {
          u.Path = path.Join(u.Path, key)
          return u.String(), nil
      }
-     return "", errors.New("Couldn't find key in response")
+     return "", errors.New("failed to find key in response")
  }

  func redact(s string) string {
cmd/root.go (70 changes)

@@ -5,6 +5,7 @@ import (
      "fmt"
      "github.com/NYTimes/logrotate"
      "github.com/apex/log/handlers/multi"
+     "github.com/docker/docker/client"
      "github.com/gammazero/workerpool"
      "golang.org/x/crypto/acme"
      "net/http"
@@ -27,7 +28,6 @@ import (
      "github.com/pterodactyl/wings/sftp"
      "github.com/pterodactyl/wings/system"
      "github.com/spf13/cobra"
-     "go.uber.org/zap"
  )

  var configPath = config.DefaultLocation
@@ -133,15 +133,18 @@ func rootCmdRun(*cobra.Command, []string) {
      config.SetDebugViaFlag(debug)

      if err := c.System.ConfigureDirectories(); err != nil {
-         log.WithError(err).Fatal("failed to configure system directories for pterodactyl")
-         os.Exit(1)
+         log.WithField("error", err).Fatal("failed to configure system directories for pterodactyl")
+         return
      }

+     if err := c.System.EnableLogRotation(); err != nil {
+         log.WithField("error", err).Fatal("failed to configure log rotation on the system")
+         return
+     }

      log.WithField("username", c.System.Username).Info("checking for pterodactyl system user")
      if su, err := c.EnsurePterodactylUser(); err != nil {
-         log.WithError(err).Error("failed to create pterodactyl system user")
-         os.Exit(1)
+         log.WithField("error", err).Fatal("failed to create pterodactyl system user")
+         return
      } else {
          log.WithFields(log.Fields{
@@ -158,7 +161,7 @@ func rootCmdRun(*cobra.Command, []string) {

      if err := environment.ConfigureDocker(&c.Docker); err != nil {
          log.WithField("error", err).Fatal("failed to configure docker environment")
-         os.Exit(1)
+         return
      }

      if err := c.WriteToDisk(); err != nil {
@@ -170,6 +173,11 @@ func rootCmdRun(*cobra.Command, []string) {
          log.WithField("server", s.Id()).Info("loaded configuration for server")
      }

+     states, err := server.CachedServerStates()
+     if err != nil {
+         log.WithField("error", errors.WithStack(err)).Error("failed to retrieve locally cached server states from disk, assuming all servers in offline state")
+     }

      // Create a new workerpool that limits us to 4 servers being bootstrapped at a time
      // on Wings. This allows us to ensure the environment exists, write configurations,
      // and reboot processes without causing a slow-down due to sequential booting.
@@ -179,25 +187,39 @@ func rootCmdRun(*cobra.Command, []string) {
      s := serv

      pool.Submit(func() {
-         s.Log().Info("ensuring server environment exists")
-         // Create a server environment if none exists currently. This allows us to recover from Docker
-         // being reinstalled on the host system for example.
-         if err := s.Environment.Create(); err != nil {
-             s.Log().WithField("error", err).Error("failed to process environment")
+         s.Log().Info("configuring server environment and restoring to previous state")
+
+         var st string
+         if state, exists := states[s.Id()]; exists {
+             st = state
+         }

          r, err := s.Environment.IsRunning()
-         if err != nil {
+         // We ignore missing containers because we don't want to actually block booting of wings at this
+         // point. If we didn't do this and you pruned all of the images and then started wings you could
+         // end up waiting a long period of time for all of the images to be re-pulled on Wings boot rather
+         // than when the server itself is started.
+         if err != nil && !client.IsErrNotFound(err) {
              s.Log().WithField("error", err).Error("error checking server environment status")
          }

+         // Check if the server was previously running. If so, attempt to start the server now so that Wings
+         // can pick up where it left off. If the environment does not exist at all, just create it and then allow
+         // the normal flow to execute.
+         //
+         // This does mean that booting wings after a catastrophic machine crash and wiping out the Docker images
+         // as a result will result in a slow boot.
+         if !r && (st == environment.ProcessRunningState || st == environment.ProcessStartingState) {
+             if err := s.HandlePowerAction(server.PowerActionStart); err != nil {
+                 s.Log().WithField("error", errors.WithStack(err)).Warn("failed to return server to running state")
+             }
+         } else if r || (!r && s.IsRunning()) {
          // If the server is currently running on Docker, mark the process as being in that state.
          // We never want to stop an instance that is currently running external from Wings since
          // that is a good way of keeping things running even if Wings gets in a very corrupted state.
          //
          // This will also validate that a server process is running if the last tracked state we have
          // is that it was running, but we see that the container process is not currently running.
-         if r || (!r && s.IsRunning()) {
              s.Log().Info("detected server is running, re-attaching to process...")

              s.SetState(environment.ProcessRunningState)
@@ -340,30 +362,18 @@ func configureLogging(logDir string, debug bool) error {
      return errors.WithStack(err)
  }

- cfg := zap.NewProductionConfig()
- if debug {
-     cfg = zap.NewDevelopmentConfig()
- }
-
- cfg.Encoding = "console"
- cfg.OutputPaths = []string{
-     "stdout",
- }
-
- logger, err := cfg.Build()
- if err != nil {
-     return err
- }
-
- zap.ReplaceGlobals(logger)
-
  p := filepath.Join(logDir, "/wings.log")
  w, err := logrotate.NewFile(p)
  if err != nil {
      panic(errors.Wrap(err, "failed to open process log file"))
  }

+ if debug {
+     log.SetLevel(log.DebugLevel)
+ } else {
+     log.SetLevel(log.InfoLevel)
+ }

  log.SetHandler(multi.New(
      cli.Default,
      cli.New(w.File, false),
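The boot rework above fans server restoration out over a bounded worker pool. A minimal sketch of the pattern, assuming the same gammazero/workerpool dependency; the per-server work shown here is a hypothetical stand-in:

```go
package main

import (
	"fmt"
	"time"

	"github.com/gammazero/workerpool"
)

func main() {
	// At most 4 servers are bootstrapped concurrently, mirroring the
	// "limits us to 4 servers being bootstrapped at a time" comment.
	pool := workerpool.New(4)

	for i := 0; i < 10; i++ {
		id := i // capture the loop variable for the closure
		pool.Submit(func() {
			// Stand-in for environment checks and state restoration.
			time.Sleep(100 * time.Millisecond)
			fmt.Println("restored server", id)
		})
	}

	// Block until every submitted task has finished.
	pool.StopWait()
}
```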
@@ -188,7 +188,7 @@ func NewFromPath(path string) (*Configuration, error) {
  }

  // Sets the path where the configuration file is located on the server. This function should
- // not be called except by processes that are generating the configuration such as the configration
+ // not be called except by processes that are generating the configuration such as the configuration
  // command shipped with this software.
  func (c *Configuration) unsafeSetPath(path string) {
      c.Lock()
@@ -2,8 +2,11 @@ package config

  import (
      "github.com/apex/log"
      "github.com/pkg/errors"
      "html/template"
      "os"
      "path"
      "path/filepath"
  )

  // Defines basic system configuration settings.
@@ -33,6 +36,12 @@ type SystemConfiguration struct {
      Gid int
  }

+ // The amount of time in seconds that can elapse before a server's disk space calculation is
+ // considered stale and a re-check should occur. DANGER: setting this value too low can seriously
+ // impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
+ // process.
+ DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`

  // Determines if Wings should detect a server that stops with a normal exit code of
  // "0" as being crashed if the process stopped without any Wings interaction. E.g.
  // the user did not press the stop button, but the process stopped cleanly.
@@ -44,6 +53,10 @@ type SystemConfiguration struct {
      // frequently modifying a servers' files.
      CheckPermissionsOnBoot bool `default:"true" yaml:"check_permissions_on_boot"`

+     // If set to false Wings will not attempt to write a log rotate configuration to the disk
+     // when it boots and one is not detected.
+     EnableLogRotate bool `default:"true" yaml:"enable_log_rotate"`

      Sftp SftpConfiguration `yaml:"sftp"`
  }
@@ -55,6 +68,22 @@ func (sc *SystemConfiguration) ConfigureDirectories() error {
      return err
  }

+ // There are a non-trivial number of users out there whose data directories are actually a
+ // symlink to another location on the disk. If we do not resolve that final destination at this
+ // point things will appear to work, but endless errors will be encountered when we try to
+ // verify accessed paths since they will all end up resolving outside the expected data directory.
+ //
+ // For the sake of automating away as much of this as possible, see if the data directory is a
+ // symlink, and if so resolve to its final real path, and then update the configuration to use
+ // that.
+ if d, err := filepath.EvalSymlinks(sc.Data); err != nil {
+     if !os.IsNotExist(err) {
+         return errors.WithStack(err)
+     }
+ } else if d != sc.Data {
+     sc.Data = d
+ }

  log.WithField("path", sc.Data).Debug("ensuring server data directory exists")
  if err := os.MkdirAll(sc.Data, 0700); err != nil {
      return err
@@ -73,6 +102,61 @@ func (sc *SystemConfiguration) ConfigureDirectories() error {
      return nil
  }

+ // Writes a logrotate file for wings to the system logrotate configuration directory if one
+ // exists and a logrotate file is not found. This allows us to basically automate away the log
+ // rotation for most installs, but also enable users to make modifications on their own.
+ func (sc *SystemConfiguration) EnableLogRotation() error {
+     // Do nothing if not enabled.
+     if sc.EnableLogRotate == false {
+         log.Info("skipping log rotate configuration, disabled in wings config file")
+
+         return nil
+     }
+
+     if st, err := os.Stat("/etc/logrotate.d"); err != nil && !os.IsNotExist(err) {
+         return errors.WithStack(err)
+     } else if (err != nil && os.IsNotExist(err)) || !st.IsDir() {
+         return nil
+     }
+
+     if _, err := os.Stat("/etc/logrotate.d/wings"); err != nil && !os.IsNotExist(err) {
+         return errors.WithStack(err)
+     } else if err == nil {
+         return nil
+     }
+
+     log.Info("no log rotation configuration found, system is configured to support it, adding file now")
+     // If we've gotten to this point it means the logrotate directory exists on the system
+     // but there is not a file for wings already. In that case, let us write a new file to
+     // it so files can be rotated easily.
+     f, err := os.Create("/etc/logrotate.d/wings")
+     if err != nil {
+         return errors.WithStack(err)
+     }
+     defer f.Close()
+
+     t, err := template.New("logrotate").Parse(`
+ {{.LogDirectory}}/wings.log {
+     size 10M
+     compress
+     delaycompress
+     dateext
+     maxage 7
+     missingok
+     notifempty
+     create 0640 {{.User.Uid}} {{.User.Gid}}
+     postrotate
+         killall -SIGHUP wings
+     endscript
+ }`)
+
+     if err != nil {
+         return errors.WithStack(err)
+     }
+
+     return errors.Wrap(t.Execute(f, sc), "failed to write logrotate file to disk")
+ }
+
+ // Returns the location of the JSON file that tracks server states.
+ func (sc *SystemConfiguration) GetStatesPath() string {
+     return path.Join(sc.RootDirectory, "states.json")
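The symlink-resolution step above is small but easy to get wrong. A minimal standalone sketch of the same idea, with a hypothetical data path:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func resolveDataDir(data string) (string, error) {
	// EvalSymlinks returns the path with every symlink resolved. A missing
	// directory is not fatal here; it can be created later with MkdirAll.
	d, err := filepath.EvalSymlinks(data)
	if err != nil {
		if os.IsNotExist(err) {
			return data, nil
		}
		return "", err
	}
	return d, nil
}

func main() {
	// Hypothetical path; on a real host this might be a symlink to another disk.
	d, err := resolveDataDir("/var/lib/pterodactyl/volumes")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("using data directory:", d)
}
```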
@@ -1,23 +1,27 @@
  package config

  type ConsoleThrottles struct {
-     // Wether or not the throttler is enabled for this instance.
+     // Whether or not the throttler is enabled for this instance.
      Enabled bool `json:"enabled" yaml:"enabled" default:"true"`

-     // The total number of throttle activations that must accumulate before a server is
-     // forcibly stopped for violating these limits.
-     KillAtCount uint64 `json:"kill_at_count" yaml:"kill_at_count" default:"5"`
-
-     // The amount of time in milliseconds that a server process must go through without
-     // triggering an output warning before the throttle activation count begins decreasing.
-     // This time is measured in milliseconds.
-     Decay uint64 `json:"decay" yaml:"decay" default:"10000"`
-
-     // The total number of lines that can be output in a given CheckInterval period before
+     // The total number of lines that can be output in a given LineResetInterval period before
      // a warning is triggered and counted against the server.
-     Lines uint64 `json:"lines" yaml:"lines" default:"1000"`
+     Lines uint64 `json:"lines" yaml:"lines" default:"2000"`

-     // The amount of time that must pass between intervals before the count is reset. This
-     // value is in milliseconds.
-     CheckInterval uint64 `json:"check_interval" yaml:"check_interval" default:"100"`
+     // The total number of throttle activations that can accumulate before a server is considered
+     // to be breaching and will be stopped. This value is decremented by one every DecayInterval.
+     MaximumTriggerCount uint64 `json:"maximum_trigger_count" yaml:"maximum_trigger_count" default:"5"`
+
+     // The amount of time after which the number of lines processed is reset to 0. This runs in
+     // a constant loop and is not affected by the current console output volumes. By default, this
+     // will reset the processed line count back to 0 every 100ms.
+     LineResetInterval uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
+
+     // The amount of time in milliseconds that must pass without an output warning being triggered
+     // before a throttle activation is decremented.
+     DecayInterval uint64 `json:"decay_interval" yaml:"decay_interval" default:"10000"`
+
+     // The amount of time that a server is allowed to be stopping for before it is terminated
+     // forfully if it triggers output throttles.
+     StopGracePeriod uint `json:"stop_grace_period" yaml:"stop_grace_period" default:"15"`
  }
@@ -1,26 +1,35 @@
- version: '3'
+ version: '3.5'
  services:
    daemon:
      build: .
      restart: always
      hostname: daemon
      networks:
        - daemon0
      ports:
        - "8080:8080"
        - "2022:2022"
      tty: true
      environment:
        - "DEBUG=false"
        - "TZ=UTC" # change to the three letter timezone of your choosing
      volumes:
        - "/var/run/docker.sock:/var/run/docker.sock"
        - "/var/lib/docker/containers/:/var/lib/docker/containers/"
        - "/etc/pterodactyl/:/etc/pterodactyl/"
        - "/var/lib/pterodactyl/:/var/lib/pterodactyl/"
-       - "/srv/daemon-data/:/srv/daemon-data/"
        - "/var/log/pterodactyl/:/var/log/pterodactyl/"
        - "/tmp/pterodactyl/:/tmp/pterodactyl/"
        - "/etc/timezone:/etc/timezone:ro"
+       ## you may need /srv/daemon-data if you are upgrading from an old daemon
+       ## - "/srv/daemon-data/:/srv/daemon-data/"
        ## Required for ssl if you user let's encrypt. uncomment to use.
        ## - "/etc/letsencrypt/:/etc/letsencrypt/"

  networks:
-   default:
+   daemon0:
+     name: daemon0
      driver: bridge
      ipam:
        config:
-         - subnet: 172.21.0.0/16
+         - subnet: "172.21.0.0/16"
      driver_opts:
        com.docker.network.bridge.name: daemon0
@@ -3,6 +3,7 @@ package environment

  import (
      "fmt"
      "github.com/docker/go-connections/nat"
+     "github.com/pterodactyl/wings/config"
      "strconv"
  )
@@ -25,6 +26,8 @@ type Allocations struct {

  // Converts the server allocation mappings into a format that can be understood by Docker. While
  // we do strive to support multiple environments, using Docker's standardized format for the
  // bindings certainly makes life a little easier for managing things.
+ //
+ // You'll want to use DockerBindings() if you need to re-map 127.0.0.1 to the Docker interface.
  func (a *Allocations) Bindings() nat.PortMap {
      var out = nat.PortMap{}
@@ -50,16 +53,47 @@ func (a *Allocations) Bindings() nat.PortMap {
      return out
  }

+ // Returns the bindings for the server in a way that is supported correctly by Docker. This replaces
+ // any reference to 127.0.0.1 with the IP of the pterodactyl0 network interface which will allow the
+ // server to operate on a local address while still being accessible by other containers.
+ func (a *Allocations) DockerBindings() nat.PortMap {
+     iface := config.Get().Docker.Network.Interface
+
+     out := a.Bindings()
+     // Loop over all of the bindings for this container, and convert any that reference 127.0.0.1
+     // to use the pterodactyl0 network interface IP, as that is the true local for what people are
+     // trying to do when creating servers.
+     for p, binds := range out {
+         for i, alloc := range binds {
+             if alloc.HostIP != "127.0.0.1" {
+                 continue
+             }
+
+             // If using ISPN just delete the local allocation from the server.
+             if config.Get().Docker.Network.ISPN {
+                 out[p] = append(out[p][:i], out[p][i+1:]...)
+             } else {
+                 out[p][i] = nat.PortBinding{
+                     HostIP:   iface,
+                     HostPort: alloc.HostPort,
+                 }
+             }
+         }
+     }
+
+     return out
+ }

  // Converts the server allocation mappings into a PortSet that can be understood
  // by Docker. This formatting is slightly different than "Bindings" as it should
  // return an empty struct rather than a binding.
  //
- // To accomplish this, we'll just get the values from "Bindings" and then set them
+ // To accomplish this, we'll just get the values from "DockerBindings" and then set them
  // to empty structs. Because why not.
  func (a *Allocations) Exposed() nat.PortSet {
      var out = nat.PortSet{}

-     for port := range a.Bindings() {
+     for port := range a.DockerBindings() {
          out[port] = struct{}{}
      }
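The DockerBindings logic above rewrites loopback bindings onto the bridge interface. A minimal standalone sketch of the same remapping, assuming the docker/go-connections/nat package and a hardcoded stand-in for the configured interface IP:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func remapLoopback(in nat.PortMap, iface string) nat.PortMap {
	for port, binds := range in {
		for i, b := range binds {
			// Rewrite 127.0.0.1 to the bridge interface so other
			// containers can still reach the server.
			if b.HostIP == "127.0.0.1" {
				in[port][i] = nat.PortBinding{HostIP: iface, HostPort: b.HostPort}
			}
		}
	}
	return in
}

func main() {
	bindings := nat.PortMap{
		"25565/tcp": []nat.PortBinding{{HostIP: "127.0.0.1", HostPort: "25565"}},
	}
	// "172.18.0.1" stands in for the configured pterodactyl0 interface IP.
	fmt.Println(remapLoopback(bindings, "172.18.0.1"))
}
```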
@@ -52,7 +52,7 @@ func (c *Configuration) Limits() Limits {
      return c.settings.Limits
  }

- // Rturns the allocations associated with this environment.
+ // Returns the allocations associated with this environment.
  func (c *Configuration) Allocations() Allocations {
      c.mu.RLock()
      defer c.mu.RUnlock()
@@ -3,6 +3,7 @@ package docker

  import (
      "bufio"
      "context"
+     "encoding/json"
      "fmt"
      "github.com/apex/log"
      "github.com/docker/docker/api/types"
@@ -19,6 +20,11 @@ import (
      "time"
  )

+ type imagePullStatus struct {
+     Status   string `json:"status"`
+     Progress string `json:"progress"`
+ }

  // Attaches to the docker container itself and ensures that we can pipe data in and out
  // of the process stream. This should not be used for reading console data as you *will*
  // miss important output at the beginning because of the time delay with attaching to the
@@ -57,15 +63,21 @@ func (e *Environment) Attach() error {
          e.SetStream(nil)
      }()

-     // Poll resources in a seperate thread since this will block the copy call below
-     // from being reached until it is completed if not run in a seperate process. However,
+     // Poll resources in a separate thread since this will block the copy call below
+     // from being reached until it is completed if not run in a separate process. However,
      // we still want it to be stopped when the copy operation below is finished running which
      // indicates that the container is no longer running.
-     go e.pollResources(ctx)
+     go func(ctx context.Context) {
+         if err := e.pollResources(ctx); err != nil {
+             log.WithField("environment_id", e.Id).WithField("error", errors.WithStack(err)).Error("error during environment resource polling")
+         }
+     }(ctx)

      // Stream the reader output to the console which will then fire off events and handle console
      // throttling and sending the output to the user.
-     _, _ = io.Copy(console, e.stream.Reader)
+     if _, err := io.Copy(console, e.stream.Reader); err != nil {
+         log.WithField("environment_id", e.Id).WithField("error", errors.WithStack(err)).Error("error while copying environment output to console")
+     }
  }(c)

  return nil
@@ -137,6 +149,15 @@ func (e *Environment) Create() error {

      a := e.Configuration.Allocations()

+     evs := e.Configuration.EnvironmentVariables()
+     for i, v := range evs {
+         // Convert 127.0.0.1 to the pterodactyl0 network interface if the environment is Docker
+         // so that the server operates as expected.
+         if v == "SERVER_IP=127.0.0.1" {
+             evs[i] = "SERVER_IP=" + config.Get().Docker.Network.Interface
+         }
+     }

      conf := &container.Config{
          Hostname:   e.Id,
          Domainname: config.Get().Docker.Domainname,
@@ -158,10 +179,10 @@ func (e *Environment) Create() error {
      tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))

      hostConf := &container.HostConfig{
-         PortBindings: a.Bindings(),
+         PortBindings: a.DockerBindings(),

          // Configure the mounts for this container. First mount the server data directory
-         // into the container as a r/w bine.
+         // into the container as a r/w bind.
          Mounts: e.convertMounts(),

          // Configure the /tmp folder mapping in containers. This is necessary for some
@@ -222,7 +243,7 @@ func (e *Environment) convertMounts() []mount.Mount {
  // Remove the Docker container from the machine. If the container is currently running
  // it will be forcibly stopped by Docker.
  func (e *Environment) Destroy() error {
-     // We set it to stopping than offline to prevent crash detection from being triggeree.
+     // We set it to stopping than offline to prevent crash detection from being triggered.
      e.setState(environment.ProcessStoppingState)

      err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{
@@ -244,7 +265,7 @@ func (e *Environment) Destroy() error {
      return err
  }

- // Attaches to the log for the container. This avoids us missing cruicial output that
+ // Attaches to the log for the container. This avoids us missing crucial output that
  // happens in the split seconds before the code moves from 'Starting' to 'Attaching'
  // on the process.
  func (e *Environment) followOutput() error {
@@ -290,8 +311,11 @@ func (e *Environment) followOutput() error {
  // cases an outage shouldn't affect users too badly. It'll at least keep existing servers working
  // correctly if anything.
  //
- // TODO: handle authorization & local images
+ // TODO: local images
  func (e *Environment) ensureImageExists(image string) error {
+     e.Events().Publish(environment.DockerImagePullStarted, "")
+     defer e.Events().Publish(environment.DockerImagePullCompleted, "")

      // Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
      // image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
      // an image. Let me know when I am inevitably wrong here...
@@ -356,15 +380,21 @@ func (e *Environment) ensureImageExists(image string) error {
      log.WithField("image", image).Debug("pulling docker image... this could take a bit of time")

      // I'm not sure what the best approach here is, but this will block execution until the image
-     // is done being pulled, which is what we neee.
+     // is done being pulled, which is what we need.
      scanner := bufio.NewScanner(out)
      for scanner.Scan() {
-         continue
+         s := imagePullStatus{}
          fmt.Println(scanner.Text())
+         if err := json.Unmarshal(scanner.Bytes(), &s); err == nil {
+             e.Events().Publish(environment.DockerImagePullStatus, s.Status+" "+s.Progress)
+         }
      }

+     if err := scanner.Err(); err != nil {
+         return err
+     }

      log.WithField("image", image).Debug("completed docker image pull")

      return nil
  }
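The pull loop above decodes Docker's line-delimited JSON pull progress into a small struct and republishes it as an event. A minimal sketch of that decode step, using an in-memory stand-in for the reader that the Docker SDK's ImagePull returns:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

// Mirrors the imagePullStatus struct added in the diff.
type pullStatus struct {
	Status   string `json:"status"`
	Progress string `json:"progress"`
}

func main() {
	// Stand-in for the io.ReadCloser returned by client.ImagePull.
	body := strings.NewReader(
		`{"status":"Downloading","progress":"[=====>    ] 12MB/24MB"}` + "\n" +
			`{"status":"Pull complete","progress":""}` + "\n")

	scanner := bufio.NewScanner(body)
	for scanner.Scan() {
		var s pullStatus
		// Ignore lines that fail to decode, as the diff does.
		if err := json.Unmarshal(scanner.Bytes(), &s); err == nil {
			fmt.Println(s.Status, s.Progress)
		}
	}
	if err := scanner.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}
```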
@@ -103,7 +103,7 @@ func (e *Environment) Events() *events.EventBus {
  // Determines if the container exists in this environment. The ID passed through should be the
  // server UUID since containers are created utilizing the server UUID as the name and docker
  // will work fine when using the container name as the lookup parameter in addition to the longer
- // ID auto-assigned when the container is createe.
+ // ID auto-assigned when the container is created.
  func (e *Environment) Exists() (bool, error) {
      _, err := e.client.ContainerInspect(context.Background(), e.Id)
@@ -137,7 +137,7 @@ func (e *Environment) IsRunning() (bool, error) {
      return c.State.Running, nil
  }

- // Determine the container exit state and return the exit code and wether or not
+ // Determine the container exit state and return the exit code and whether or not
  // the container was killed by the OOM killer.
  func (e *Environment) ExitState() (uint32, bool, error) {
      c, err := e.client.ContainerInspect(context.Background(), e.Id)
@@ -148,7 +148,7 @@ func (e *Environment) ExitState() (uint32, bool, error) {
      //
      // However, someone reported an error in Discord about this scenario happening,
      // so I guess this should prevent it? They didn't tell me how they caused it though
-     // so that's a mystery that will have to go unsolvee.
+     // so that's a mystery that will have to go unsolved.
      //
      // @see https://github.com/pterodactyl/panel/issues/2003
      if client.IsErrNotFound(err) {
@@ -176,3 +176,9 @@ func (e *Environment) SetStopConfiguration(c *api.ProcessStopConfiguration) {
      e.meta.Stop = c
      e.mu.Unlock()
  }

+ func (e *Environment) SetImage(i string) {
+     e.mu.Lock()
+     e.meta.Image = i
+     e.mu.Unlock()
+ }
@@ -35,7 +35,7 @@ func (e *Environment) OnBeforeStart() error {
  // container and data storage directory.
  //
  // This won't actually run an installation process however, it is just here to ensure the
- // environment gets created properly if it is missing and the server is startee. We're making
+ // environment gets created properly if it is missing and the server is started. We're making
  // an assumption that all of the files will still exist at this point.
  if err := e.Create(); err != nil {
      return err
@@ -64,7 +64,7 @@ func (e *Environment) Start() error {

  if c, err := e.client.ContainerInspect(context.Background(), e.Id); err != nil {
      // Do nothing if the container is not found, we just don't want to continue
-     // to the next block of code here. This check was inlined here to guard againt
+     // to the next block of code here. This check was inlined here to guard against
      // a nil-pointer when checking c.State below.
      //
      // @see https://github.com/pterodactyl/panel/issues/2000
@@ -128,7 +128,7 @@ func (e *Environment) Stop() error {

  if s == nil || s.Type == api.ProcessStopSignal {
      if s == nil {
-         log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using termination proceedure")
+         log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using termination procedure")
      }

      return e.Terminate(os.Kill)
@@ -183,13 +183,21 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
  case <-ctx.Done():
      if ctxErr := ctx.Err(); ctxErr != nil {
          if terminate {
-             return e.Terminate(os.Kill)
+             log.WithField("container_id", e.Id).Debug("server did not stop in time, executing process termination")
+
+             return errors.WithStack(e.Terminate(os.Kill))
          }

          return errors.WithStack(ctxErr)
      }
  case err := <-errChan:
      if err != nil {
+         if terminate {
+             log.WithField("container_id", e.Id).WithField("error", errors.WithStack(err)).Warn("error while waiting for container stop, attempting process termination")
+
+             return errors.WithStack(e.Terminate(os.Kill))
+         }

          return errors.WithStack(err)
      }
  case <-ok:
@@ -217,7 +225,7 @@ func (e *Environment) Terminate(signal os.Signal) error {
      return nil
  }

- // We set it to stopping than offline to prevent crash detection from being triggeree.
+ // We set it to stopping than offline to prevent crash detection from being triggered.
  e.setState(environment.ProcessStoppingState)

  sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
@@ -27,7 +27,7 @@ func (e *Environment) setState(state string) error {
  // Get the current state of the environment before changing it.
  prevState := e.State()

- // Emit the event to any listeners that are currently registeree.
+ // Emit the event to any listeners that are currently registered.
  if prevState != state {
      // If the state changed make sure we update the internal tracking to note that.
      e.stMu.Lock()
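The WaitForStop change above adds logging and a termination fallback on both the timeout and the error path. A minimal sketch of the underlying pattern (a context deadline plus an error channel, falling back to a kill), with hypothetical stop and terminate functions:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForStop waits up to `timeout` for stopDone to deliver a result and
// falls back to terminate() on timeout or error when terminateOnFail is set.
func waitForStop(timeout time.Duration, stopDone <-chan error, terminateOnFail bool, terminate func() error) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	select {
	case <-ctx.Done():
		if terminateOnFail {
			fmt.Println("server did not stop in time, executing process termination")
			return terminate()
		}
		return ctx.Err()
	case err := <-stopDone:
		if err != nil && terminateOnFail {
			fmt.Println("error while waiting for stop, attempting process termination")
			return terminate()
		}
		return err
	}
}

func main() {
	stopDone := make(chan error, 1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		stopDone <- errors.New("container wait failed") // simulated failure
	}()

	err := waitForStop(time.Second, stopDone, true, func() error {
		fmt.Println("terminated")
		return nil
	})
	fmt.Println("result:", err)
}
```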
@@ -15,14 +15,20 @@ import (

  // Attach to the instance and then automatically emit an event whenever the resource usage for the
  // server process changes.
  func (e *Environment) pollResources(ctx context.Context) error {
+     l := log.WithField("container_id", e.Id)
+
+     l.Debug("starting resource polling for container")
+     defer l.Debug("stopped resource polling for container")

      if e.State() == environment.ProcessOfflineState {
-         return errors.New("attempting to enable resource polling on a stopped server instance")
+         return errors.New("cannot enable resource polling on a stopped server")
      }

      stats, err := e.client.ContainerStats(context.Background(), e.Id, true)
      if err != nil {
          return errors.WithStack(err)
      }
      defer stats.Body.Close()

      dec := json.NewDecoder(stats.Body)
@@ -35,7 +41,9 @@ func (e *Environment) pollResources(ctx context.Context) error {

      if err := dec.Decode(&v); err != nil {
          if err != io.EOF {
-             log.WithField("container_id", e.Id).Warn("encountered error processing docker stats output, stopping collection")
+             l.WithField("error", errors.WithStack(err)).Warn("error while processing Docker stats output for container")
+         } else {
+             l.Debug("io.EOF encountered during stats decode, stopping polling...")
          }

          return nil
@@ -43,6 +51,7 @@ func (e *Environment) pollResources(ctx context.Context) error {

      // Disable collection if the server is in an offline state and this process is still running.
      if e.State() == environment.ProcessOfflineState {
+         l.Debug("process in offline state while resource polling is still active; stopping poll")
          return nil
      }
@@ -66,11 +75,14 @@ func (e *Environment) pollResources(ctx context.Context) error {
          },
      }

-     b, _ := json.Marshal(st)
+     if b, err := json.Marshal(st); err != nil {
+         l.WithField("error", errors.WithStack(err)).Warn("error while marshaling stats object for environment")
+     } else {
          e.Events().Publish(environment.ResourceEvent, string(b))
+     }
  }

  // The "docker stats" CLI call does not return the same value as the types.MemoryStats.Usage
  // value which can be rather confusing to people trying to compare panel usage to
@@ -7,8 +7,8 @@ import (
      "encoding/json"
      "github.com/docker/docker/api/types"
      "github.com/pkg/errors"
-     "io"
-     "os"
+     "github.com/pterodactyl/wings/environment"
+     "strconv"
  )

  type dockerLogLine struct {
@@ -31,6 +31,15 @@ func (e *Environment) SendCommand(c string) error {
      return errors.New("attempting to send command to non-attached instance")
  }

+ if e.meta.Stop != nil {
+     // If the command being processed is the same as the process stop command then we want to mark
+     // the server as entering the stopping state otherwise the process will stop and Wings will think
+     // it has crashed and attempt to restart it.
+     if e.meta.Stop.Type == "command" && c == e.meta.Stop.Value {
+         e.Events().Publish(environment.StateChangeEvent, environment.ProcessStoppingState)
+     }
+ }

  _, err := e.stream.Conn.Write([]byte(c + "\n"))

  return errors.WithStack(err)
@@ -38,44 +47,25 @@ func (e *Environment) SendCommand(c string) error {

  // Reads the log file for the server. This does not care if the server is running or not, it will
  // simply try to read the last X bytes of the file and return them.
- func (e *Environment) Readlog(len int64) ([]string, error) {
-     j, err := e.client.ContainerInspect(context.Background(), e.Id)
+ func (e *Environment) Readlog(lines int) ([]string, error) {
+     r, err := e.client.ContainerLogs(context.Background(), e.Id, types.ContainerLogsOptions{
+         ShowStdout: true,
+         ShowStderr: true,
+         Tail:       strconv.Itoa(lines),
+     })
      if err != nil {
-         return nil, err
+         return nil, errors.WithStack(err)
      }
+     defer r.Close()

+     var out []string

+     scanner := bufio.NewScanner(r)
+     for scanner.Scan() {
+         out = append(out, scanner.Text())
+     }

-     if j.LogPath == "" {
-         return nil, errors.New("empty log path defined for server")
-     }

-     f, err := os.Open(j.LogPath)
-     if err != nil {
-         return nil, err
-     }
-     defer f.Close()

-     // Check if the length of the file is smaller than the amount of data that was requested
-     // for reading. If so, adjust the length to be the total length of the file. If this is not
-     // done an error is thrown since we're reading backwards, and not forwards.
-     if stat, err := os.Stat(j.LogPath); err != nil {
-         return nil, err
-     } else if stat.Size() < len {
-         len = stat.Size()
-     }

-     // Seed to the end of the file and then move backwards until the length is met to avoid
-     // reading the entirety of the file into memory.
-     if _, err := f.Seek(-len, io.SeekEnd); err != nil {
-         return nil, err
-     }

-     b := make([]byte, len)

-     if _, err := f.Read(b); err != nil && err != io.EOF {
-         return nil, err
-     }

-     return e.parseLogToStrings(b)
+     return out, nil
  }

  // Docker stores the logs for server output in a JSON format. This function will iterate over the JSON
@@ -87,6 +77,7 @@ func (e *Environment) parseLogToStrings(b []byte) ([]string, error) {
      scanner := bufio.NewScanner(bytes.NewReader(b))
      for scanner.Scan() {
          var l dockerLogLine

          // Unmarshal the contents and allow up to a single error before bailing out of the process. We
          // do this because if you're arbitrarily reading a length of the file you'll likely end up
          // with the first line in the output being improperly formatted JSON. In those cases we want to
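Readlog now asks the Docker daemon for the last N lines directly instead of seeking backwards through the JSON log file on disk. A minimal sketch of that call, assuming a reachable Docker daemon and a hypothetical container name:

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"strconv"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// readLog returns the last `lines` lines of a container's output,
// mirroring the rewritten Readlog in the diff.
func readLog(cli *client.Client, id string, lines int) ([]string, error) {
	r, err := cli.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       strconv.Itoa(lines), // daemon-side tailing replaces manual seeking
	})
	if err != nil {
		return nil, err
	}
	defer r.Close()

	var out []string
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		out = append(out, scanner.Text())
	}
	return out, scanner.Err()
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// "my-container" is a hypothetical container name or ID.
	lines, err := readLog(cli, "my-container", 100)
	if err != nil {
		panic(err)
	}
	for _, l := range lines {
		fmt.Println(l)
	}
}
```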
@@ -9,6 +9,9 @@ const (
      ConsoleOutputEvent = "console output"
      StateChangeEvent   = "state change"
      ResourceEvent      = "resources"
+     DockerImagePullStarted   = "docker image pull started"
+     DockerImagePullStatus    = "docker image pull status"
+     DockerImagePullCompleted = "docker image pull completed"
  )

  const (
@@ -89,6 +92,6 @@ type ProcessEnvironment interface {
      SendCommand(string) error

      // Reads the log file for the process from the end backwards until the provided
-     // number of bytes is met.
-     Readlog(int64) ([]string, error)
+     // number of lines is met.
+     Readlog(int) ([]string, error)
  }
@@ -22,7 +22,7 @@ type Mount struct {
      // that we're mounting into the container at the Target location.
      Source string `json:"source"`

-     // Wether or not the directory is being mounted as read-only. It is up to the environment to
+     // Whether or not the directory is being mounted as read-only. It is up to the environment to
      // handle this value correctly and ensure security expectations are met with its usage.
      ReadOnly bool `json:"read_only"`
  }
@@ -2,6 +2,8 @@ package events

  import (
      "encoding/json"
+     "github.com/gammazero/workerpool"
+     "github.com/pkg/errors"
      "strings"
      "sync"
  )
@@ -12,14 +14,13 @@ type Event struct {
  }

  type EventBus struct {
-     sync.RWMutex
-
-     subscribers map[string]map[chan Event]struct{}
+     mu    sync.RWMutex
+     pools map[string]*CallbackPool
  }

  func New() *EventBus {
      return &EventBus{
-         subscribers: make(map[string]map[chan Event]struct{}),
+         pools: make(map[string]*CallbackPool),
      }
  }
@@ -39,25 +40,36 @@ func (e *EventBus) Publish(topic string, data string) {
          }
      }

+     e.mu.RLock()
+     defer e.mu.RUnlock()

      // Acquire a read lock and loop over all of the channels registered for the topic. This
      // avoids a panic crash if the process tries to unregister the channel while this routine
      // is running.
-     go func() {
-         e.RLock()
-         defer e.RUnlock()
-
-         if ch, ok := e.subscribers[t]; ok {
-             for channel := range ch {
-                 channel <- Event{Data: data, Topic: topic}
+     if cp, ok := e.pools[t]; ok {
+         for _, callback := range cp.callbacks {
+             c := *callback
+             evt := Event{Data: data, Topic: topic}
+             // Using the workerpool with one worker allows us to execute events in a FIFO manner. Running
+             // this using goroutines would cause things such as console output to just output in random order
+             // if more than one event is fired at the same time.
+             //
+             // However, the pool submission does not block the execution of this function itself, allowing
+             // us to call publish without blocking any of the other pathways.
+             //
+             // @see https://github.com/pterodactyl/panel/issues/2303
+             cp.pool.Submit(func() {
+                 c(evt)
+             })
          }
      }
-     }()
  }

  // Publishes a JSON message to a given topic.
  func (e *EventBus) PublishJson(topic string, data interface{}) error {
      b, err := json.Marshal(data)
      if err != nil {
-         return err
+         return errors.WithStack(err)
      }

      e.Publish(topic, string(b))
@@ -65,41 +77,46 @@ func (e *EventBus) PublishJson(topic string, data interface{}) error {
      return nil
  }

- // Subscribe to an emitter topic using a channel.
- func (e *EventBus) Subscribe(topic string, ch chan Event) {
-     e.Lock()
-     defer e.Unlock()
+ // Register a callback function that will be executed each time one of the events using the topic
+ // name is called.
+ func (e *EventBus) On(topic string, callback *func(Event)) {
+     e.mu.Lock()
+     defer e.mu.Unlock()

-     if _, exists := e.subscribers[topic]; !exists {
-         e.subscribers[topic] = make(map[chan Event]struct{})
-     }
-
-     // Only set the channel if there is not currently a matching one for this topic. This
-     // avoids registering two identical listeners for the same topic and causing pain in
-     // the unsubscribe functionality as well.
-     if _, exists := e.subscribers[topic][ch]; !exists {
-         e.subscribers[topic][ch] = struct{}{}
+     // Check if this topic has been registered at least once for the event listener, and if
+     // not create an empty struct for the topic.
+     if _, exists := e.pools[topic]; !exists {
+         e.pools[topic] = &CallbackPool{
+             callbacks: make([]*func(Event), 0),
+             pool:      workerpool.New(1),
+         }
      }

- // Unsubscribe a channel from a given topic.
- func (e *EventBus) Unsubscribe(topic string, ch chan Event) {
-     e.Lock()
-     defer e.Unlock()
+     // If this callback is not already registered as an event listener, go ahead and append
+     // it to the array of callbacks for this topic.
+     e.pools[topic].Add(callback)
  }

-     if _, exists := e.subscribers[topic][ch]; exists {
-         delete(e.subscribers[topic], ch)
+ // Removes an event listener from the bus.
+ func (e *EventBus) Off(topic string, callback *func(Event)) {
+     e.mu.Lock()
+     defer e.mu.Unlock()
+
+     if cp, ok := e.pools[topic]; ok {
+         cp.Remove(callback)
      }
  }

- // Removes all of the event listeners for the server. This is used when a server
- // is being deleted to avoid a bunch of de-reference errors cropping up. Obviously
- // should also check elsewhere and handle a server reference going nil, but this
- // won't hurt.
- func (e *EventBus) UnsubscribeAll() {
-     e.Lock()
-     defer e.Unlock()
+ // Removes all of the event listeners that have been registered for any topic. Also stops the worker
+ // pool to close that routine.
+ func (e *EventBus) Destroy() {
+     e.mu.Lock()
+     defer e.mu.Unlock()

-     // Reset the entire struct into an empty map.
-     e.subscribers = make(map[string]map[chan Event]struct{})
+     // Stop every pool that exists for a given callback topic.
+     for _, cp := range e.pools {
+         cp.pool.Stop()
+     }
+
+     e.pools = make(map[string]*CallbackPool)
  }
events/pool.go (new file, 49 lines)

@@ -0,0 +1,49 @@
+ package events
+
+ import (
+     "github.com/gammazero/workerpool"
+     "reflect"
+ )
+
+ type CallbackPool struct {
+     callbacks []*func(Event)
+     pool      *workerpool.WorkerPool
+ }
+
+ // Pushes a new callback into the array of listeners for the pool.
+ func (cp *CallbackPool) Add(callback *func(Event)) {
+     if cp.index(reflect.ValueOf(callback)) < 0 {
+         cp.callbacks = append(cp.callbacks, callback)
+     }
+ }
+
+ // Removes a callback from the array of registered callbacks if it exists.
+ func (cp *CallbackPool) Remove(callback *func(Event)) {
+     i := cp.index(reflect.ValueOf(callback))
+
+     // If i < 0 it means there was no index found for the given callback, meaning it was
+     // never registered or was already unregistered from the listeners. Also double check
+     // that we didn't somehow escape the length of the topic callback (not sure how that
+     // would happen, but lets avoid a panic condition).
+     if i < 0 || i >= len(cp.callbacks) {
+         return
+     }
+
+     // We can assume that the topic still exists at this point since we acquire an exclusive
+     // lock on the process, and the "e.index" function cannot return a value >= 0 if there is
+     // no topic already existing.
+     cp.callbacks = append(cp.callbacks[:i], cp.callbacks[i+1:]...)
+ }
+
+ // Finds the index of a given callback in the topic by comparing all of the registered callback
+ // pointers to the passed function. This function does not aquire a lock as it should only be called
+ // within the confines of a function that has already acquired a lock for the duration of the lookup.
+ func (cp *CallbackPool) index(v reflect.Value) int {
+     for i, handler := range cp.callbacks {
+         if reflect.ValueOf(handler).Pointer() == v.Pointer() {
+             return i
+         }
+     }
+
+     return -1
+ }
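The new callback-based bus replaces channel subscriptions; listeners are registered as *func(Event) so the exact same pointer can later be removed, and each topic dispatches through a one-worker pool for FIFO ordering. A minimal usage sketch against the API shown in the diff, with a topic name taken from the constants above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pterodactyl/wings/events"
)

func main() {
	bus := events.New()

	// The callback is registered by pointer so Off can find and remove
	// this exact listener later.
	listener := func(e events.Event) {
		fmt.Printf("topic=%q data=%q\n", e.Topic, e.Data)
	}
	bus.On("console output", &listener)

	// Events on one topic are dispatched FIFO through a one-worker pool,
	// so these two lines arrive in order.
	bus.Publish("console output", "first line")
	bus.Publish("console output", "second line")

	time.Sleep(100 * time.Millisecond) // give the pool time to drain
	bus.Off("console output", &listener)
	bus.Destroy()
}
```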
go.mod (4 changes)

@@ -65,14 +65,11 @@ require (
      github.com/spf13/cobra v1.0.0
      github.com/spf13/pflag v1.0.5 // indirect
      github.com/ulikunitz/xz v0.5.7 // indirect
-     go.uber.org/zap v1.15.0
      golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de
      golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
      golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect
      golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
      golang.org/x/text v0.3.3 // indirect
      golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
      golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5 // indirect
      golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
      google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 // indirect
      google.golang.org/grpc v1.31.0 // indirect
@@ -81,5 +78,4 @@ require (
      gopkg.in/ini.v1 v1.57.0
      gopkg.in/yaml.v2 v2.3.0
      gotest.tools v2.2.0+incompatible // indirect
-     honnef.co/go/tools v0.0.1-2020.1.3 // indirect
  )
go.sum (19 changes)

@@ -549,7 +549,6 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofm
  github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
  github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
  github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
  github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
  go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
  go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
  go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
@@ -560,19 +559,13 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
  go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
  go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
  go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
  go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
  go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
  go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
  go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
  go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
  go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
  go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
  go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
  go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
  go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
  go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
  go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
  go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
  golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
  golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
  golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -594,12 +587,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
  golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
  golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
  golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
  golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
  golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
  golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
  golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
  golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
  golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
  golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
  golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
  golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -617,7 +606,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
  golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
  golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
  golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
  golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
  golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
  golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
  golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@@ -689,12 +677,7 @@ golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtn
  golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
  golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
  golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
  golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
  golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
  golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
  golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
  golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5 h1:MeC2gMlMdkd67dn17MEby3rGXRxZtWeiRXOnISfTQ74=
  golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
  golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
  golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
  golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -777,7 +760,5 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
  honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
  honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
  honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
  honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
  honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
  sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
  sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
@@ -2,16 +2,12 @@ package installer

import (
"encoding/json"
"github.com/apex/log"
"github.com/asaskevich/govalidator"
"github.com/buger/jsonparser"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/server"
"os"
"path"
)

type Installer struct {
@@ -95,33 +91,6 @@ func (i *Installer) Server() *server.Server {
return i.server
}

// Executes the installer process, creating the server and running through the
// associated installation process based on the parameters passed through for
// the server instance.
func (i *Installer) Execute() {
p := path.Join(config.Get().System.Data, i.Uuid())
l := log.WithFields(log.Fields{"server": i.Uuid(), "process": "installer"})

l.WithField("path", p).Debug("creating required server data directory")
if err := os.MkdirAll(p, 0755); err != nil {
l.WithFields(log.Fields{"path": p, "error": errors.WithStack(err)}).Error("failed to create server data directory")
return
}

if err := os.Chown(p, config.Get().System.User.Uid, config.Get().System.User.Gid); err != nil {
l.WithField("error", errors.WithStack(err)).Error("failed to chown server data directory")
return
}

l.Debug("creating required environment for server instance")
if err := i.server.Environment.Create(); err != nil {
l.WithField("error", err).Error("failed to create environment for server")
return
}

l.Info("successfully created environment for server during install process")
}

// Returns a string value from the JSON data provided.
func getString(data []byte, key ...string) string {
value, _ := jsonparser.GetString(data, key...)

@@ -76,13 +76,13 @@ func (cfr *ConfigurationFileReplacement) getKeyValue(value []byte) interface{} {
func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error) {
parsed, err := gabs.ParseJSON(data)
if err != nil {
return nil, err
return nil, errors.WithStack(err)
}

for _, v := range f.Replace {
value, err := f.LookupConfigurationValue(v)
if err != nil {
return nil, err
return nil, errors.WithStack(err)
}

// Check for a wildcard character, and if found split the key on that value to
@@ -97,12 +97,20 @@ func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error
// time this code is being written.
for _, child := range parsed.Path(strings.Trim(parts[0], ".")).Children() {
if err := v.SetAtPathway(child, strings.Trim(parts[1], "."), []byte(value)); err != nil {
return nil, err
if errors.Is(err, gabs.ErrNotFound) {
continue
}

return nil, errors.Wrap(err, "failed to set config value of array child")
}
}
} else {
if err = v.SetAtPathway(parsed, v.Match, []byte(value)); err != nil {
return nil, err
if errors.Is(err, gabs.ErrNotFound) {
continue
}

return nil, errors.Wrap(err, "unable to set config value at pathway: "+v.Match)
}
}
}
@@ -110,13 +118,91 @@ func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error
return parsed, nil
}

// Regex used to check if there is an array element present in the given pathway by looking for something
// along the lines of "something[1]" or "something[1].nestedvalue" as the path.
var checkForArrayElement = regexp.MustCompile(`^([^\[\]]+)\[([\d]+)](\..+)?$`)

// Attempt to set the value of the path depending on if it is an array or not. Gabs cannot handle array
// values as "something[1]" but can parse them just fine. This is basically just overly complex code
// to handle that edge case and ensure the value gets set correctly.
//
// Bless thee who has to touch these most unholy waters.
func setValueAtPath(c *gabs.Container, path string, value interface{}) error {
var err error

matches := checkForArrayElement.FindStringSubmatch(path)
if len(matches) < 3 {
// Only update the value if the pathway actually exists in the configuration, otherwise
// do nothing.
if c.ExistsP(path) {
_, err = c.SetP(value, path)
}

return errors.WithStack(err)
}

i, _ := strconv.Atoi(matches[2])
// Find the array element "i" or try to create it if "i" is equal to 0 and is not found
// at the given path.
ct, err := c.ArrayElementP(i, matches[1])
if err != nil {
if i != 0 || (!errors.Is(err, gabs.ErrNotArray) && !errors.Is(err, gabs.ErrNotFound)) {
return errors.Wrap(err, "error while parsing array element at path")
}

var t = make([]interface{}, 1)
// If the length of matches is 4 it means we're trying to access an object down in this array
// key, so make sure we generate the array as an array of objects, and not just a generic nil
// array.
if len(matches) == 4 {
t = []interface{}{map[string]interface{}{}}
}

// If the error is because this isn't an array or isn't found go ahead and create the array with
// an empty object if we have additional things to set on the array, or just an empty array type
// if there is not an object structure detected (no matches[3] available).
if _, err = c.SetP(t, matches[1]); err != nil {
return errors.Wrap(err, "failed to create empty array for missing element")
}

// Set our cursor to be the array element we expect, which in this case is just the first element
// since we won't run this code unless the array element is 0. There is too much complexity in trying
// to match additional elements. In those cases the server will just have to be rebooted or something.
ct, err = c.ArrayElementP(0, matches[1])
if err != nil {
return errors.Wrap(err, "failed to find array element at path")
}
}

// Try to set the value. If the path does not exist an error will be raised to the caller which will
// then check if the error is because the path is missing. In those cases we just ignore the error since
// we don't want to do anything specifically when that happens.
//
// If there are four matches in the regex it means that we managed to also match a trailing pathway
// for the key, which should be found in the given array key item and modified further.
if len(matches) == 4 {
_, err = ct.SetP(value, strings.TrimPrefix(matches[3], "."))
} else {
_, err = ct.Set(value)
}

if err != nil {
return errors.Wrap(err, "failed to set value at config path: "+path)
}

return nil
}
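For reviewers, a runnable sketch of what checkForArrayElement yields for the pathway shapes described in the comments above (the inputs are made up; note that on any match Go's FindStringSubmatch returns one slot per capture group, so the trailing group is present but empty when there is no nested key):

```go
package main

import (
	"fmt"
	"regexp"
)

var checkForArrayElement = regexp.MustCompile(`^([^\[\]]+)\[([\d]+)](\..+)?$`)

func main() {
	// A plain dotted path contains no brackets, so there is no match at all
	// and FindStringSubmatch returns nil.
	fmt.Println(checkForArrayElement.FindStringSubmatch("server.name")) // []

	// "something[1]" style: the optional trailing group is matched but empty.
	fmt.Printf("%q\n", checkForArrayElement.FindStringSubmatch("ports[0]"))
	// ["ports[0]" "ports" "0" ""]

	// "something[1].nestedvalue" style: the trailing pathway is captured too.
	fmt.Printf("%q\n", checkForArrayElement.FindStringSubmatch("ports[0].port"))
	// ["ports[0].port" "ports" "0" ".port"]
}
```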

// Sets the value at a specific pathway, but checks if we were looking for a specific
// value or not before doing it.
func (cfr *ConfigurationFileReplacement) SetAtPathway(c *gabs.Container, path string, value []byte) error {
if cfr.IfValue != "" {
if cfr.IfValue == "" {
return setValueAtPath(c, path, cfr.getKeyValue(value))
}

// If this is a regex based matching, we need to get a little more creative since
// we're only going to replacing part of the string, and not the whole thing.
if c.Exists(path) && strings.HasPrefix(cfr.IfValue, "regex:") {
if c.ExistsP(path) && strings.HasPrefix(cfr.IfValue, "regex:") {
// We're doing some regex here.
r, err := regexp.Compile(strings.TrimPrefix(cfr.IfValue, "regex:"))
if err != nil {
@@ -130,22 +216,15 @@ func (cfr *ConfigurationFileReplacement) SetAtPathway(c *gabs.Container, path st
// using the value we got from the key. This will only replace the one match.
v := strings.Trim(string(c.Path(path).Bytes()), "\"")
if r.Match([]byte(v)) {
_, err := c.SetP(r.ReplaceAllString(v, string(value)), path)

return err
return setValueAtPath(c, path, r.ReplaceAllString(v, string(value)))
}

return nil
} else {
if !c.Exists(path) || (c.Exists(path) && !bytes.Equal(c.Bytes(), []byte(cfr.IfValue))) {
} else if !c.ExistsP(path) || (c.ExistsP(path) && !bytes.Equal(c.Bytes(), []byte(cfr.IfValue))) {
return nil
}
}
}

_, err := c.SetP(cfr.getKeyValue(value), path)

return err
return setValueAtPath(c, path, cfr.getKeyValue(value))
}

// Looks up a configuration value on the Daemon given a dot-notated syntax.

@@ -3,7 +3,6 @@ package parser
import (
"bufio"
"encoding/json"
"fmt"
"github.com/apex/log"
"github.com/beevik/etree"
"github.com/buger/jsonparser"
@@ -16,6 +15,7 @@ import (
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
)

@@ -96,8 +96,7 @@ func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
return err
}

// See comment on the replacement regex to understand what exactly this is doing.
cfr.Match = cfrMatchReplacement.ReplaceAllString(m, ".$1")
cfr.Match = m

iv, err := jsonparser.GetString(data, "if_value")
// We only check keypath here since match & replace_with should be present on all of
@@ -163,7 +162,7 @@ func (f *ConfigurationFile) Parse(path string, internal bool) error {
break
}

if os.IsNotExist(err) {
if errors.Is(err, os.ErrNotExist) {
// File doesn't exist, we tried creating it, and same error is returned? Pretty
// sure this pathway is impossible, but if not, abort here.
if internal {
@@ -349,33 +348,33 @@ func (f *ConfigurationFile) parseJsonFile(path string) error {
func (f *ConfigurationFile) parseYamlFile(path string) error {
b, err := readFileBytes(path)
if err != nil {
return err
return errors.WithStack(err)
}

i := make(map[string]interface{})
if err := yaml.Unmarshal(b, &i); err != nil {
return err
return errors.WithStack(err)
}

// Unmarshal the yaml data into a JSON interface such that we can work with
// any arbitrary data structure. If we don't do this, I can't use gabs which
// makes working with unknown JSON signficiantly easier.
// makes working with unknown JSON significantly easier.
jsonBytes, err := json.Marshal(dyno.ConvertMapI2MapS(i))
if err != nil {
return err
return errors.WithStack(err)
}

// Now that the data is converted, treat it just like JSON and pass it to the
// iterator function to update values as necessary.
data, err := f.IterateOverJson(jsonBytes)
if err != nil {
return err
return errors.WithStack(err)
}

// Remarshal the JSON into YAML format before saving it back to the disk.
marshaled, err := yaml.Marshal(data.Data())
if err != nil {
return err
return errors.WithStack(err)
}

return ioutil.WriteFile(path, marshaled, 0644)
@@ -426,15 +425,46 @@ func (f *ConfigurationFile) parseTextFile(path string) error {
// Parses a properties file and updates the values within it to match those that
// are passed. Writes the file once completed.
func (f *ConfigurationFile) parsePropertiesFile(path string) error {
p, err := properties.LoadFile(path, properties.UTF8)
// Open the file.
f2, err := os.Open(path)
if err != nil {
return err
return errors.WithStack(err)
}

var s strings.Builder

// Get any header comments from the file.
scanner := bufio.NewScanner(f2)
for scanner.Scan() {
text := scanner.Text()

if text[0] != '#' {
break
}

s.WriteString(text)
s.WriteString("\n")
}

// Close the file.
_ = f2.Close()

// Handle any scanner errors.
if err := scanner.Err(); err != nil {
return errors.WithStack(err)
}

// Decode the properties file.
p, err := properties.LoadFile(path, properties.UTF8)
if err != nil {
return errors.WithStack(err)
}

// Replace any values that need to be replaced.
for _, replace := range f.Replace {
data, err := f.LookupConfigurationValue(replace)
if err != nil {
return err
return errors.WithStack(err)
}

v, ok := p.Get(replace.Match)
@@ -446,27 +476,32 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
}

if _, _, err := p.Set(replace.Match, data); err != nil {
return err
return errors.WithStack(err)
}
}

// Add the new file content to the string builder.
for _, key := range p.Keys() {
value, ok := p.Get(key)
if !ok {
continue
}

s.WriteString(key)
s.WriteByte('=')
s.WriteString(strings.Trim(strconv.QuoteToASCII(value), `"`))
s.WriteString("\n")
}

// Open the file for writing.
w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
return err
return errors.WithStack(err)
}
defer w.Close()

var s string
// This is a copy of the properties.String() func except we don't plop spaces around
// the key=value configurations since people like to complain about that.
// func (p *Properties) String() string
for _, key := range p.Keys() {
value, _ := p.Get(key)

s = fmt.Sprintf("%s%s=%s\n", s, key, value)
}

// Can't use the properties.Write() function since that doesn't apply our nicer formatting.
if _, err := w.Write([]byte(s)); err != nil {
// Write the data to the file.
if _, err := w.Write([]byte(s.String())); err != nil {
return err
}

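The strconv.QuoteToASCII call above rewrites any non-ASCII runes in a property value as \uXXXX escapes — the form .properties consumers generally expect — and trimming the added quotes leaves just the escaped text. A runnable illustration with made-up values:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Plain ASCII values pass through unchanged once the added quotes are trimmed.
	fmt.Println(strings.Trim(strconv.QuoteToASCII("localhost"), `"`)) // localhost

	// Non-ASCII runes become \uXXXX escapes, which Java-style .properties
	// loaders can read back correctly.
	fmt.Println(strings.Trim(strconv.QuoteToASCII("héllo"), `"`)) // h\u00e9llo
}
```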
@@ -16,7 +16,7 @@ func SetAccessControlHeaders(c *gin.Context) {
o := c.GetHeader("Origin")
if o != config.Get().PanelLocation {
for _, origin := range config.Get().AllowedOrigins {
if o != origin {
if origin != "*" && o != origin {
continue
}

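The adjusted condition means a configured "*" entry now matches any request origin. Reduced to a predicate, the loop above behaves like this sketch (the helper name is invented):

```go
package middleware

// allowed reports whether a request Origin header passes the check above:
// a configured entry of "*" matches anything, otherwise the origin must
// equal one of the configured values exactly.
func allowed(origin string, allowedOrigins []string) bool {
	for _, o := range allowedOrigins {
		if o == "*" || o == origin {
			return true
		}
	}
	return false
}
```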
@@ -31,9 +31,11 @@ func getServer(c *gin.Context) {
func getServerLogs(c *gin.Context) {
s := GetServer(c.Param("server"))

l, _ := strconv.ParseInt(c.DefaultQuery("size", "8192"), 10, 64)
l, _ := strconv.Atoi(c.DefaultQuery("size", "100"))
if l <= 0 {
l = 2048
l = 100
} else if l > 100 {
l = 100
}

out, err := s.ReadLogfile(l)
@@ -84,7 +86,7 @@ func postServerPower(c *gin.Context) {
return
}

// Pass the actual heavy processing off to a seperate thread to handle so that
// Pass the actual heavy processing off to a separate thread to handle so that
// we can immediately return a response from the server. Some of these actions
// can take quite some time, especially stopping or restarting.
go func(s *server.Server) {
@@ -176,7 +178,7 @@ func postServerReinstall(c *gin.Context) {
c.Status(http.StatusAccepted)
}

// Deletes a server from the wings daemon and deassociates its objects.
// Deletes a server from the wings daemon and dissociate it's objects.
func deleteServer(c *gin.Context) {
s := GetServer(c.Param("server"))

@@ -196,7 +198,8 @@ func deleteServer(c *gin.Context) {
}

// Unsubscribe all of the event listeners.
s.Events().UnsubscribeAll()
s.Events().Destroy()
s.Throttler().StopTimer()

// Destroy the environment; in Docker this will handle a running container and
// forcibly terminate it before removing the container, so we do not need to handle

@@ -339,10 +339,22 @@ func postServerDecompressFiles(c *gin.Context) {
}

if err := s.Filesystem.DecompressFile(data.RootPath, data.File); err != nil {
// Check if the file does not exist.
// NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
c.Status(http.StatusNotFound)
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested archive was not found.",
})
return
}

// If the file is busy for some reason just return a nicer error to the user since there is not
// much we specifically can do. They'll need to stop the running server process in order to overwrite
// a file like this.
if strings.Contains(err.Error(), "text file busy") {
s.Log().WithField("error", err).Warn("failed to decompress file due to busy text file")

c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "One or more files this archive is attempting to overwrite are currently in use by another process. Please try again.",
})
return
}

@@ -378,15 +390,15 @@ func postServerUploadFiles(c *gin.Context) {
form, err := c.MultipartForm()
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Failed to get multipart form.",
"error": "Failed to get multipart form data from request.",
})
return
}

headers, ok := form.File["files"]
if !ok {
c.AbortWithStatusJSON(http.StatusNotModified, gin.H{
"error": "No files were attached to the request.",
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "No files were found on the request body.",
})
return
}

@@ -57,7 +57,11 @@ func postCreateServer(c *gin.Context) {
// cycle. If there are any errors they will be logged and communicated back
// to the Panel where a reinstall may take place.
go func(i *installer.Installer) {
i.Execute()
err := i.Server().CreateEnvironment()
if err != nil {
i.Server().Log().WithField("error", err).Error("failed to create server environment during install process")
return
}

if err := i.Server().Install(false); err != nil {
log.WithFields(log.Fields{"server": i.Uuid(), "error": err}).Error("failed to run install process for server")
@@ -71,7 +75,7 @@ func postCreateServer(c *gin.Context) {
func postUpdateConfiguration(c *gin.Context) {
// A backup of the configuration for error purposes.
ccopy := *config.Get()
// A copy of the configuration we're using to bind the data recevied into.
// A copy of the configuration we're using to bind the data received into.
cfg := *config.Get()

// BindJSON sends 400 if the request fails, all we need to do is return

@@ -5,16 +5,16 @@ import (
"bytes"
"crypto/sha256"
"encoding/hex"
"errors"
"github.com/apex/log"
"github.com/buger/jsonparser"
"github.com/gin-gonic/gin"
"github.com/mholt/archiver/v3"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/installer"
"github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server"
"go.uber.org/zap"
"io"
"io/ioutil"
"net/http"
@@ -22,7 +22,6 @@ import (
"path/filepath"
"strconv"
"strings"
"time"
)

func getServerArchive(c *gin.Context) {
@@ -94,45 +93,34 @@ func getServerArchive(c *gin.Context) {
func postServerArchive(c *gin.Context) {
s := GetServer(c.Param("server"))

go func(server *server.Server) {
start := time.Now()

if err := server.Archiver.Archive(); err != nil {
zap.S().Errorw("failed to get archive for server", zap.String("server", server.Id()), zap.Error(err))
go func(s *server.Server) {
if err := s.Archiver.Archive(); err != nil {
s.Log().WithField("error", err).Error("failed to get archive for server")
return
}

zap.S().Debugw(
"successfully created archive for server",
zap.String("server", server.Id()),
zap.Duration("time", time.Now().Sub(start).Round(time.Microsecond)),
)
s.Log().Debug("successfully created server archive, notifying panel")

r := api.NewRequester()
rerr, err := r.SendArchiveStatus(server.Id(), true)
rerr, err := r.SendArchiveStatus(s.Id(), true)
if rerr != nil || err != nil {
if err != nil {
zap.S().Errorw("failed to notify panel with archive status", zap.String("server", server.Id()), zap.Error(err))
s.Log().WithField("error", err).Error("failed to notify panel of archive status")
return
}

zap.S().Errorw(
"panel returned an error when sending the archive status",
zap.String("server", server.Id()),
zap.Error(errors.New(rerr.String())),
)
s.Log().WithField("error", rerr.String()).Error("panel returned an error when sending the archive status")

return
}

zap.S().Debugw("successfully notified panel about archive status", zap.String("server", server.Id()))
s.Log().Debug("successfully notified panel of archive status")
}(s)

c.Status(http.StatusAccepted)
}

func postTransfer(c *gin.Context) {
zap.S().Debug("incoming transfer from panel")

buf := bytes.Buffer{}
buf.ReadFrom(c.Request.Body)

@@ -141,6 +129,7 @@ func postTransfer(c *gin.Context) {
url, _ := jsonparser.GetString(data, "url")
token, _ := jsonparser.GetString(data, "token")

l := log.WithField("server", serverID)
// Create an http client with no timeout.
client := &http.Client{Timeout: 0}

@@ -150,25 +139,25 @@ func postTransfer(c *gin.Context) {
return
}

zap.S().Errorw("server transfer has failed", zap.String("server", serverID))
l.Info("server transfer failed, notifying panel")
rerr, err := api.NewRequester().SendTransferFailure(serverID)
if rerr != nil || err != nil {
if err != nil {
zap.S().Errorw("failed to notify panel with transfer failure", zap.String("server", serverID), zap.Error(err))
l.WithField("error", err).Error("failed to notify panel with transfer failure")
return
}

zap.S().Errorw("panel returned an error when notifying of a transfer failure", zap.String("server", serverID), zap.Error(errors.New(rerr.String())))
l.WithField("error", errors.WithStack(rerr)).Error("received error response from panel while notifying of transfer failure")
return
}

zap.S().Debugw("successfully notified panel about transfer failure", zap.String("server", serverID))
l.Debug("notified panel of transfer failure")
}()

// Make a new GET request to the URL the panel gave us.
req, err := http.NewRequest("GET", url, nil)
if err != nil {
zap.S().Errorw("failed to create http request", zap.Error(err))
log.WithField("error", errors.WithStack(err)).Error("failed to create http request for archive transfer")
return
}

@@ -178,20 +167,22 @@ func postTransfer(c *gin.Context) {
// Execute the http request.
res, err := client.Do(req)
if err != nil {
zap.S().Errorw("failed to send http request", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to send archive http request")
return
}
defer res.Body.Close()

// Handle non-200 status codes.
if res.StatusCode != 200 {
body, err := ioutil.ReadAll(res.Body)
_, err := ioutil.ReadAll(res.Body)
if err != nil {
zap.S().Errorw("failed to read response body", zap.Int("status", res.StatusCode), zap.Error(err))
l.WithField("error", errors.WithStack(err)).WithField("status", res.StatusCode).Error("failed read transfer response body")

return
}

zap.S().Errorw("failed to request server archive", zap.Int("status", res.StatusCode), zap.String("body", string(body)))
l.WithField("error", errors.WithStack(err)).WithField("status", res.StatusCode).Error("failed to request server archive")

return
}

@@ -202,12 +193,13 @@ func postTransfer(c *gin.Context) {
_, err = os.Stat(archivePath)
if err != nil {
if !os.IsNotExist(err) {
zap.S().Errorw("failed to stat file", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to stat archive file")
return
}
} else {
if err := os.Remove(archivePath); err != nil {
zap.S().Errorw("failed to delete old file", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Warn("failed to remove old archive file")

return
}
}
@@ -215,7 +207,8 @@ func postTransfer(c *gin.Context) {
// Create the file.
file, err := os.Create(archivePath)
if err != nil {
zap.S().Errorw("failed to open file on disk", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to open archive on disk")

return
}

@@ -223,21 +216,24 @@ func postTransfer(c *gin.Context) {
buf := make([]byte, 1024*4)
_, err = io.CopyBuffer(file, res.Body, buf)
if err != nil {
zap.S().Errorw("failed to copy file to disk", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to copy archive file to disk")

return
}

// Close the file so it can be opened to verify the checksum.
if err := file.Close(); err != nil {
zap.S().Errorw("failed to close archive file", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to close archive file")

return
}
zap.S().Debug("server archive has been downloaded, computing checksum..", zap.String("server", serverID))

l.WithField("server", serverID).Debug("server archive downloaded, computing checksum...")

// Open the archive file for computing a checksum.
file, err = os.Open(archivePath)
if err != nil {
zap.S().Errorw("failed to open file on disk", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to open archive on disk")
return
}

@@ -245,35 +241,35 @@ func postTransfer(c *gin.Context) {
hash := sha256.New()
buf = make([]byte, 1024*4)
if _, err := io.CopyBuffer(hash, file, buf); err != nil {
zap.S().Errorw("failed to copy file for checksum verification", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to copy archive file for checksum verification")
return
}

// Verify the two checksums.
if hex.EncodeToString(hash.Sum(nil)) != res.Header.Get("X-Checksum") {
zap.S().Errorw("checksum failed verification")
l.Error("checksum verification failed for archive")
return
}

// Close the file.
if err := file.Close(); err != nil {
zap.S().Errorw("failed to close archive file", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to close archive file after calculating checksum")
return
}

zap.S().Infow("server archive transfer was successful", zap.String("server", serverID))
l.Info("server archive transfer was successful")

// Get the server data from the request.
serverData, t, _, _ := jsonparser.Get(data, "server")
if t != jsonparser.Object {
zap.S().Errorw("invalid server data passed in request")
l.Error("invalid server data passed in request")
return
}

// Create a new server installer (note this does not execute the install script)
i, err := installer.New(serverData)
if err != nil {
zap.S().Warnw("failed to validate the received server data", zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to validate received server data")
return
}

@@ -281,11 +277,14 @@ func postTransfer(c *gin.Context) {
server.GetServers().Add(i.Server())

// Create the server's environment (note this does not execute the install script)
i.Execute()
if err := i.Server().CreateEnvironment(); err != nil {
l.WithField("error", err).Error("failed to create server environment")
return
}

// Un-archive the archive. That sounds weird..
if err := archiver.NewTarGz().Unarchive(archivePath, i.Server().Filesystem.Path()); err != nil {
zap.S().Errorw("failed to extract archive", zap.String("server", serverID), zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to extract server archive")
return
}

@@ -300,15 +299,16 @@ func postTransfer(c *gin.Context) {
rerr, err := api.NewRequester().SendTransferSuccess(serverID)
if rerr != nil || err != nil {
if err != nil {
zap.S().Errorw("failed to notify panel with transfer success", zap.String("server", serverID), zap.Error(err))
l.WithField("error", errors.WithStack(err)).Error("failed to notify panel of transfer success")
return
}

zap.S().Errorw("panel returned an error when notifying of a transfer success", zap.String("server", serverID), zap.Error(errors.New(rerr.String())))
l.WithField("error", errors.WithStack(rerr)).Error("panel responded with error after transfer success")

return
}

zap.S().Debugw("successfully notified panel about transfer success", zap.String("server", serverID))
l.Info("successfully notified panel of transfer success")
}(buf.Bytes())

c.Status(http.StatusAccepted)

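The flow above downloads the archive, closes it, and then reopens it to compute the SHA-256. A possible alternative, offered only as a sketch (hypothetical helper, not part of this diff), is to hash while streaming with io.TeeReader so no second pass over the file is needed:

```go
package transfer

import (
	"crypto/sha256"
	"encoding/hex"
	"io"
	"net/http"
	"os"
)

// downloadWithChecksum streams a response body to disk while hashing it,
// returning the hex-encoded SHA-256 of everything that was written.
func downloadWithChecksum(url, dest string) (string, error) {
	res, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	f, err := os.Create(dest)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := sha256.New()
	// Every byte copied to the file is also fed through the hash.
	if _, err := io.Copy(f, io.TeeReader(res.Body, h)); err != nil {
		return "", err
	}

	return hex.EncodeToString(h.Sum(nil)), nil
}
```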
@@ -28,7 +28,7 @@ func (h *Handler) ListenForExpiration(ctx context.Context) {
if jwt != nil {
if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 0 {
_ = h.SendJson(&Message{Event: TokenExpiredEvent})
} else if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 180 {
} else if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 60 {
_ = h.SendJson(&Message{Event: TokenExpiringEvent})
}
}
@@ -36,10 +36,7 @@ func (h *Handler) ListenForExpiration(ctx context.Context) {
}
}

// Listens for different events happening on a server and sends them along
// to the connected websocket.
func (h *Handler) ListenForServerEvents(ctx context.Context) {
e := []string{
var e = []string{
server.StatsEvent,
server.StatusEvent,
server.ConsoleOutputEvent,
@@ -50,24 +47,29 @@ func (h *Handler) ListenForServerEvents(ctx context.Context) {
server.BackupCompletedEvent,
}

eventChannel := make(chan events.Event)
for _, event := range e {
h.server.Events().Subscribe(event, eventChannel)
// Listens for different events happening on a server and sends them along
// to the connected websocket.
func (h *Handler) ListenForServerEvents(ctx context.Context) {
h.server.Log().Debug("listening for server events over websocket")
callback := func(e events.Event) {
if err := h.SendJson(&Message{Event: e.Topic, Args: []string{e.Data}}); err != nil {
h.server.Log().WithField("error", err).Warn("error while sending server data over websocket")
}
}

for d := range eventChannel {
// Subscribe to all of the events with the same callback that will push the data out over the
// websocket for the server.
for _, evt := range e {
h.server.Events().On(evt, &callback)
}

go func(ctx context.Context) {
select {
case <-ctx.Done():
for _, event := range e {
h.server.Events().Unsubscribe(event, eventChannel)
}

close(eventChannel)
default:
_ = h.SendJson(&Message{
Event: d.Topic,
Args: []string{d.Data},
})
// Once this context is stopped, de-register all of the listeners that have been registered.
for _, evt := range e {
h.server.Events().Off(evt, &callback)
}
}
}(ctx)
}

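Subscribing every event with a pointer to the same callback is what lets the later Off call remove exactly that listener: function values are not comparable in Go, but pointers to them are. A minimal, unsynchronized sketch of such a pointer-keyed registry, using hypothetical Bus/Event types rather than the real server.Events implementation:

```go
package main

import "fmt"

type Event struct {
	Topic string
	Data  string
}

// Bus keys listeners by callback pointer so Off can delete exactly the
// function that On registered. No locking, purely illustrative.
type Bus struct {
	listeners map[string]map[*func(Event)]struct{}
}

func NewBus() *Bus {
	return &Bus{listeners: make(map[string]map[*func(Event)]struct{})}
}

func (b *Bus) On(topic string, cb *func(Event)) {
	if b.listeners[topic] == nil {
		b.listeners[topic] = make(map[*func(Event)]struct{})
	}
	b.listeners[topic][cb] = struct{}{}
}

func (b *Bus) Off(topic string, cb *func(Event)) {
	delete(b.listeners[topic], cb)
}

func (b *Bus) Publish(e Event) {
	for cb := range b.listeners[e.Topic] {
		(*cb)(e)
	}
}

func main() {
	b := NewBus()
	cb := func(e Event) { fmt.Println(e.Topic, e.Data) }
	b.On("console output", &cb)
	b.Publish(Event{"console output", "hello"}) // console output hello
	b.Off("console output", &cb)
	b.Publish(Event{"console output", "dropped"}) // no listener, nothing printed
}
```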
@@ -12,12 +12,7 @@ const (
)

type Message struct {
// The event to perform. Should be one of the following that are supported:
//
// - status : Returns the server's power state.
// - logs : Returns the server log data at the time of the request.
// - power : Performs a power action aganist the server based the data.
// - command : Performs a command on a server using the data field.
// The event to perform.
Event string `json:"event"`

// The data to pass along, only used by power/command currently. Other requests

@@ -37,6 +37,19 @@ type Handler struct {
server *server.Server
}

var (
ErrJwtNotPresent = errors.New("jwt: no jwt present")
ErrJwtNoConnectPerm = errors.New("jwt: missing connect permission")
ErrJwtUuidMismatch = errors.New("jwt: server uuid mismatch")
)

func IsJwtError(err error) bool {
return errors.Is(err, ErrJwtNotPresent) ||
errors.Is(err, ErrJwtNoConnectPerm) ||
errors.Is(err, ErrJwtUuidMismatch) ||
errors.Is(err, jwt.ErrExpValidation)
}

// Parses a JWT into a websocket token payload.
func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
payload := tokens.WebsocketPayload{}
@@ -64,6 +77,10 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
}

for _, origin := range config.Get().AllowedOrigins {
if origin == "*" {
return true
}

if o != origin {
continue
}
@@ -88,9 +105,13 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
}

func (h *Handler) SendJson(v *Message) error {
// Do not send JSON down the line if the JWT on the connection is not
// valid!
// Do not send JSON down the line if the JWT on the connection is not valid!
if err := h.TokenValid(); err != nil {
h.unsafeSendJson(Message{
Event: ErrorEvent,
Args: []string{"could not authenticate client: " + err.Error()},
})

return nil
}

@@ -130,7 +151,7 @@ func (h *Handler) unsafeSendJson(v interface{}) error {
func (h *Handler) TokenValid() error {
j := h.GetJwt()
if j == nil {
return errors.New("no jwt present")
return ErrJwtNotPresent
}

if err := jwt.ExpirationTimeValidator(time.Now())(&j.Payload); err != nil {
@@ -138,11 +159,11 @@ func (h *Handler) TokenValid() error {
}

if !j.HasPermission(PermissionConnect) {
return errors.New("jwt does not have connect permission")
return ErrJwtNoConnectPerm
}

if h.server.Id() != j.GetServerUuid() {
return errors.New("jwt server uuid mismatch")
return ErrJwtUuidMismatch
}

return nil
@@ -153,9 +174,10 @@ func (h *Handler) TokenValid() error {
// error message, otherwise we just send back a standard error message.
func (h *Handler) SendErrorJson(msg Message, err error, shouldLog ...bool) error {
j := h.GetJwt()
expected := errors.Is(err, server.ErrSuspended) || errors.Is(err, server.ErrIsRunning)

message := "an unexpected error was encountered while handling this request"
if server.IsSuspendedError(err) || (j != nil && j.HasPermission(PermissionReceiveErrors)) {
if expected || (j != nil && j.HasPermission(PermissionReceiveErrors)) {
message = err.Error()
}

@@ -165,7 +187,7 @@ func (h *Handler) SendErrorJson(msg Message, err error, shouldLog ...bool) error
wsm.Args = []string{m}

if len(shouldLog) == 0 || (len(shouldLog) == 1 && shouldLog[0] == true) {
if !server.IsSuspendedError(err) {
if !expected && !IsJwtError(err) {
h.server.Log().WithFields(log.Fields{"event": msg.Event, "error_identifier": u.String(), "error": err}).
Error("failed to handle websocket process; an error was encountered processing an event")
}
@@ -202,8 +224,6 @@ func (h *Handler) GetJwt() *tokens.WebsocketPayload {
func (h *Handler) HandleInbound(m Message) error {
if m.Event != AuthenticationEvent {
if err := h.TokenValid(); err != nil {
log.WithField("message", err.Error()).Debug("jwt for server websocket is no longer valid")

h.unsafeSendJson(Message{
Event: ErrorEvent,
Args: []string{"could not authenticate client: " + err.Error()},
@@ -309,7 +329,7 @@ func (h *Handler) HandleInbound(m Message) error {
return nil
}

logs, err := h.server.Environment.Readlog(1024 * 16)
logs, err := h.server.Environment.Readlog(100)
if err != nil {
return err
}

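Package-level sentinels are what make the IsJwtError helper above work: errors.Is walks the wrap chain, so a sentinel still matches after context has been added around it. A small runnable demonstration (the wrapping message here is made up):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrJwtNotPresent = errors.New("jwt: no jwt present")

func validate() error {
	// Wrapping with %w preserves the sentinel in the error chain.
	return fmt.Errorf("websocket handshake failed: %w", ErrJwtNotPresent)
}

func main() {
	err := validate()
	// errors.Is unwraps step by step, so the wrapped sentinel still matches.
	fmt.Println(errors.Is(err, ErrJwtNotPresent)) // true
	// A direct comparison against the wrapped error would not.
	fmt.Println(err == ErrJwtNotPresent) // false
}
```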
@@ -1,60 +1,111 @@
package server

import (
"context"
"fmt"
"github.com/mitchellh/colorstring"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
"sync"
"sync/atomic"
"time"
)

var ErrTooMuchConsoleData = errors.New("console is outputting too much data")

type ConsoleThrottler struct {
sync.RWMutex
mu sync.Mutex
config.ConsoleThrottles

// The total number of activations that have occurred thus far.
activations uint64

// The total number of lines that have been sent since the last reset timer period.
count uint64

// Wether or not the console output is being throttled. It is up to calling code to
// determine what to do if it is.
isThrottled system.AtomicBool

// The total number of lines processed so far during the given time period.
lines uint64

lastIntervalTime *time.Time
lastDecayTime *time.Time
timerCancel *context.CancelFunc
}

// Increments the number of activations for a server.
func (ct *ConsoleThrottler) AddActivation() uint64 {
ct.Lock()
defer ct.Unlock()

ct.activations += 1

return ct.activations
// Resets the state of the throttler.
func (ct *ConsoleThrottler) Reset() {
atomic.StoreUint64(&ct.count, 0)
atomic.StoreUint64(&ct.activations, 0)
ct.isThrottled.Set(false)
}

// Decrements the number of activations for a server.
func (ct *ConsoleThrottler) RemoveActivation() uint64 {
ct.Lock()
defer ct.Unlock()

if ct.activations == 0 {
// Triggers an activation for a server. You can also decrement the number of activations
// by passing a negative number.
func (ct *ConsoleThrottler) markActivation(increment bool) uint64 {
if !increment {
if atomic.LoadUint64(&ct.activations) == 0 {
return 0
}

ct.activations -= 1

return ct.activations
// This weird dohicky subtracts 1 from the activation count.
return atomic.AddUint64(&ct.activations, ^uint64(0))
}
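The comment above calls this a "weird dohicky": adding ^uint64(0) — all bits set, the unsigned representation of -1 — to a uint64 wraps around and decrements it, which is the subtraction idiom the sync/atomic documentation suggests. A quick runnable check:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n uint64 = 5

	// ^uint64(0) is 2^64-1; adding it wraps around, giving exactly n - 1
	// in modular arithmetic.
	atomic.AddUint64(&n, ^uint64(0))
	fmt.Println(n) // 4

	// The generalised form from the sync/atomic docs: subtract a positive
	// constant c by adding ^uint64(c-1).
	atomic.AddUint64(&n, ^uint64(3-1))
	fmt.Println(n) // 1
}
```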

// Increment the total count of lines that we have processed so far.
func (ct *ConsoleThrottler) IncrementLineCount() uint64 {
return atomic.AddUint64(&ct.lines, 1)
return atomic.AddUint64(&ct.activations, 1)
}

// Reset the line count to zero.
func (ct *ConsoleThrottler) ResetLineCount() {
atomic.SwapUint64(&ct.lines, 0)
// Determines if the console is currently being throttled. Calls to this function can be used to
// determine if output should be funneled along to the websocket processes.
func (ct *ConsoleThrottler) Throttled() bool {
return ct.isThrottled.Get()
}

// Starts a timer that runs in a seperate thread and will continually decrement the lines processed
// and number of activations, regardless of the current console message volume.
func (ct *ConsoleThrottler) StartTimer() {
ctx, cancel := context.WithCancel(context.Background())

reset := time.NewTicker(time.Duration(int64(ct.LineResetInterval)) * time.Millisecond)
decay := time.NewTicker(time.Duration(int64(ct.DecayInterval)) * time.Millisecond)

go func() {
for {
select {
case <-ctx.Done():
reset.Stop()
return
case <-reset.C:
ct.isThrottled.Set(false)
atomic.StoreUint64(&ct.count, 0)
}
}
}()

go func() {
for {
select {
case <-ctx.Done():
decay.Stop()
return
case <-decay.C:
ct.markActivation(false)
}
}
}()

ct.timerCancel = &cancel
}

// Stops a running timer processes if one exists. This is only called when the server is deleted since
// we want this to always be running. If there is no process currently running nothing will really happen.
func (ct *ConsoleThrottler) StopTimer() {
ct.mu.Lock()
defer ct.mu.Unlock()
if ct.timerCancel != nil {
c := *ct.timerCancel
c()
ct.timerCancel = nil
}
}

// Handles output from a server's console. This code ensures that a server is not outputting
@@ -70,32 +121,42 @@ func (ct *ConsoleThrottler) ResetLineCount() {
// data all at once. These values are all configurable via the wings configuration file, however the
// defaults have been in the wild for almost two years at the time of this writing, so I feel quite
// confident in them.
func (ct *ConsoleThrottler) Handle() {
//
// This function returns an error if the server should be stopped due to violating throttle constraints
// and a boolean value indicating if a throttle is being violated when it is checked.
func (ct *ConsoleThrottler) Increment(onTrigger func()) error {
if !ct.Enabled {
return nil
}

// Increment the line count and if we have now output more lines than are allowed, trigger a throttle
// activation. Once the throttle is triggered and has passed the kill at value we will trigger a server
// stop automatically.
if atomic.AddUint64(&ct.count, 1) >= ct.Lines && !ct.Throttled() {
ct.isThrottled.Set(true)
if ct.markActivation(true) >= ct.MaximumTriggerCount {
return ErrTooMuchConsoleData
}

onTrigger()
}

return nil
}

// Returns the throttler instance for the server or creates a new one.
func (s *Server) Throttler() *ConsoleThrottler {
s.throttleLock.RLock()
s.throttleLock.Lock()
defer s.throttleLock.Unlock()

if s.throttler == nil {
// Release the read lock so that we can acquire a normal lock on the process and
// make modifications to the throttler.
s.throttleLock.RUnlock()

s.throttleLock.Lock()
s.throttler = &ConsoleThrottler{
ConsoleThrottles: config.Get().Throttles,
}
s.throttleLock.Unlock()

return s.throttler
} else {
defer s.throttleLock.RUnlock()
return s.throttler
}
}

return s.throttler
}

// Sends output to the server console formatted to appear correctly as being sent
// from Wings.

@@ -35,7 +35,7 @@ func (cd *CrashHandler) SetLastCrash(t time.Time) {
// if it was the result of an event that we should try to recover from.
//
// This function assumes it is called under circumstances where a crash is suspected
// of occuring. It will not do anything to determine if it was actually a crash, just
// of occurring. It will not do anything to determine if it was actually a crash, just
// look at the exit state and check if it meets the criteria of being called a crash
// by Wings.
//

@@ -1,17 +1,9 @@
package server

type suspendedError struct {
}
import "github.com/pkg/errors"

func (e *suspendedError) Error() string {
return "server is currently in a suspended state"
}

func IsSuspendedError(err error) bool {
_, ok := err.(*suspendedError)

return ok
}
var ErrIsRunning = errors.New("server is running")
var ErrSuspended = errors.New("server is currently in a suspended state")

type crashTooFrequent struct {
}

@@ -29,6 +29,8 @@ import (
// Error returned when there is a bad path provided to one of the FS calls.
type PathResolutionError struct{}

var ErrNotEnoughDiskSpace = errors.New("not enough disk space is available to perform this operation")

// Returns the error response in a string form that can be more easily consumed.
func (pre PathResolutionError) Error() string {
return "invalid path resolution"
@@ -42,6 +44,7 @@ func IsPathResolutionError(err error) bool {

type Filesystem struct {
mu sync.Mutex
lookupTimeMu sync.RWMutex

lastLookupTime time.Time
lookupInProgress int32
@@ -183,14 +186,14 @@ func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
pi := p

// Recursively call this function to continue digging through the directory tree within
// a seperate goroutine. If the context is canceled abort this process.
// a separate goroutine. If the context is canceled abort this process.
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
// If the callback returns true, go ahead and keep walking deeper. This allows
// us to programatically continue deeper into directories, or stop digging
// us to programmatically continue deeper into directories, or stop digging
// if that pathway knows it needs nothing else.
if c, err := fs.SafePath(pi); err != nil {
return err
@@ -254,17 +257,25 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
// This is primarily to avoid a bunch of I/O operations from piling up on the server, especially on servers
// with a large amount of files.
func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
// Check if cache is expired...
if !fs.lastLookupTime.After(time.Now().Add(time.Second * -150)) {
// If we are now allowing a stale response, or there is no lookup currently in progress, go ahead
// and perform the lookup and return the fresh value. This is a blocking operation to the calling
// process.
if !allowStaleValue || atomic.LoadInt32(&fs.lookupInProgress) == 0 {
return fs.updateCachedDiskUsage()
}
// Check if cache is expired.
fs.lookupTimeMu.RLock()
isValidInCache := fs.lastLookupTime.After(time.Now().Add(time.Second * time.Duration(-1*config.Get().System.DiskCheckInterval)))
fs.lookupTimeMu.RUnlock()

// Otherwise, just go ahead and perform the cached disk usage update.
go fs.updateCachedDiskUsage()
if !isValidInCache {
// If we are now allowing a stale response go ahead and perform the lookup and return the fresh
// value. This is a blocking operation to the calling process.
if !allowStaleValue {
return fs.updateCachedDiskUsage()
} else if atomic.LoadInt32(&fs.lookupInProgress) == 0 {
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
// currently performing a lookup, just do the disk usage calculation in the background.
go func(fs *Filesystem) {
if _, err := fs.updateCachedDiskUsage(); err != nil {
fs.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to determine disk usage in go-routine")
}
}(fs)
}
}

// Return the currently cached value back to the calling function.
@@ -279,11 +290,11 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
fs.mu.Lock()
defer fs.mu.Unlock()

// Always clear the in progress flag when this process finishes.
defer atomic.StoreInt32(&fs.lookupInProgress, 0)

// Signal that we're currently updating the disk size, to prevent other routines to block on this.
// Signal that we're currently updating the disk size so that other calls to the disk checking
// functions can determine if they should queue up additional calls to this function. Ensure that
// we always set this back to 0 when this process is done executing.
atomic.StoreInt32(&fs.lookupInProgress, 1)
defer atomic.StoreInt32(&fs.lookupInProgress, 0)

// If there is no size its either because there is no data (in which case running this function
// will have effectively no impact), or there is nothing in the cache, in which case we need to
@@ -294,7 +305,10 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
// Always cache the size, even if there is an error. We want to always return that value
// so that we don't cause an endless loop of determining the disk size if there is a temporary
// error encountered.
fs.lookupTimeMu.Lock()
fs.lastLookupTime = time.Now()
fs.lookupTimeMu.Unlock()

atomic.StoreInt64(&fs.disk, size)

return size, err
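The rewritten DiskUsage pairs a TTL cache with an atomic in-progress flag so at most one recalculation runs at a time while stale readers return immediately. A condensed sketch of that pattern with hypothetical names and a caller-supplied compute function; the real code above differs in detail:

```go
package fscache

import (
	"sync"
	"sync/atomic"
	"time"
)

type usageCache struct {
	mu         sync.RWMutex
	value      int64
	lastLookup time.Time
	inProgress int32
	ttl        time.Duration
}

// Get returns the cached value, refreshing it in the foreground when the
// caller cannot accept a stale number, or in the background (at most one
// refresh at a time) when it can.
func (c *usageCache) Get(allowStale bool, compute func() int64) int64 {
	c.mu.RLock()
	fresh := c.lastLookup.After(time.Now().Add(-c.ttl))
	c.mu.RUnlock()

	if !fresh {
		if !allowStale {
			// Caller needs an up-to-date number: block on the computation.
			return c.refresh(compute)
		}
		// Stale is acceptable: kick off one background refresh at most.
		if atomic.CompareAndSwapInt32(&c.inProgress, 0, 1) {
			go c.refresh(compute)
		}
	}

	return atomic.LoadInt64(&c.value)
}

func (c *usageCache) refresh(compute func() int64) int64 {
	// Always clear the in-progress flag when done, even on early return.
	defer atomic.StoreInt32(&c.inProgress, 0)
	atomic.StoreInt32(&c.inProgress, 1)

	v := compute()
	c.mu.Lock()
	c.lastLookup = time.Now()
	c.mu.Unlock()
	atomic.StoreInt64(&c.value, v)
	return v
}
```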
@@ -388,9 +402,10 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
||||
currentSize = stat.Size()
|
||||
}
|
||||
|
||||
o := &fileOpener{}
|
||||
// This will either create the file if it does not already exist, or open and
|
||||
// truncate the existing file.
|
||||
file, err := os.OpenFile(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
file, err := o.open(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
@@ -647,7 +662,7 @@ func (fs *Filesystem) Copy(p string) error {
|
||||
// Deletes a file or folder from the system. Prevents the user from accidentally
|
||||
// (or maliciously) removing their root server data directory.
|
||||
func (fs *Filesystem) Delete(p string) error {
|
||||
// This is one of the few (only?) places in the codebase where we're explictly not using
|
||||
// This is one of the few (only?) places in the codebase where we're explicitly not using
|
||||
// the SafePath functionality when working with user provided input. If we did, you would
|
||||
// not be able to delete a file that is a symlink pointing to a location outside of the data
|
||||
// directory.
|
||||
@@ -770,6 +785,10 @@ func (fs *Filesystem) EnsureDataDirectory() error {
|
||||
if err := os.MkdirAll(fs.Path(), 0700); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if err := fs.Chown("/"); err != nil {
|
||||
fs.Server.Log().WithField("error", err).Warn("failed to chown server data directory")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -921,3 +940,29 @@ func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type fileOpener struct {
|
||||
busy uint
|
||||
}
|
||||
|
||||
// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
|
||||
// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
|
||||
// has been exhaused, at which point we will abort with an error.
|
||||
func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File, error) {
|
||||
for {
|
||||
f, err := os.OpenFile(path, flags, perm)
|
||||
|
||||
// If there is an error because the text file is busy, go ahead and sleep for a few
|
||||
// hundred milliseconds and then try again up to three times before just returning the
|
||||
// error back to the caller.
|
||||
//
|
||||
// Based on code from: https://github.com/golang/go/issues/22220#issuecomment-336458122
|
||||
if err != nil && fo.busy < 3 && strings.Contains(err.Error(), "text file busy") {
|
||||
time.Sleep(100 * time.Millisecond << fo.busy)
|
||||
fo.busy++
|
||||
continue
|
||||
}
|
||||
|
||||
return f, err
|
||||
}
|
||||
}
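Because fo.busy increments after each failed attempt, the shift in `100 * time.Millisecond << fo.busy` doubles the sleep each round: roughly 100ms, 200ms, then 400ms before the error is finally returned to the caller. A quick way to see the schedule:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the sleep durations used by fileOpener.open above.
	for busy := uint(0); busy < 3; busy++ {
		fmt.Println(100 * time.Millisecond << busy) // 100ms, 200ms, 400ms
	}
}
```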
@@ -9,5 +9,5 @@ import (
func (s *Stat) CTime() time.Time {
st := s.Info.Sys().(*syscall.Stat_t)

return time.Unix(int64(st.Ctimespec.Sec), int64(st.Ctimespec.Nsec))
return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec)
}
@@ -9,5 +9,5 @@ import (
func (s *Stat) CTime() time.Time {
st := s.Info.Sys().(*syscall.Stat_t)

return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
return time.Unix(st.Ctim.Sec, st.Ctim.Nsec)
}
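These two otherwise identical hunks come from platform-specific stat implementations: Darwin's syscall.Stat_t exposes the change time as Ctimespec while Linux calls it Ctim, and Go selects between such files at build time by filename suffix (e.g. _darwin.go vs _linux.go — the diff view omits the filenames, so the exact names are assumed here). Dropping the explicit int64 conversions is safe on 64-bit targets, where both Sec and Nsec are already int64; on 32-bit platforms they are narrower integer types, so the conversions would still be required there.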
@@ -32,14 +32,17 @@ func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (b
dirSize, err := fs.DiskUsage(false)

var size int64
var max = fs.Server.DiskSpace() * 1000.0 * 1000.0
// Walk over the archive and figure out just how large the final output would be from unarchiving it.
archiver.Walk(source, func(f archiver.File) error {
atomic.AddInt64(&size, f.Size())
err = archiver.Walk(source, func(f archiver.File) error {
if atomic.AddInt64(&size, f.Size()) + dirSize > max {
return errors.WithStack(ErrNotEnoughDiskSpace)
}

return nil
})

return ((dirSize + size) / 1000.0 / 1000.0) <= fs.Server.DiskSpace(), errors.WithStack(err)
return err == nil, errors.WithStack(err)
}
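This rewrite turns the archive scan into an early-abort check: rather than summing every entry and only comparing megabytes at the end, it converts the disk allowance to bytes up front (max) and compares the running total, plus the existing directory size, as it goes. Returning ErrNotEnoughDiskSpace from the callback aborts the remainder of the walk, so an oversized archive is rejected without reading its full listing.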
// Decompress a file in a given directory by using the archiver tool to infer the file
@@ -80,6 +83,11 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
return errors.New(fmt.Sprintf("could not parse underlying data source with type %s", reflect.TypeOf(s).String()))
}

return errors.Wrap(fs.Writefile(name, f), "could not extract file from archive")
p, err := fs.SafePath(filepath.Join(dir, name))
if err != nil {
return errors.Wrap(err, "failed to generate a safe path to server file")
}

return errors.Wrap(fs.Writefile(p, f), "could not extract file from archive")
})
}

@@ -25,7 +25,7 @@ import (
// Executes the installation stack for a server process. Bubbles any errors up to the calling
// function which should handle contacting the panel to notify it of the server state.
//
// Pass true as the first arugment in order to execute a server sync before the process to
// Pass true as the first argument in order to execute a server sync before the process to
// ensure the latest information is used.
func (s *Server) Install(sync bool) error {
if sync {
@@ -197,7 +197,7 @@ func (ip *InstallationProcess) RemoveContainer() {
}
}

// Runs the installation process, this is done as a backgrounded thread. This will configure
// Runs the installation process, this is done in a background thread. This will configure
// the required environment, and then spin up the installation container.
//
// Once the container finishes installing the results will be stored in an installation
@@ -210,7 +210,7 @@ func (ip *InstallationProcess) Run() error {

// We now have an exclusive lock on this installation process. Ensure that whenever this
// process is finished that the semaphore is released so that other processes can be executed
// without encounting a wait timeout.
// without encountering a wait timeout.
defer func() {
ip.Server.Log().Debug("releasing installation process lock")
ip.Server.installer.sem.Release(1)
@@ -464,13 +464,13 @@ func (ip *InstallationProcess) Execute() (string, error) {
ip.Server.Events().Publish(DaemonMessageEvent, "Installation process completed.")
}(r.ID)

sChann, eChann := ip.client.ContainerWait(ip.context, r.ID, container.WaitConditionNotRunning)
sChan, eChan := ip.client.ContainerWait(ip.context, r.ID, container.WaitConditionNotRunning)
select {
case err := <-eChann:
case err := <-eChan:
if err != nil {
return "", errors.WithStack(err)
}
case <-sChann:
case <-sChan:
}

return r.ID, nil
@@ -5,40 +5,75 @@ import (
"github.com/apex/log"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"regexp"
"strconv"
)

// Adds all of the internal event listeners we want to use for a server.
var dockerEvents = []string{
environment.DockerImagePullStatus,
environment.DockerImagePullStarted,
environment.DockerImagePullCompleted,
}

// Adds all of the internal event listeners we want to use for a server. These listeners can only be
// removed by deleting the server as they should last for the duration of the process' lifetime.
func (s *Server) StartEventListeners() {
console := make(chan events.Event)
state := make(chan events.Event)
stats := make(chan events.Event)
console := func(e events.Event) {
t := s.Throttler()
err := t.Increment(func () {
s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.")
})

s.Environment.Events().Subscribe(environment.ConsoleOutputEvent, console)
s.Environment.Events().Subscribe(environment.StateChangeEvent, state)
s.Environment.Events().Subscribe(environment.ResourceEvent, stats)

// TODO: this is leaky I imagine since the routines aren't destroyed when the server is?
// An error is only returned if the server has breached the thresholds set.
if err != nil {
// If the process is already stopping, just let it continue with that action rather than attempting
// to terminate again.
if s.GetState() != environment.ProcessStoppingState {
s.SetState(environment.ProcessStoppingState)
go func() {
for {
select {
case data := <-console:
// Immediately emit this event back over the server event stream since it is
// being called from the environment event stream and things probably aren't
// listening to that event.
s.Events().Publish(ConsoleOutputEvent, data.Data)
s.Log().Warn("stopping server instance, violating throttle limits")
s.PublishConsoleOutputFromDaemon("Your server is being stopped for outputting too much data in a short period of time.")
// Completely skip over server power actions and terminate the running instance. This gives the
// server 15 seconds to finish stopping gracefully before it is forcefully terminated.
if err := s.Environment.WaitForStop(config.Get().Throttles.StopGracePeriod, true); err != nil {
// If there is an error set the process back to running so that this throttler is called
// again and hopefully kills the server.
if s.GetState() != environment.ProcessOfflineState {
s.SetState(environment.ProcessRunningState)
}

s.Log().WithField("error", errors.WithStack(err)).Error("failed to terminate environment after triggering throttle")
}
}()
}
}

// If we are not throttled, go ahead and output the data.
if !t.Throttled() {
s.Events().Publish(ConsoleOutputEvent, e.Data)
}

// Also pass the data along to the console output channel.
s.onConsoleOutput(data.Data)
case data := <-state:
s.SetState(data.Data)
case data := <-stats:
s.onConsoleOutput(e.Data)
}

state := func(e events.Event) {
// Reset the throttler when the process is started.
if e.Data == environment.ProcessStartingState {
s.Throttler().Reset()
}

s.SetState(e.Data)
}

stats := func(e events.Event) {
st := new(environment.Stats)
if err := json.Unmarshal([]byte(data.Data), st); err != nil {
if err := json.Unmarshal([]byte(e.Data), st); err != nil {
s.Log().WithField("error", errors.WithStack(err)).Warn("failed to unmarshal server environment stats")
continue
return
}

// Update the server resource tracking object with the resources we got here.
@@ -50,8 +85,24 @@ func (s *Server) StartEventListeners() {

s.emitProcUsage()
}

docker := func(e events.Event) {
if e.Topic == environment.DockerImagePullStatus {
s.Events().Publish(InstallOutputEvent, e.Data)
} else if e.Topic == environment.DockerImagePullStarted {
s.PublishConsoleOutputFromDaemon("Pulling Docker container image, this could take a few minutes to complete...")
} else {
s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
}
}

s.Log().Info("registering event listeners: console, state, resources...")
s.Environment.Events().On(environment.ConsoleOutputEvent, &console)
s.Environment.Events().On(environment.StateChangeEvent, &state)
s.Environment.Events().On(environment.ResourceEvent, &stats)
for _, evt := range dockerEvents {
s.Environment.Events().On(evt, &docker)
}
}()
}
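This refactor replaces the three long-lived channels and their select loop with per-topic callback functions, registered via On(topic, &callback) and kept for the lifetime of the server (a new dockerEvents group also surfaces image-pull progress). A minimal sketch of a callback-style emitter with that registration shape — the Bus type below is illustrative, not the actual wings/events implementation:

```go
package events

import (
	"encoding/json"
	"sync"
)

type Event struct {
	Topic string
	Data  string
}

// Bus is an illustrative callback-based emitter. Listeners are registered
// by pointer so the same function value can later be identified for removal.
type Bus struct {
	mu        sync.RWMutex
	callbacks map[string][]*func(Event)
}

func (b *Bus) On(topic string, fn *func(Event)) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.callbacks == nil {
		b.callbacks = make(map[string][]*func(Event))
	}
	b.callbacks[topic] = append(b.callbacks[topic], fn)
}

func (b *Bus) Publish(topic string, data string) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	for _, fn := range b.callbacks[topic] {
		(*fn)(Event{Topic: topic, Data: data})
	}
}

// PublishJson marshals v and publishes the result, returning the marshal
// error to the caller instead of silently dropping it.
func (b *Bus) PublishJson(topic string, v interface{}) error {
	data, err := json.Marshal(v)
	if err != nil {
		return err
	}
	b.Publish(topic, string(data))
	return nil
}
```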
var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))")
@@ -78,7 +129,7 @@ func (s *Server) onConsoleOutput(data string) {

s.Log().WithFields(log.Fields{
"match": l.String(),
"against": data,
"against": strconv.QuoteToASCII(data),
}).Debug("detected server in running state based on console line output")

// If the specific line of output is one that would mark the server as started,
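Logging the raw line made ANSI escapes and other non-printable bytes invisible in the "against" field; strconv.QuoteToASCII renders them as escaped ASCII so the debug output shows exactly what was matched. For example:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	line := "\u001b[32mDone (3.2s)!\u001b[0m \u00a77Ready"
	fmt.Println(strconv.QuoteToASCII(line))
	// Prints: "\x1b[32mDone (3.2s)!\x1b[0m \u00a77Ready"
}
```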
@@ -37,12 +37,6 @@ func LoadDirectory() error {
return errors.New(rerr.String())
}

log.Debug("retrieving cached server states from disk")
states, err := getServerStates()
if err != nil {
log.WithField("error", errors.WithStack(err)).Error("failed to retrieve locally cached server states from disk, assuming all servers in offline state")
}

start := time.Now()
log.WithField("total_configs", len(configs)).Info("processing servers returned by the API")

@@ -59,11 +53,6 @@ func LoadDirectory() error {
return
}

if state, exists := states[s.Id()]; exists {
s.Log().WithField("state", state).Debug("found existing server state in cache file; re-instantiating server state")
s.SetState(state)
}

servers.Add(s)
})
}
@@ -97,6 +86,9 @@ func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
return nil, err
}

s.resources = ResourceUsage{}
defaults.Set(&s.resources)

s.Archiver = Archiver{Server: s}
s.Filesystem = Filesystem{Server: s}

@@ -119,6 +111,7 @@ func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
} else {
s.Environment = env
s.StartEventListeners()
s.Throttler().StartTimer()
}

// Forces the configuration to be synced with the panel.

@@ -4,6 +4,7 @@ import (
"context"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
"golang.org/x/sync/semaphore"
"os"
"time"
@@ -87,6 +88,10 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error

switch action {
case PowerActionStart:
if s.GetState() != environment.ProcessOfflineState {
return ErrIsRunning
}

// Run the pre-boot logic for the server before processing the environment start.
if err := s.onBeforeStart(); err != nil {
return err
@@ -94,7 +99,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error

return s.Environment.Start()
case PowerActionStop:
// We're specificially waiting for the process to be stopped here, otherwise the lock is released
// We're specifically waiting for the process to be stopped here, otherwise the lock is released
// too soon, and you can rack up all sorts of issues.
return s.Environment.WaitForStop(10*60, true)
case PowerActionRestart:
@@ -134,21 +139,27 @@ func (s *Server) onBeforeStart() error {
// Disallow start & restart if the server is suspended. Do this check after performing a sync
// action with the Panel to ensure that we have the most up-to-date information for that server.
if s.IsSuspended() {
return new(suspendedError)
return ErrSuspended
}

// Ensure we sync the server information with the environment so that any new environment variables
// and process resource limits are correctly applied.
s.SyncWithEnvironment()

// If a server has unlimited disk space, we don't care enough to block the startup to check remaining.
// However, we should trigger a size check anyway, as it'd be good to kick it off for other processes.
if s.DiskSpace() <= 0 {
s.Filesystem.HasSpaceAvailable(true)
} else {
s.PublishConsoleOutputFromDaemon("Checking server disk space usage, this could take a few seconds...")
if !s.Filesystem.HasSpaceAvailable(false) {
return errors.New("cannot start server, not enough disk space available")
}
}

// Update the configuration files defined for the server before beginning the boot process.
// This process executes a bunch of parallel updates, so we just block until that process
// is completee. Any errors as a result of this will just be bubbled out in the logger,
// is complete. Any errors as a result of this will just be bubbled out in the logger,
// we don't need to actively do anything about it at this point, worst comes to worst the
// server starts in a weird state and the user can manually adjust.
s.PublishConsoleOutputFromDaemon("Updating process configuration files...")

@@ -1,7 +1,6 @@
package server

import (
"encoding/json"
"github.com/pterodactyl/wings/environment"
"sync"
)
@@ -37,10 +36,10 @@ func (s *Server) Proc() *ResourceUsage {

func (s *Server) emitProcUsage() {
s.resources.mu.RLock()
defer s.resources.mu.RUnlock()

b, _ := json.Marshal(s.resources)
s.Events().Publish(StatsEvent, string(b))
if err := s.Events().PublishJson(StatsEvent, s.resources); err != nil {
s.Log().WithField("error", err).Warn("error while emitting server resource usage to listeners")
}
s.resources.mu.RUnlock()
}
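Two things change in emitProcUsage: the json.Marshal error is no longer silently discarded (PublishJson returns it so it can be logged — see the sketch above for a plausible shape), and the deferred RUnlock becomes an explicit unlock at the end of the function; the read lock is still held across the publish in both versions.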
// Returns the server's current state.
@@ -22,7 +22,7 @@ type Server struct {
sync.RWMutex
emitterLock sync.Mutex
powerLock *semaphore.Weighted
throttleLock sync.RWMutex
throttleLock sync.Mutex

// Maintains the configuration for the server. This is the data that gets returned by the Panel
// such as build settings and container images.
@@ -137,6 +137,7 @@ func (s *Server) SyncWithConfiguration(cfg *api.ServerConfigurationResponse) err
// the process isn't just terminated when a user requests it be stopped.
if e, ok := s.Environment.(*docker.Environment); ok {
s.Log().Debug("syncing stop configuration with configured docker environment")
e.SetImage(s.Config().Container.Image)
e.SetStopConfiguration(&cfg.ProcessConfiguration.Stop)
}

@@ -144,7 +145,7 @@ func (s *Server) SyncWithConfiguration(cfg *api.ServerConfigurationResponse) err
}

// Reads the log file for a server up to a specified number of bytes.
func (s *Server) ReadLogfile(len int64) ([]string, error) {
func (s *Server) ReadLogfile(len int) ([]string, error) {
return s.Environment.Readlog(len)
}

@@ -156,7 +157,7 @@ func (s *Server) IsBootable() bool {
return exists
}

// Initalizes a server instance. This will run through and ensure that the environment
// Initializes a server instance. This will run through and ensure that the environment
// for the server is set up, and that all of the necessary files are created.
func (s *Server) CreateEnvironment() error {
// Ensure the data directory exists before getting too far through this process.

@@ -15,7 +15,7 @@ import (
var stateMutex sync.Mutex

// Returns the state of the servers.
func getServerStates() (map[string]string, error) {
func CachedServerStates() (map[string]string, error) {
// Request a lock after we check if the file exists.
stateMutex.Lock()
defer stateMutex.Unlock()
@@ -78,8 +78,8 @@ func (s *Server) SetState(state string) error {

// Emit the event to any listeners that are currently registered.
if prevState != state {
s.Log().WithField("status", s.Proc().State).Debug("saw server status change event")
s.Events().Publish(StatusEvent, s.Proc().State)
s.Log().WithField("status", s.Proc().getInternalState()).Debug("saw server status change event")
s.Events().Publish(StatusEvent, s.Proc().getInternalState())
}

// Persist this change to the disk immediately so that should the Daemon be stopped or

@@ -143,6 +143,7 @@ func (s *Server) SyncWithEnvironment() {
} else {
// Checks if the server is now in a suspended state. If so and a server process is currently running it
// will be gracefully stopped (and terminated if it refuses to stop).
if s.GetState() != environment.ProcessOfflineState {
s.Log().Info("server suspended with running process state, terminating now")

go func(s *Server) {
@@ -152,3 +153,4 @@ func (s *Server) SyncWithEnvironment() {
}(s)
}
}
}

@@ -16,7 +16,7 @@ type FileSystem struct {
UUID string
Permissions []string
ReadOnly bool
User SftpUser
User User
Cache *cache.Cache

PathValidator func(fs FileSystem, p string) (string, error)
@@ -351,7 +351,7 @@ func (fs FileSystem) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
default:
// Before adding readlink support we need to evaluate any potential security risks
// as a result of navigating around to a location that is outside the home directory
// for the logged in user. I don't forsee it being much of a problem, but I do want to
// for the logged in user. I don't foresee it being much of a problem, but I do want to
// check it out before slapping some code here. Until then, we'll just return an
// unsupported response code.
return nil, sftp.ErrSshFxOpUnsupported

@@ -27,7 +27,7 @@ type Settings struct {
BindAddress string
}

type SftpUser struct {
type User struct {
Uid int
Gid int
}
@@ -36,7 +36,7 @@ type Server struct {
cache *cache.Cache

Settings Settings
User SftpUser
User User

PathValidator func(fs FileSystem, p string) (string, error)
DiskSpaceValidator func(fs FileSystem) bool

@@ -12,7 +12,7 @@ var noMatchingServerError = errors.New("no matching server with that UUID was fo
func Initialize(config config.SystemConfiguration) error {
s := &Server{
User: SftpUser{
User: User{
Uid: config.User.Uid,
Gid: config.User.Gid,
},
@@ -66,7 +66,7 @@ func validateDiskSpace(fs FileSystem) bool {
return s.Filesystem.HasSpaceAvailable(true)
}

// Validates a set of credentials for a SFTP login aganist Pterodactyl Panel and returns
// Validates a set of credentials for a SFTP login against Pterodactyl Panel and returns
// the server's UUID if the credentials were valid.
func validateCredentials(c api.SftpAuthRequest) (*api.SftpAuthResponse, error) {
f := log.Fields{"subsystem": "sftp", "username": c.User, "ip": c.IP}
20
system/bool.go
Normal file
@@ -0,0 +1,20 @@
package system

import "sync/atomic"

type AtomicBool struct {
flag uint32
}

func (ab *AtomicBool) Set(v bool) {
i := 0
if v {
i = 1
}

atomic.StoreUint32(&ab.flag, uint32(i))
}

func (ab *AtomicBool) Get() bool {
return atomic.LoadUint32(&ab.flag) == 1
}
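A hypothetical usage sketch for the new AtomicBool (the crashLoop variable below is invented for illustration). Set and Get are individually atomic, but a Get-then-Set pair is not, so callers needing strict one-shot semantics would want a compare-and-swap variant instead:

```go
package main

import (
	"fmt"

	"github.com/pterodactyl/wings/system"
)

func main() {
	var crashLoop system.AtomicBool // zero value reads as false

	if !crashLoop.Get() {
		crashLoop.Set(true) // safe to call from multiple goroutines
	}

	fmt.Println(crashLoop.Get()) // true
}
```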