Merge branch 'develop' into feature/server-mounts

Matthew Penner committed 2020-07-04 15:17:59 -06:00 (via GitHub)
38 changed files with 873 additions and 378 deletions

View File

@@ -2,10 +2,10 @@ package server
import (
"bufio"
"github.com/apex/log"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/server/backup"
"go.uber.org/zap"
"os"
"path"
)
@@ -17,16 +17,15 @@ func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, suc
rerr, err := r.SendBackupStatus(uuid, ad.ToRequest(successful))
if rerr != nil || err != nil {
if err != nil {
zap.S().Errorw(
"failed to notify panel of backup status due to internal code error",
zap.String("backup", s.Uuid),
zap.Error(err),
)
s.Log().WithFields(log.Fields{
"backup": uuid,
"error": err,
}).Error("failed to notify panel of backup status due to internal code error")
return err
}
zap.S().Warnw(rerr.String(), zap.String("backup", uuid))
s.Log().WithField("backup", uuid).Warn(rerr.String())
return errors.New(rerr.String())
}
@@ -66,7 +65,7 @@ func (s *Server) GetIncludedBackupFiles(ignored []string) (*backup.IncludedFiles
// of the server files directory, and use that to generate the backup.
if len(ignored) == 0 {
if i, err := s.getServerwideIgnoredFiles(); err != nil {
zap.S().Warnw("failed to retrieve server ignored files", zap.String("server", s.Uuid), zap.Error(err))
s.Log().WithField("error", err).Warn("failed to retrieve ignored files listing for server")
} else {
ignored = i
}
@@ -89,7 +88,10 @@ func (s *Server) Backup(b backup.BackupInterface) error {
ad, err := b.Generate(inc, s.Filesystem.Path())
if err != nil {
if notifyError := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); notifyError != nil {
zap.S().Warnw("failed to notify panel of failed backup state", zap.String("backup", b.Identifier()), zap.Error(err))
s.Log().WithFields(log.Fields{
"backup": b.Identifier(),
"error": err,
}).Warn("failed to notify panel of failed backup state")
}
return errors.WithStack(err)
@@ -112,4 +114,4 @@ func (s *Server) Backup(b backup.BackupInterface) error {
})
return nil
}
}
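The change above is the shape of the whole commit: zap's sugared logger with positional field arguments gives way to apex/log entries that carry fields via WithField/WithFields. A minimal sketch of the new pattern, runnable on its own (the identifier and error are placeholder values):

package main

import (
	"errors"

	"github.com/apex/log"
)

func main() {
	uuid := "d9e0a593" // placeholder backup identifier
	err := errors.New("connection refused")

	// Previously: zap.S().Warnw("failed to notify panel of backup status",
	//     zap.String("backup", uuid), zap.Error(err))
	// Now fields are attached to an entry first, then the message is emitted.
	log.WithFields(log.Fields{
		"backup": uuid,
		"error":  err,
	}).Warn("failed to notify panel of backup status")
}

Server-scoped messages go through s.Log(), which this commit defines as log.WithField("server", s.Uuid), so every entry automatically carries the server UUID without each call site passing it.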

View File

@@ -3,9 +3,9 @@ package backup
import (
"archive/tar"
"context"
"github.com/apex/log"
gzip "github.com/klauspost/pgzip"
"github.com/remeh/sizedwaitgroup"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"io"
"os"
@@ -67,7 +67,7 @@ func (a *Archive) Create(dest string, ctx context.Context) error {
// Attempt to remove the archive if there is an error, report that error to
// the logger if it fails.
if rerr := os.Remove(dest); rerr != nil && !os.IsNotExist(rerr) {
zap.S().Warnw("failed to delete corrupted backup archive", zap.String("location", dest))
log.WithField("location", dest).Warn("failed to delete corrupted backup archive")
}
return err

View File

@@ -3,10 +3,10 @@ package backup
import (
"crypto/sha256"
"encoding/hex"
"github.com/apex/log"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap"
"io"
"os"
"path"
@@ -121,7 +121,10 @@ func (b *Backup) Details() *ArchiveDetails {
resp, err := b.Checksum()
if err != nil {
zap.S().Errorw("failed to calculate checksum for backup", zap.String("backup", b.Uuid), zap.Error(err))
log.WithFields(log.Fields{
"backup": b.Identifier(),
"error": err,
}).Error("failed to calculate checksum for backup")
}
checksum = hex.EncodeToString(resp)

View File

@@ -3,7 +3,7 @@ package backup
import (
"context"
"fmt"
"go.uber.org/zap"
"github.com/apex/log"
"io"
"net/http"
"os"
@@ -76,8 +76,11 @@ func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) (*http.Response, erro
}
r.Body = rc
zap.S().Debugw("uploading backup to remote S3 endpoint", zap.String("endpoint", s.PresignedUrl), zap.Any("headers", r.Header))
log.WithFields(log.Fields{
"endpoint": s.PresignedUrl,
"headers": r.Header,
}).Debug("uploading backup to remote S3 endpoint")
return http.DefaultClient.Do(r)
}
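Since the upload target is a presigned URL, the daemon needs no S3 credentials; the signature is embedded in the URL's query string and the upload is plain HTTP. A rough sketch of that flow, assuming the panel issues a presigned PUT URL (the URL and file name here are placeholders):

package main

import (
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("backup.tar.gz") // placeholder archive path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Hypothetical presigned URL; in wings this arrives as s.PresignedUrl.
	presigned := "https://bucket.s3.amazonaws.com/backup.tar.gz?X-Amz-Signature=..."

	r, err := http.NewRequest(http.MethodPut, presigned, f)
	if err != nil {
		panic(err)
	}

	res, err := http.DefaultClient.Do(r)
	if err != nil {
		panic(err)
	}
	res.Body.Close()
}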

View File

@@ -2,7 +2,6 @@ package server
import (
"github.com/pterodactyl/wings/parser"
"go.uber.org/zap"
"sync"
)
@@ -17,15 +16,15 @@ func (s *Server) UpdateConfigurationFiles() {
go func(f parser.ConfigurationFile, server *Server) {
defer wg.Done()
p, err := s.Filesystem.SafePath(f.FileName)
p, err := server.Filesystem.SafePath(f.FileName)
if err != nil {
zap.S().Errorw("failed to generate safe path for configuration file", zap.String("server", server.Uuid), zap.Error(err))
server.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
return
}
if err := f.Parse(p, false); err != nil {
zap.S().Errorw("failed to parse and update server configuration file", zap.String("server", server.Uuid), zap.Error(err))
server.Log().WithField("error", err).Error("failed to parse and update server configuration file")
}
}(v, s)
}
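Beyond the logging swap, this hunk quietly fixes the goroutine to use the server argument it was handed rather than the s captured from the enclosing scope. For the receiver s the two are identical, but the same pass-as-argument discipline is what keeps the loop variable v safe; before Go 1.22, capturing a range variable directly in a closure raced against the loop rewriting it. A tiny illustration with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	for _, v := range []string{"a", "b", "c"} {
		wg.Add(1)
		// Passing v as an argument gives each goroutine its own copy,
		// so the loop can move on without changing what the goroutine sees.
		go func(item string) {
			defer wg.Done()
			fmt.Println(item)
		}(v)
	}

	wg.Wait()
}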

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap"
"time"
)
@@ -27,15 +26,13 @@ type CrashDetection struct {
//
// If the server is determined to have crashed, the process will be restarted and the
// counter for the server will be incremented.
//
// @todo output event to server console
func (s *Server) handleServerCrash() error {
// No point in doing anything here if the server isn't currently offline, there
// is no reason to do a crash detection event. If the server crash detection is
// disabled we want to skip anything after this as well.
if s.GetState() != ProcessOfflineState || !s.CrashDetection.Enabled {
if !s.CrashDetection.Enabled {
zap.S().Debugw("server triggered crash detection but handler is disabled for server process", zap.String("server", s.Uuid))
s.Log().Debug("server triggered crash detection but handler is disabled for server process")
s.PublishConsoleOutputFromDaemon("Server detected as crashed; crash detection is disabled for this instance.")
}
@@ -51,7 +48,7 @@ func (s *Server) handleServerCrash() error {
// If the system is not configured to detect a clean exit code as a crash, and the
// crash is not the result of the program running out of memory, do nothing.
if exitCode == 0 && !oomKilled && !config.Get().System.DetectCleanExitAsCrash {
zap.S().Debugw("server exited with successful code; system configured to not detect as crash", zap.String("server", s.Uuid))
s.Log().Debug("server exited with successful exit code; system is configured to not detect this as a crash")
return nil
}

View File

@@ -6,6 +6,7 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
@@ -15,7 +16,6 @@ import (
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap"
"io"
"os"
"strconv"
@@ -122,11 +122,13 @@ func (d *DockerEnvironment) InSituUpdate() error {
return errors.WithStack(err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
u := container.UpdateConfig{
Resources: d.getResourcesForServer(),
}
if _, err := d.Client.ContainerUpdate(context.Background(), d.Server.Uuid, u); err != nil {
d.Server.Log().WithField("limits", fmt.Sprintf("%+v", u.Resources)).Debug("updating server container on-the-fly with passed limits")
if _, err := d.Client.ContainerUpdate(ctx, d.Server.Uuid, u); err != nil {
return errors.WithStack(err)
}
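This commit starts bounding Docker API calls with context.WithTimeout. The cancel function it returns should always be called, otherwise the underlying timer is held until the deadline fires; the sketch below shows the usual shape of the pattern, with a stand-in for the API call:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Bound the operation to ten seconds; defer cancel() releases the
	// timer's resources as soon as this function returns.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()

	select {
	case <-time.After(time.Second): // stand-in for d.Client.ContainerUpdate(...)
		fmt.Println("call completed")
	case <-ctx.Done():
		fmt.Println("timed out:", ctx.Err())
	}
}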
@@ -141,7 +143,7 @@ func (d *DockerEnvironment) InSituUpdate() error {
// state. This ensures that unexpected container deletion while Wings is running does
// not result in the server becoming unbootable.
func (d *DockerEnvironment) OnBeforeStart() error {
zap.S().Infow("syncing server configuration with Panel", zap.String("server", d.Server.Uuid))
d.Server.Log().Info("syncing server configuration with panel")
if err := d.Server.Sync(); err != nil {
return err
}
@@ -182,6 +184,10 @@ func (d *DockerEnvironment) Start() error {
// that point.
defer func() {
if sawError {
// If we don't set it to stopping first, you'll trigger crash detection which
// we don't want to do at this point since it'll just immediately try to do the
// exact same action that led to it crashing in the first place...
d.Server.SetState(ProcessStoppingState)
d.Server.SetState(ProcessOfflineState)
}
}()
@@ -248,8 +254,8 @@ func (d *DockerEnvironment) Start() error {
return errors.WithStack(err)
}
opts := types.ContainerStartOptions{}
if err := d.Client.ContainerStart(context.Background(), d.Server.Uuid, opts); err != nil {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
if err := d.Client.ContainerStart(ctx, d.Server.Uuid, types.ContainerStartOptions{}); err != nil {
return errors.WithStack(err)
}
@@ -418,7 +424,7 @@ func (d *DockerEnvironment) Attach() error {
d.attached = true
go func() {
if err := d.EnableResourcePolling(); err != nil {
zap.S().Warnw("failed to enabled resource polling on server", zap.String("server", d.Server.Uuid), zap.Error(errors.WithStack(err)))
d.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to enable resource polling on server")
}
}()
@@ -466,7 +472,7 @@ func (d *DockerEnvironment) FollowConsoleOutput() error {
}
if err := s.Err(); err != nil {
zap.S().Warnw("error processing scanner line in console output", zap.String("server", d.Server.Uuid), zap.Error(err))
d.Server.Log().WithField("error", err).Warn("error processing scanner line in console output")
}
}(reader)
@@ -496,7 +502,7 @@ func (d *DockerEnvironment) EnableResourcePolling() error {
if err := dec.Decode(&v); err != nil {
if err != io.EOF {
zap.S().Warnw("encountered error processing server stats; stopping collection", zap.Error(err))
d.Server.Log().WithField("error", err).Warn("encountered error processing server stats, stopping collection")
}
d.DisableResourcePolling()
@@ -547,17 +553,51 @@ func (d *DockerEnvironment) DisableResourcePolling() error {
return errors.WithStack(err)
}
// Pulls the image from Docker.
// Pulls the image from Docker. If there is an error while pulling the image from the source
// but the image already exists locally, we will report that error to the logger but continue
// with the process.
//
// The reasoning behind this is that Quay has had some serious outages as of late, and we don't
// need to block all of the servers from booting just because of that. I'd imagine in a lot of
// cases an outage shouldn't affect users too badly. It'll at least keep existing servers working
// correctly if anything.
//
// @todo handle authorization & local images
func (d *DockerEnvironment) ensureImageExists(c *client.Client) error {
out, err := c.ImagePull(context.Background(), d.Server.Container.Image, types.ImagePullOptions{All: false})
// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
// an image. Let me know when I am inevitably wrong here...
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
defer cancel()
out, err := c.ImagePull(ctx, d.Server.Container.Image, types.ImagePullOptions{All: false})
if err != nil {
images, ierr := c.ImageList(ctx, types.ImageListOptions{})
if ierr != nil {
// Well damn, something has gone really wrong here; just go ahead and abort, there
// isn't much of anything we can do to try and self-recover from this.
return ierr
}
for _, img := range images {
for _, t := range img.RepoTags {
if t == d.Server.Container.Image {
d.Server.Log().WithFields(log.Fields{
"image": d.Server.Container.Image,
"error": errors.New(err.Error()),
}).Warn("unable to pull requested image from remote source, however the image exists locally")
// Okay, we found a matching container image, in that case just go ahead and return
// from this function, since there is nothing else we need to do here.
return nil
}
}
}
return err
}
defer out.Close()
zap.S().Debugw("pulling docker image... this could take a bit of time", zap.String("image", d.Server.Container.Image))
log.WithField("image", d.Server.Container.Image).Debug("pulling docker image... this could take a bit of time")
// I'm not sure what the best approach here is, but this will block execution until the image
// is done being pulled, which is what we need.
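For reference, ImagePull only starts the pull; the returned reader streams newline-delimited JSON progress events, and the pull is finished when that stream reaches EOF. One way to block until completion, if the progress output isn't needed, is simply to drain the reader; a minimal sketch under that assumption:

package main

import (
	"context"
	"io"
	"io/ioutil"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func pullAndWait(c *client.Client, image string) error {
	out, err := c.ImagePull(context.Background(), image, types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer out.Close()

	// Draining the stream blocks until Docker finishes pulling the image.
	_, err = io.Copy(ioutil.Discard, out)
	return err
}

func main() {
	c, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	if err := pullAndWait(c, "alpine:latest"); err != nil {
		panic(err)
	}
}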
@@ -613,7 +653,7 @@ func (d *DockerEnvironment) Create() error {
ExposedPorts: d.exposedPorts(),
Image: d.Server.Container.Image,
Env: d.environmentVariables(),
Env: d.Server.GetEnvironmentVariables(),
Labels: map[string]string{
"Service": "Pterodactyl",
@@ -776,36 +816,6 @@ func (d *DockerEnvironment) parseLogToStrings(b []byte) ([]string, error) {
return out, nil
}
// Returns the environment variables for a server in KEY="VALUE" form.
func (d *DockerEnvironment) environmentVariables() []string {
zone, _ := time.Now().In(time.Local).Zone()
var out = []string{
fmt.Sprintf("TZ=%s", zone),
fmt.Sprintf("STARTUP=%s", d.Server.Invocation),
fmt.Sprintf("SERVER_MEMORY=%d", d.Server.Build.MemoryLimit),
fmt.Sprintf("SERVER_IP=%s", d.Server.Allocations.DefaultMapping.Ip),
fmt.Sprintf("SERVER_PORT=%d", d.Server.Allocations.DefaultMapping.Port),
}
eloop:
for k, v := range d.Server.EnvVars {
for _, e := range out {
if strings.HasPrefix(e, strings.ToUpper(k)) {
continue eloop
}
}
out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
}
return out
}
func (d *DockerEnvironment) volumes() map[string]struct{} {
return nil
}
// Converts the server allocation mappings into a format that can be understood
// by Docker.
func (d *DockerEnvironment) portBindings() nat.PortMap {

View File

@@ -11,7 +11,6 @@ import (
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/server/backup"
ignore "github.com/sabhiram/go-gitignore"
"go.uber.org/zap"
"io"
"io/ioutil"
"os"
@@ -134,7 +133,7 @@ func (fs *Filesystem) HasSpaceAvailable() bool {
// the cache once we've gotten it.
size, err := fs.DirectorySize("/")
if err != nil {
zap.S().Warnw("failed to determine directory size", zap.String("server", fs.Server.Uuid), zap.Error(err))
fs.Server.Log().WithField("error", err).Warn("failed to determine root server directory size")
}
// Always cache the size, even if there is an error. We want to always return that value

View File

@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"context"
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
@@ -11,28 +12,44 @@ import (
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap"
"golang.org/x/sync/semaphore"
"html/template"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sync"
"time"
)
// Executes the installation stack for a server process. Bubbles any errors up to the calling
// function which should handle contacting the panel to notify it of the server state.
func (s *Server) Install() error {
//
// Pass true as the first argument in order to execute a server sync before the process to
// ensure the latest information is used.
func (s *Server) Install(sync bool) error {
if sync {
s.Log().Info("syncing server state with remote source before executing installation process")
if err := s.Sync(); err != nil {
return err
}
}
err := s.internalInstall()
zap.S().Debugw("notifying panel of server install state", zap.String("server", s.Uuid))
s.Log().Debug("notifying panel of server install state")
if serr := s.SyncInstallState(err == nil); serr != nil {
zap.S().Warnw(
"failed to notify panel of server install state",
zap.String("server", s.Uuid),
zap.Bool("was_successful", err == nil),
zap.Error(serr),
)
l := s.Log().WithField("was_successful", err == nil)
// If the installation was successful but the state sync request failed, attach the
// error to this log entry. Otherwise ignore it in this log since whatever is calling
// this function should handle the error and will end up logging the same one.
if err == nil {
l = l.WithField("error", serr)
}
l.Warn("failed to notify panel of server install state")
}
return err
@@ -42,13 +59,13 @@ func (s *Server) Install() error {
// does not touch any existing files for the server, other than what the script modifies.
func (s *Server) Reinstall() error {
if s.GetState() != ProcessOfflineState {
zap.S().Debugw("waiting for server instance to enter a stopped state", zap.String("server", s.Uuid))
s.Log().Debug("waiting for server instance to enter a stopped state")
if err := s.Environment.WaitForStop(10, true); err != nil {
return err
}
}
return s.Install()
return s.Install(true)
}
// Internal installation function used to simplify reporting back to the Panel.
@@ -67,14 +84,12 @@ func (s *Server) internalInstall() error {
return errors.WithStack(err)
}
zap.S().Infow("beginning installation process for server", zap.String("server", s.Uuid))
s.Log().Info("beginning installation process for server")
if err := p.Run(); err != nil {
return err
}
zap.S().Infow("completed installation process for server", zap.String("server", s.Uuid))
s.Log().Info("completed installation process for server")
return nil
}
@@ -82,8 +97,8 @@ type InstallationProcess struct {
Server *Server
Script *api.InstallationScript
client *client.Client
mutex *sync.Mutex
client *client.Client
context context.Context
}
// Generates a new installation process struct that will be used to create containers,
@@ -92,27 +107,76 @@ func NewInstallationProcess(s *Server, script *api.InstallationScript) (*Install
proc := &InstallationProcess{
Script: script,
Server: s,
mutex: &sync.Mutex{},
}
ctx, cancel := context.WithCancel(context.Background())
s.installer.cancel = &cancel
if c, err := client.NewClientWithOpts(client.FromEnv); err != nil {
return nil, errors.WithStack(err)
} else {
proc.client = c
proc.context = ctx
}
return proc, nil
}
// Try to obtain an exclusive lock on the installation process for the server. Waits up to 10
// seconds before aborting with a context timeout.
func (s *Server) acquireInstallationLock() error {
if s.installer.sem == nil {
s.installer.sem = semaphore.NewWeighted(1)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
return s.installer.sem.Acquire(ctx, 1)
}
// Determines if the server is actively running the installation process by checking the status
// of the semaphore lock.
func (s *Server) IsInstalling() bool {
if s.installer.sem == nil {
return false
}
if s.installer.sem.TryAcquire(1) {
// If we made it into this block it means we were able to obtain an exclusive lock
// on the semaphore. In that case, go ahead and release that lock immediately, and
// return false.
s.installer.sem.Release(1)
return false
}
return true
}
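A weighted semaphore earns its keep over a plain mutex here in two ways: Acquire accepts a context so the lock attempt can time out (see acquireInstallationLock above), and TryAcquire doubles as a non-blocking probe for whether anything currently holds the lock. A standalone sketch of the probe trick:

package main

import (
	"fmt"

	"golang.org/x/sync/semaphore"
)

// busy reports whether the single slot is held, without blocking.
func busy(sem *semaphore.Weighted) bool {
	if sem.TryAcquire(1) {
		// We got the slot, so nobody was holding it; hand it straight back.
		sem.Release(1)
		return false
	}
	return true
}

func main() {
	sem := semaphore.NewWeighted(1)
	fmt.Println(busy(sem)) // false

	sem.TryAcquire(1) // simulate a running installation holding the lock
	fmt.Println(busy(sem)) // true
}

As with any try-lock probe, the answer is only a snapshot; IsInstalling can be stale by the time the caller acts on it, which is acceptable for the status checks it backs.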
// Aborts the server installation process by calling the cancel function on the installer
// context.
func (s *Server) AbortInstallation() {
if !s.IsInstalling() {
return
}
if s.installer.cancel != nil {
cancel := *s.installer.cancel
s.Log().Warn("aborting running installation process")
cancel()
}
}
// Removes the installer container for the server.
func (ip *InstallationProcess) RemoveContainer() {
err := ip.client.ContainerRemove(context.Background(), ip.Server.Uuid + "_installer", types.ContainerRemoveOptions{
err := ip.client.ContainerRemove(ip.context, ip.Server.Uuid+"_installer", types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
if err != nil && !client.IsErrNotFound(err) {
zap.S().Warnw("failed to delete server installer container", zap.String("server", ip.Server.Uuid), zap.Error(errors.WithStack(err)))
ip.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to delete server install container")
}
}
@@ -122,6 +186,20 @@ func (ip *InstallationProcess) RemoveContainer() {
// Once the container finishes installing the results will be stored in an installation
// log in the server's configuration directory.
func (ip *InstallationProcess) Run() error {
ip.Server.Log().Debug("acquiring installation process lock")
if err := ip.Server.acquireInstallationLock(); err != nil {
return err
}
// We now have an exclusive lock on this installation process. Ensure that whenever this
// process is finished the semaphore is released so that other processes can be executed
// without encountering a wait timeout.
defer func() {
ip.Server.Log().Debug("releasing installation process lock")
ip.Server.installer.sem.Release(1)
ip.Server.installer.cancel = nil
}()
installPath, err := ip.BeforeExecute()
if err != nil {
return err
@@ -137,7 +215,7 @@ func (ip *InstallationProcess) Run() error {
// If this step fails, log a warning but don't exit out of the process. This is completely
// internal to the daemon's functionality, and does not affect the status of the server itself.
if err := ip.AfterExecute(cid); err != nil {
zap.S().Warnw("failed to complete after-execute step of installation process", zap.String("server", ip.Server.Uuid), zap.Error(err))
ip.Server.Log().WithField("error", err).Warn("failed to complete after-execute step of installation process")
}
return nil
@@ -181,7 +259,7 @@ func (ip *InstallationProcess) writeScriptToDisk() (string, error) {
// Pulls the docker image to be used for the installation container.
func (ip *InstallationProcess) pullInstallationImage() error {
r, err := ip.client.ImagePull(context.Background(), ip.Script.ContainerImage, types.ImagePullOptions{})
r, err := ip.client.ImagePull(ip.context, ip.Script.ContainerImage, types.ImagePullOptions{})
if err != nil {
return errors.WithStack(err)
}
@@ -189,7 +267,7 @@ func (ip *InstallationProcess) pullInstallationImage() error {
// Block continuation until the image has been pulled successfully.
scanner := bufio.NewScanner(r)
for scanner.Scan() {
zap.S().Debugw(scanner.Text())
log.Debug(scanner.Text())
}
if err := scanner.Err(); err != nil {
@@ -235,7 +313,7 @@ func (ip *InstallationProcess) BeforeExecute() (string, error) {
Force: true,
}
if err := ip.client.ContainerRemove(context.Background(), ip.Server.Uuid+"_installer", opts); err != nil {
if err := ip.client.ContainerRemove(ip.context, ip.Server.Uuid+"_installer", opts); err != nil {
if !client.IsErrNotFound(err) {
e = append(e, err)
}
@@ -262,11 +340,10 @@ func (ip *InstallationProcess) GetLogPath() string {
// process to store in the server configuration directory, and then destroys the associated
// installation container.
func (ip *InstallationProcess) AfterExecute(containerId string) error {
ctx := context.Background()
defer ip.RemoveContainer()
zap.S().Debugw("pulling installation logs for server", zap.String("server", ip.Server.Uuid), zap.String("container_id", containerId))
reader, err := ip.client.ContainerLogs(ctx, containerId, types.ContainerLogsOptions{
ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
reader, err := ip.client.ContainerLogs(ip.context, containerId, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: false,
@@ -283,7 +360,37 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
defer f.Close()
// We write the contents of the container output to a more "permanent" file so that they
// can be referenced after this container is deleted.
// can be referenced after this container is deleted. We'll also include the environment
// variables passed into the container to make debugging things a little easier.
ip.Server.Log().WithField("path", ip.GetLogPath()).Debug("writing most recent installation logs to disk")
tmpl, err := template.New("header").Parse(`Pterodactyl Server Installation Log
|
| Details
| ------------------------------
Server UUID: {{.Server.Uuid}}
Container Image: {{.Script.ContainerImage}}
Container Entrypoint: {{.Script.Entrypoint}}
|
| Environment Variables
| ------------------------------
{{ range $key, $value := .Server.GetEnvironmentVariables }} {{ $value }}
{{ end }}
|
| Script Output
| ------------------------------
`)
if err != nil {
return errors.WithStack(err)
}
if err := tmpl.Execute(f, ip); err != nil {
return errors.WithStack(err)
}
if _, err := io.Copy(f, reader); err != nil {
return errors.WithStack(err)
}
@@ -293,14 +400,6 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
// Executes the installation process inside a specially created docker container.
func (ip *InstallationProcess) Execute(installPath string) (string, error) {
ctx := context.Background()
zap.S().Debugw(
"creating server installer container",
zap.String("server", ip.Server.Uuid),
zap.String("script_path", installPath+"/install.sh"),
)
conf := &container.Config{
Hostname: "installer",
AttachStdout: true,
@@ -348,34 +447,26 @@ func (ip *InstallationProcess) Execute(installPath string) (string, error) {
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
}
zap.S().Infow("creating installer container for server process", zap.String("server", ip.Server.Uuid))
r, err := ip.client.ContainerCreate(ctx, conf, hostConf, nil, ip.Server.Uuid+"_installer")
ip.Server.Log().WithField("install_script", installPath+"/install.sh").Info("creating install container for server process")
r, err := ip.client.ContainerCreate(ip.context, conf, hostConf, nil, ip.Server.Uuid+"_installer")
if err != nil {
return "", errors.WithStack(err)
}
zap.S().Infow(
"running installation script for server in container",
zap.String("server", ip.Server.Uuid),
zap.String("container_id", r.ID),
)
if err := ip.client.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}); err != nil {
ip.Server.Log().WithField("container_id", r.ID).Info("running installation script for server in container")
if err := ip.client.ContainerStart(ip.context, r.ID, types.ContainerStartOptions{}); err != nil {
return "", err
}
go func(id string) {
ip.Server.Events().Publish(DaemonMessageEvent, "Starting installation process, this could take a few minutes...")
if err := ip.StreamOutput(id); err != nil {
zap.S().Errorw(
"error handling streaming output for server install process",
zap.String("container_id", id),
zap.Error(err),
)
ip.Server.Log().WithField("error", err).Error("error while handling output stream for server install process")
}
ip.Server.Events().Publish(DaemonMessageEvent, "Installation process completed.")
}(r.ID)
sChann, eChann := ip.client.ContainerWait(ctx, r.ID, container.WaitConditionNotRunning)
sChann, eChann := ip.client.ContainerWait(ip.context, r.ID, container.WaitConditionNotRunning)
select {
case err := <-eChann:
if err != nil {
@@ -391,7 +482,7 @@ func (ip *InstallationProcess) Execute(installPath string) (string, error) {
// directory, as well as to a websocket listener so that the process can be viewed in
// the panel by administrators.
func (ip *InstallationProcess) StreamOutput(id string) error {
reader, err := ip.client.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{
reader, err := ip.client.ContainerLogs(ip.context, id, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
@@ -409,12 +500,10 @@ func (ip *InstallationProcess) StreamOutput(id string) error {
}
if err := s.Err(); err != nil {
zap.S().Warnw(
"error processing scanner line in installation output for server",
zap.String("server", ip.Server.Uuid),
zap.String("container_id", id),
zap.Error(errors.WithStack(err)),
)
ip.Server.Log().WithFields(log.Fields{
"container_id": id,
"error": errors.WithStack(err),
}).Warn("error processing scanner line in installation output for server")
}
return nil

View File

@@ -1,8 +1,8 @@
package server
import (
"github.com/apex/log"
"github.com/pterodactyl/wings/api"
"go.uber.org/zap"
"strings"
)
@@ -28,9 +28,10 @@ func (s *Server) onConsoleOutput(data string) {
// set the server to that state. Only do this if the server is not currently stopped
// or stopping.
if s.GetState() == ProcessStartingState && strings.Contains(data, s.processConfiguration.Startup.Done) {
zap.S().Debugw(
"detected server in running state based on line output", zap.String("match", s.processConfiguration.Startup.Done), zap.String("against", data),
)
s.Log().WithFields(log.Fields{
"match": s.processConfiguration.Startup.Done,
"against": data,
}).Debug("detected server in running state based on console line output")
s.SetState(ProcessRunningState)
}

View File

@@ -1,16 +1,19 @@
package server
import (
"context"
"fmt"
"github.com/apex/log"
"github.com/creasty/defaults"
"github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"github.com/remeh/sizedwaitgroup"
"go.uber.org/zap"
"golang.org/x/sync/semaphore"
"math"
"os"
"strconv"
"strings"
"sync"
"time"
@@ -22,6 +25,36 @@ func GetServers() *Collection {
return servers
}
type EnvironmentVariables map[string]interface{}
// Ugly hacky function to handle environment variables that get passed through as not-a-string
// from the Panel. Ideally we'd just say only pass strings, but that is a fragile idea: if a
// non-string slipped through it would cause a crash or leave the server unavailable. For now
// try to handle the most likely values from the JSON and hope for the best.
func (ev EnvironmentVariables) Get(key string) string {
val, ok := ev[key]
if !ok {
return ""
}
switch v := val.(type) {
case int:
return strconv.Itoa(v)
case int32:
return strconv.FormatInt(int64(v), 10)
case int64:
return strconv.FormatInt(v, 10)
case float32:
return fmt.Sprintf("%f", v)
case float64:
return fmt.Sprintf("%f", v)
case bool:
return strconv.FormatBool(v)
}
return val.(string)
}
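The reason this accessor has to exist: unmarshalling the panel's JSON into a map[string]interface{} turns every bare number into float64 and booleans into bool, so a blind string assertion would panic. A quick demonstration with a hypothetical payload:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var env map[string]interface{}
	payload := []byte(`{"SERVER_MEMORY": 1024, "ENABLED": true, "MOTD": "hi"}`)
	if err := json.Unmarshal(payload, &env); err != nil {
		panic(err)
	}

	// Prints float64 for SERVER_MEMORY, bool for ENABLED, string for MOTD.
	for k, v := range env {
		fmt.Printf("%s: %T\n", k, v)
	}
}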
// High level definition for a server instance being controlled by Wings.
type Server struct {
// The unique identifier for the server that should be used when referencing
@@ -41,7 +74,7 @@ type Server struct {
// An array of environment variables that should be passed along to the running
// server process.
EnvVars map[string]string `json:"environment"`
EnvVars EnvironmentVariables `json:"environment"`
Allocations Allocations `json:"allocations"`
Build BuildSettings `json:"build"`
@@ -73,11 +106,27 @@ type Server struct {
// started, and then cached here.
processConfiguration *api.ProcessConfiguration
// Tracks the installation process for this server and prevents a server from running
// two installer processes at the same time. This also allows us to cancel a running
// installation process, for example when a server is deleted from the panel while the
// installer process is still running.
installer InstallerDetails
// Internal mutex used to block actions that need to occur sequentially, such as
// writing the configuration to the disk.
sync.RWMutex
}
type InstallerDetails struct {
// The cancel function for the installer. This will be a non-nil value while there
// is an installer running for the server.
cancel *context.CancelFunc
// Installer lock. You should obtain an exclusive lock on this semaphore while running
// the installation process and release it when finished.
sem *semaphore.Weighted
}
// The build settings for a given server that impact docker container creation and
// resource limits for a server instance.
type BuildSettings struct {
@@ -196,13 +245,13 @@ func LoadDirectory() error {
s, err := FromConfiguration(data)
if err != nil {
zap.S().Errorw("failed to load server, skipping...", zap.String("server", uuid), zap.Error(err))
log.WithField("server", uuid).WithField("error", err).Error("failed to load server, skipping...")
return
}
if state, exists := states[s.Uuid]; exists {
s.SetState(state)
zap.S().Debugw("loaded server state from cache", zap.String("server", s.Uuid), zap.String("state", s.GetState()))
s.Log().WithField("state", s.GetState()).Debug("loaded server state from cache file")
}
servers.Add(s)
@@ -271,19 +320,23 @@ func (s *Server) GetEnvironmentVariables() []string {
}
eloop:
for k, v := range s.EnvVars {
for k := range s.EnvVars {
for _, e := range out {
if strings.HasPrefix(e, strings.ToUpper(k)) {
continue eloop
}
}
out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), s.EnvVars.Get(k)))
}
return out
}
func (s *Server) Log() *log.Entry {
return log.WithField("server", s.Uuid)
}
// Syncs the state of the server on the Panel with Wings. This ensures that we're always
// using the state of the server from the Panel and allows us to not require successful
// API calls to Wings to do things.

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap"
"io"
"io/ioutil"
"os"
@@ -82,7 +81,7 @@ func (s *Server) SetState(state string) error {
s.State = state
// Emit the event to any listeners that are currently registered.
zap.S().Debugw("saw server status change event", zap.String("server", s.Uuid), zap.String("status", s.State))
s.Log().WithField("status", s.State).Debug("saw server status change event")
s.Events().Publish(StatusEvent, s.State)
// Release the lock as it is no longer needed for the following actions.
@@ -98,7 +97,7 @@ func (s *Server) SetState(state string) error {
// to the disk should we forget to do it elsewhere.
go func() {
if err := saveServerStates(); err != nil {
zap.S().Warnw("failed to write server states to disk", zap.Error(err))
s.Log().WithField("error", err).Warn("failed to write server states to disk")
}
}()
@@ -111,14 +110,14 @@ func (s *Server) SetState(state string) error {
// separate thread as to not block any actions currently taking place in the flow
// that called this function.
if (prevState == ProcessStartingState || prevState == ProcessRunningState) && s.GetState() == ProcessOfflineState {
zap.S().Infow("detected server as entering a potentially crashed state; running handler", zap.String("server", s.Uuid))
s.Log().Info("detected server as entering a crashed state; running crash handler")
go func(server *Server) {
if err := server.handleServerCrash(); err != nil {
if IsTooFrequentCrashError(err) {
zap.S().Infow("did not restart server after crash; occurred too soon after last", zap.String("server", server.Uuid))
server.Log().Info("did not restart server after crash; occurred too soon after the last")
} else {
zap.S().Errorw("failed to handle server crash state", zap.String("server", server.Uuid), zap.Error(err))
server.Log().WithField("error", err).Error("failed to handle server crash")
}
}
}(s)

View File

@@ -5,7 +5,6 @@ import (
"github.com/buger/jsonparser"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"go.uber.org/zap"
)
// Merges data passed through in JSON form into the existing server object.
@@ -34,6 +33,16 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
return errors.WithStack(err)
}
// Don't explode if we're setting CPU limits to 0. Mergo sees that as an empty value
// so it won't override the value we've passed through in the API call. However, we can
// safely assume that we're passing through valid data structures here. I foresee this
// backfiring at some point, but until then...
//
// We'll go ahead and do this with swap as well.
s.Build.CpuLimit = src.Build.CpuLimit
s.Build.Swap = src.Build.Swap
s.Build.DiskSpace = src.Build.DiskSpace
// Mergo can't quite handle this boolean value correctly, so for now we'll just
// handle this edge case manually since none of the other data passed through in this
// request is going to be boolean. Allegedly.
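The manual copies above work around mergo's definition of "empty": a zero-valued source field is treated as unset and never overrides the destination, so a limit deliberately set to 0 in the API payload would be silently dropped. A small demonstration of that behavior (struct and field names are illustrative):

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Build struct {
	CpuLimit  int64
	Swap      int64
	DiskSpace int64
}

func main() {
	dst := Build{CpuLimit: 200, Swap: 512, DiskSpace: 1024}
	src := Build{CpuLimit: 0, Swap: 0, DiskSpace: 2048} // 0 here means "unlimited"

	// Even with WithOverride, mergo skips zero-valued source fields, so the
	// CPU and swap limits keep their old values; only DiskSpace changes.
	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", dst) // {CpuLimit:200 Swap:512 DiskSpace:2048}
}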
@@ -85,12 +94,9 @@ func (s *Server) runBackgroundActions() {
// Update the environment in place, allowing memory and CPU usage to be adjusted
// on the fly without the user needing to reboot (theoretically).
go func(server *Server) {
server.Log().Info("performing server limit modification on-the-fly")
if err := server.Environment.InSituUpdate(); err != nil {
zap.S().Warnw(
"failed to perform in-situ update of server environment",
zap.String("server", server.Uuid),
zap.Error(err),
)
server.Log().WithField("error", err).Warn("failed to perform on-the-fly update of the server environment")
}
}(s)
@@ -98,14 +104,10 @@ func (s *Server) runBackgroundActions() {
// yet, do it immediately.
go func(server *Server) {
if server.Suspended && server.GetState() != ProcessOfflineState {
zap.S().Infow("server suspended with running process state, terminating now", zap.String("server", server.Uuid))
server.Log().Info("server suspended with running process state, terminating now")
if err := server.Environment.WaitForStop(10, true); err != nil {
zap.S().Warnw(
"failed to stop server environment after seeing suspension",
zap.String("server", server.Uuid),
zap.Error(err),
)
server.Log().WithField("error", err).Warn("failed to terminate server environment after suspension")
}
}
}(s)