Compare commits


1 commit

Author: Pterodactyl CI
SHA1: be5ad761ea
Message: bump version for release
Date: 2022-01-31 01:31:09 +00:00

25 changed files with 475 additions and 366 deletions


@@ -6,7 +6,7 @@ build:
 debug:
     go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
-    sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
+    sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof
 # Runs a remotly debuggable session for Wings allowing an IDE to connect and target
 # different breakpoints.


@@ -11,7 +11,6 @@ import (
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"runtime"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@@ -77,7 +76,6 @@ func init() {
// Flags specifically used when running the API. // Flags specifically used when running the API.
rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default") rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on") rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt") rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate") rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
@@ -311,12 +309,6 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
profile, _ := cmd.Flags().GetBool("pprof") profile, _ := cmd.Flags().GetBool("pprof")
if profile { if profile {
if r, _ := cmd.Flags().GetInt("pprof-block-rate"); r > 0 {
runtime.SetBlockProfileRate(r)
}
// Catch at least 1% of mutex contention issues.
runtime.SetMutexProfileFraction(100)
profilePort, _ := cmd.Flags().GetInt("pprof-port") profilePort, _ := cmd.Flags().GetInt("pprof-port")
go func() { go func() {
http.ListenAndServe(fmt.Sprintf("localhost:%d", profilePort), nil) http.ListenAndServe(fmt.Sprintf("localhost:%d", profilePort), nil)
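
For reference, the removed flag above wired Go's block and mutex profilers into the pprof HTTP listener. A minimal standalone sketch of that idea, using only the standard library (flag names here mirror the removed Wings flags but the program itself is illustrative, not Wings code):

```go
package main

import (
	"flag"
	"fmt"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on the default mux
	"runtime"
)

func main() {
	// Hypothetical flags mirroring --pprof-block-rate and --pprof-port.
	blockRate := flag.Int("pprof-block-rate", 0, "block profile sampling rate")
	port := flag.Int("pprof-port", 6060, "port for the pprof listener")
	flag.Parse()

	if *blockRate > 0 {
		runtime.SetBlockProfileRate(*blockRate)
	}
	// Sample roughly 1% of mutex contention events.
	runtime.SetMutexProfileFraction(100)

	// Serve the pprof endpoints until the process exits.
	http.ListenAndServe(fmt.Sprintf("localhost:%d", *port), nil)
}
```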


@@ -73,9 +73,6 @@ func (e *Environment) ContainerInspect(ctx context.Context) (types.ContainerJSON
     res, err := e.client.HTTPClient().Do(req)
     if err != nil {
-        if res == nil {
-            return st, errdefs.Unknown(err)
-        }
         return st, errdefs.FromStatusCode(err, res.StatusCode)
     }


@@ -138,7 +138,9 @@ func (e *Environment) Start(ctx context.Context) error {
 // You most likely want to be using WaitForStop() rather than this function,
 // since this will return as soon as the command is sent, rather than waiting
 // for the process to be completed stopped.
-func (e *Environment) Stop(ctx context.Context) error {
+//
+// TODO: pass context through from the server instance.
+func (e *Environment) Stop() error {
     e.mu.RLock()
     s := e.meta.Stop
     e.mu.RUnlock()
@@ -162,7 +164,7 @@ func (e *Environment) Stop(ctx context.Context) error {
case "SIGTERM": case "SIGTERM":
signal = syscall.SIGTERM signal = syscall.SIGTERM
} }
return e.Terminate(ctx, signal) return e.Terminate(signal)
} }
// If the process is already offline don't switch it back to stopping. Just leave it how // If the process is already offline don't switch it back to stopping. Just leave it how
@@ -177,10 +179,8 @@ func (e *Environment) Stop(ctx context.Context) error {
         return e.SendCommand(s.Value)
     }

-    // Allow the stop action to run for however long it takes, similar to executing a command
-    // and using a different logic pathway to wait for the container to stop successfully.
-    t := time.Duration(-1)
-    if err := e.client.ContainerStop(ctx, e.Id, &t); err != nil {
+    t := time.Second * 30
+    if err := e.client.ContainerStop(context.Background(), e.Id, &t); err != nil {
         // If the container does not exist just mark the process as stopped and return without
         // an error.
         if client.IsErrNotFound(err) {
@@ -198,66 +198,45 @@ func (e *Environment) Stop(ctx context.Context) error {
 // command. If the server does not stop after seconds have passed, an error will
 // be returned, or the instance will be terminated forcefully depending on the
 // value of the second argument.
-//
-// Calls to Environment.Terminate() in this function use the context passed
-// through since we don't want to prevent termination of the server instance
-// just because the context.WithTimeout() has expired.
-func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, terminate bool) error {
-    tctx, cancel := context.WithTimeout(context.Background(), duration)
-    defer cancel()
-
-    // If the parent context is canceled, abort the timed context for termination.
-    go func() {
-        select {
-        case <-ctx.Done():
-            cancel()
-        case <-tctx.Done():
-            // When the timed context is canceled, terminate this routine since we no longer
-            // need to worry about the parent routine being canceled.
-            break
-        }
-    }()
-
-    doTermination := func (s string) error {
-        e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
-        return e.Terminate(ctx, os.Kill)
-    }
-
-    // We pass through the timed context for this stop action so that if one of the
-    // internal docker calls fails to ever finish before we've exhausted the time limit
-    // the resources get cleaned up, and the exection is stopped.
-    if err := e.Stop(tctx); err != nil {
-        if terminate && errors.Is(err, context.DeadlineExceeded) {
-            return doTermination("stop")
-        }
+func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
+    if err := e.Stop(); err != nil {
         return err
     }

+    ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second)
+    defer cancel()
+
     // Block the return of this function until the container as been marked as no
     // longer running. If this wait does not end by the time seconds have passed,
     // attempt to terminate the container, or return an error.
-    ok, errChan := e.client.ContainerWait(tctx, e.Id, container.WaitConditionNotRunning)
+    ok, errChan := e.client.ContainerWait(ctx, e.Id, container.WaitConditionNotRunning)
     select {
     case <-ctx.Done():
-        if err := ctx.Err(); err != nil {
+        if ctxErr := ctx.Err(); ctxErr != nil {
             if terminate {
-                return doTermination("parent-context")
+                log.WithField("container_id", e.Id).Info("server did not stop in time, executing process termination")
+                return e.Terminate(os.Kill)
             }
-            return err
+
+            return ctxErr
         }
     case err := <-errChan:
         // If the error stems from the container not existing there is no point in wasting
         // CPU time to then try and terminate it.
-        if err == nil || client.IsErrNotFound(err) {
-            return nil
-        }
-        if terminate {
-            if !errors.Is(err, context.DeadlineExceeded) {
-                e.log().WithField("error", err).Warn("error while waiting for container stop; terminating process")
+        if err != nil && !client.IsErrNotFound(err) {
+            if terminate {
+                l := log.WithField("container_id", e.Id)
+                if errors.Is(err, context.DeadlineExceeded) {
+                    l.Warn("deadline exceeded for container stop; terminating process")
+                } else {
+                    l.WithField("error", err).Warn("error while waiting for container stop; terminating process")
+                }
+
+                return e.Terminate(os.Kill)
             }
-            return doTermination("wait")
+
+            return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
         }
-        return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
     case <-ok:
     }
@@ -265,8 +244,8 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
 }

 // Terminate forcefully terminates the container using the signal provided.
-func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
-    c, err := e.ContainerInspect(ctx)
+func (e *Environment) Terminate(signal os.Signal) error {
+    c, err := e.ContainerInspect(context.Background())
     if err != nil {
         // Treat missing containers as an okay error state, means it is obviously
         // already terminated at this point.
@@ -291,7 +270,7 @@ func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
     // We set it to stopping than offline to prevent crash detection from being triggered.
     e.SetState(environment.ProcessStoppingState)

     sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
-    if err := e.client.ContainerKill(ctx, e.Id, sig); err != nil && !client.IsErrNotFound(err) {
+    if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil && !client.IsErrNotFound(err) {
         return errors.WithStack(err)
     }
     e.SetState(environment.ProcessOfflineState)
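
The WaitForStop introduced above boils down to "send the stop action, then wait on a channel under a timeout context, terminating if the deadline is hit". A stripped-down sketch of that pattern with plain functions and channels standing in for the Docker calls (names here are illustrative, not Wings code):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitForStop sketches the same flow: send the stop action, then wait up to
// "seconds" for the process to signal that it exited, terminating on timeout.
func waitForStop(stop func() error, exited <-chan struct{}, terminate func() error, seconds uint, doTerminate bool) error {
	if err := stop(); err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second)
	defer cancel()

	select {
	case <-exited:
		return nil
	case <-ctx.Done():
		if doTerminate {
			return terminate()
		}
		return ctx.Err() // context.DeadlineExceeded
	}
}

func main() {
	exited := make(chan struct{})
	close(exited) // pretend the process exited immediately
	fmt.Println(waitForStop(func() error { return nil }, exited, func() error { return nil }, 30, true))
}
```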


@@ -3,7 +3,6 @@ package environment
 import (
     "context"
     "os"
-    "time"

     "github.com/pterodactyl/wings/events"
 )
@@ -59,20 +58,18 @@ type ProcessEnvironment interface {
     // can be started an error should be returned.
     Start(ctx context.Context) error

-    // Stop stops a server instance. If the server is already stopped an error will
-    // not be returned, this function will act as a no-op.
-    Stop(ctx context.Context) error
+    // Stops a server instance. If the server is already stopped an error should
+    // not be returned.
+    Stop() error

-    // WaitForStop waits for a server instance to stop gracefully. If the server is
-    // still detected as running after "duration", an error will be returned, or the server
-    // will be terminated depending on the value of the second argument. If the context
-    // provided is canceled the underlying wait conditions will be stopped and the
-    // entire loop will be ended (potentially without stopping or terminating).
-    WaitForStop(ctx context.Context, duration time.Duration, terminate bool) error
+    // Waits for a server instance to stop gracefully. If the server is still detected
+    // as running after seconds, an error will be returned, or the server will be terminated
+    // depending on the value of the second argument.
+    WaitForStop(seconds uint, terminate bool) error

-    // Terminate stops a running server instance using the provided signal. This function
-    // is a no-op if the server is already stopped.
-    Terminate(ctx context.Context, signal os.Signal) error
+    // Terminates a running server instance using the provided signal. If the server
+    // is not running no error should be returned.
+    Terminate(signal os.Signal) error

     // Destroys the environment removing any containers that were created (in Docker
     // environments at least).


@@ -2,12 +2,11 @@ package events
 import (
     "strings"
-
-    "emperror.dev/errors"
-    "github.com/goccy/go-json"
-
-    "github.com/pterodactyl/wings/system"
+    "sync"
 )

+type Listener chan Event
+
 // Event represents an Event sent over a Bus.
 type Event struct {
     Topic string
@@ -16,55 +15,137 @@ type Event struct {
 // Bus represents an Event Bus.
 type Bus struct {
-    *system.SinkPool
+    listenersMx sync.Mutex
+    listeners   map[string][]Listener
 }

-// NewBus returns a new empty Bus. This is simply a nicer wrapper around the
-// system.SinkPool implementation that allows for more simplistic usage within
-// the codebase.
-//
-// All of the events emitted out of this bus are byte slices that can be decoded
-// back into an events.Event interface.
+// NewBus returns a new empty Event Bus.
 func NewBus() *Bus {
     return &Bus{
-        system.NewSinkPool(),
+        listeners: make(map[string][]Listener),
+    }
+}
+
+// Off unregisters a listener from the specified topics on the Bus.
+func (b *Bus) Off(listener Listener, topics ...string) {
+    b.listenersMx.Lock()
+    defer b.listenersMx.Unlock()
+
+    var closed bool
+
+    for _, topic := range topics {
+        ok := b.off(topic, listener)
+        if !closed && ok {
+            close(listener)
+            closed = true
+        }
+    }
+}
+
+func (b *Bus) off(topic string, listener Listener) bool {
+    listeners, ok := b.listeners[topic]
+    if !ok {
+        return false
+    }
+
+    for i, l := range listeners {
+        if l != listener {
+            continue
+        }
+
+        listeners = append(listeners[:i], listeners[i+1:]...)
+        b.listeners[topic] = listeners
+        return true
+    }
+
+    return false
+}
+
+// On registers a listener to the specified topics on the Bus.
+func (b *Bus) On(listener Listener, topics ...string) {
+    b.listenersMx.Lock()
+    defer b.listenersMx.Unlock()
+
+    for _, topic := range topics {
+        b.on(topic, listener)
+    }
+}
+
+func (b *Bus) on(topic string, listener Listener) {
+    listeners, ok := b.listeners[topic]
+    if !ok {
+        b.listeners[topic] = []Listener{listener}
+    } else {
+        b.listeners[topic] = append(listeners, listener)
     }
 }

 // Publish publishes a message to the Bus.
 func (b *Bus) Publish(topic string, data interface{}) {
-    // Some of our actions for the socket support passing a more specific namespace,
+    // Some of our topics for the socket support passing a more specific namespace,
     // such as "backup completed:1234" to indicate which specific backup was completed.
     //
     // In these cases, we still need to send the event using the standard listener
     // name of "backup completed".
     if strings.Contains(topic, ":") {
         parts := strings.SplitN(topic, ":", 2)
         if len(parts) == 2 {
             topic = parts[0]
         }
     }

-    enc, err := json.Marshal(Event{Topic: topic, Data: data})
-    if err != nil {
-        panic(errors.WithStack(err))
+    b.listenersMx.Lock()
+    defer b.listenersMx.Unlock()
+
+    listeners, ok := b.listeners[topic]
+    if !ok {
+        return
     }
-    b.Push(enc)
+    if len(listeners) < 1 {
+        return
+    }
+
+    var wg sync.WaitGroup
+    event := Event{Topic: topic, Data: data}
+    for _, listener := range listeners {
+        l := listener
+        wg.Add(1)
+        go func(l Listener, event Event) {
+            defer wg.Done()
+            l <- event
+        }(l, event)
+    }
+    wg.Wait()
 }

-// MustDecode decodes the event byte slice back into an events.Event struct or
-// panics if an error is encountered during this process.
-func MustDecode(data []byte) (e Event) {
-    if err := DecodeTo(data, &e); err != nil {
-        panic(err)
+// Destroy destroys the Event Bus by unregistering and closing all listeners.
+func (b *Bus) Destroy() {
+    b.listenersMx.Lock()
+    defer b.listenersMx.Unlock()
+
+    // Track what listeners have already been closed. Because the same listener
+    // can be listening on multiple topics, we need a way to essentially
+    // "de-duplicate" all the listeners across all the topics.
+    var closed []Listener
+
+    for _, listeners := range b.listeners {
+        for _, listener := range listeners {
+            if contains(closed, listener) {
+                continue
+            }
+
+            close(listener)
+            closed = append(closed, listener)
+        }
     }
-    return
+
+    b.listeners = make(map[string][]Listener)
 }

-// DecodeTo decodes a byte slice of event data into the given interface.
-func DecodeTo(data []byte, v interface{}) error {
-    if err := json.Unmarshal(data, &v); err != nil {
-        return errors.Wrap(err, "events: failed to decode byte slice")
+func contains(closed []Listener, listener Listener) bool {
+    for _, c := range closed {
+        if c == listener {
+            return true
+        }
     }
-    return nil
+    return false
 }
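
Taken together, the new Bus above is a plain topic-to-listener-channel map rather than a byte-slice sink. A hedged usage sketch of how a caller would drive it, based only on the methods shown in this diff (the import path matches the one used elsewhere in this changeset; the topics and payloads are made up for illustration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/pterodactyl/wings/events"
)

func main() {
	bus := events.NewBus()

	// Listener is chan events.Event; an unnamed channel of the same type works too.
	listener := make(chan events.Event)
	bus.On(listener, "stats", "state")

	go func() {
		for e := range listener {
			fmt.Println(e.Topic, e.Data)
		}
	}()

	bus.Publish("state", "running")
	bus.Publish("stats:1234", map[string]int{"memory": 512}) // namespace stripped to "stats"

	// Off removes the listener from both topics and closes the channel once;
	// Destroy would do the same for every registered listener.
	bus.Off(listener, "stats", "state")
	time.Sleep(50 * time.Millisecond) // let the consumer goroutine finish printing
}
```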


@@ -9,90 +9,162 @@ import (
 func TestNewBus(t *testing.T) {
     g := Goblin(t)

-    g.Describe("Events", func() {
-        var bus *Bus
-        g.BeforeEach(func() {
-            bus = NewBus()
-        })
-
-        g.Describe("NewBus", func() {
-            g.It("is not nil", func() {
-                g.Assert(bus).IsNotNil("Bus expected to not be nil")
-            })
-        })
-
-        g.Describe("Publish", func() {
-            const topic = "test"
-            const message = "this is a test message!"
-
-            g.It("publishes message", func() {
-                bus := NewBus()
-
-                listener := make(chan []byte)
-                bus.On(listener)
-
-                done := make(chan struct{}, 1)
-                go func() {
-                    select {
-                    case v := <-listener:
-                        m := MustDecode(v)
-                        g.Assert(m.Topic).Equal(topic)
-                        g.Assert(m.Data).Equal(message)
-                    case <-time.After(1 * time.Second):
-                        g.Fail("listener did not receive message in time")
-                    }
-                    done <- struct{}{}
-                }()
-                bus.Publish(topic, message)
-                <-done
-
-                // Cleanup
-                bus.Off(listener)
-            })
-
-            g.It("publishes message to all listeners", func() {
-                bus := NewBus()
-
-                listener := make(chan []byte)
-                listener2 := make(chan []byte)
-                listener3 := make(chan []byte)
-                bus.On(listener)
-                bus.On(listener2)
-                bus.On(listener3)
-
-                done := make(chan struct{}, 1)
-                go func() {
-                    for i := 0; i < 3; i++ {
-                        select {
-                        case v := <-listener:
-                            m := MustDecode(v)
-                            g.Assert(m.Topic).Equal(topic)
-                            g.Assert(m.Data).Equal(message)
-                        case v := <-listener2:
-                            m := MustDecode(v)
-                            g.Assert(m.Topic).Equal(topic)
-                            g.Assert(m.Data).Equal(message)
-                        case v := <-listener3:
-                            m := MustDecode(v)
-                            g.Assert(m.Topic).Equal(topic)
-                            g.Assert(m.Data).Equal(message)
-                        case <-time.After(1 * time.Second):
-                            g.Fail("all listeners did not receive the message in time")
-                            i = 3
-                        }
-                    }
-                    done <- struct{}{}
-                }()
-                bus.Publish(topic, message)
-                <-done
-
-                // Cleanup
-                bus.Off(listener)
-                bus.Off(listener2)
-                bus.Off(listener3)
-            })
+    bus := NewBus()
+
+    g.Describe("NewBus", func() {
+        g.It("is not nil", func() {
+            g.Assert(bus).IsNotNil("Bus expected to not be nil")
+            g.Assert(bus.listeners).IsNotNil("Bus#listeners expected to not be nil")
+        })
+    })
+}
+
+func TestBus_Off(t *testing.T) {
+    g := Goblin(t)
+
+    const topic = "test"
+
+    g.Describe("Off", func() {
+        g.It("unregisters listener", func() {
+            bus := NewBus()
+
+            g.Assert(bus.listeners[topic]).IsNotNil()
+            g.Assert(len(bus.listeners[topic])).IsZero()
+            listener := make(chan Event)
+            bus.On(listener, topic)
+            g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
+
+            bus.Off(listener, topic)
+            g.Assert(len(bus.listeners[topic])).Equal(0, "Topic still has one or more listeners")
+        })
+
+        g.It("unregisters correct listener", func() {
+            bus := NewBus()
+
+            listener := make(chan Event)
+            listener2 := make(chan Event)
+            listener3 := make(chan Event)
+            bus.On(listener, topic)
+            bus.On(listener2, topic)
+            bus.On(listener3, topic)
+            g.Assert(len(bus.listeners[topic])).Equal(3, "Listeners were not registered")
+
+            bus.Off(listener, topic)
+            bus.Off(listener3, topic)
+            g.Assert(len(bus.listeners[topic])).Equal(1, "Expected 1 listener to remain")
+
+            if bus.listeners[topic][0] != listener2 {
+                // A normal Assert does not properly compare channels.
+                g.Fail("wrong listener unregistered")
+            }
+
+            // Cleanup
+            bus.Off(listener2, topic)
+        })
+    })
+}
+
+func TestBus_On(t *testing.T) {
+    g := Goblin(t)
+
+    const topic = "test"
+
+    g.Describe("On", func() {
+        g.It("registers listener", func() {
+            bus := NewBus()
+
+            g.Assert(bus.listeners[topic]).IsNotNil()
+            g.Assert(len(bus.listeners[topic])).IsZero()
+            listener := make(chan Event)
+            bus.On(listener, topic)
+            g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
+
+            if bus.listeners[topic][0] != listener {
+                // A normal Assert does not properly compare channels.
+                g.Fail("wrong listener registered")
+            }
+
+            // Cleanup
+            bus.Off(listener, topic)
+        })
+    })
+}
+
+func TestBus_Publish(t *testing.T) {
+    g := Goblin(t)
+
+    const topic = "test"
+    const message = "this is a test message!"
+
+    g.Describe("Publish", func() {
+        g.It("publishes message", func() {
+            bus := NewBus()
+
+            g.Assert(bus.listeners[topic]).IsNotNil()
+            g.Assert(len(bus.listeners[topic])).IsZero()
+            listener := make(chan Event)
+            bus.On(listener, topic)
+            g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
+
+            done := make(chan struct{}, 1)
+            go func() {
+                select {
+                case m := <-listener:
+                    g.Assert(m.Topic).Equal(topic)
+                    g.Assert(m.Data).Equal(message)
+                case <-time.After(1 * time.Second):
+                    g.Fail("listener did not receive message in time")
+                }
+                done <- struct{}{}
+            }()
+            bus.Publish(topic, message)
+            <-done
+
+            // Cleanup
+            bus.Off(listener, topic)
+        })
+
+        g.It("publishes message to all listeners", func() {
+            bus := NewBus()
+
+            g.Assert(bus.listeners[topic]).IsNotNil()
+            g.Assert(len(bus.listeners[topic])).IsZero()
+
+            listener := make(chan Event)
+            listener2 := make(chan Event)
+            listener3 := make(chan Event)
+            bus.On(listener, topic)
+            bus.On(listener2, topic)
+            bus.On(listener3, topic)
+            g.Assert(len(bus.listeners[topic])).Equal(3, "Listener was not registered")
+
+            done := make(chan struct{}, 1)
+            go func() {
+                for i := 0; i < 3; i++ {
+                    select {
+                    case m := <-listener:
+                        g.Assert(m.Topic).Equal(topic)
+                        g.Assert(m.Data).Equal(message)
+                    case m := <-listener2:
+                        g.Assert(m.Topic).Equal(topic)
+                        g.Assert(m.Data).Equal(message)
+                    case m := <-listener3:
+                        g.Assert(m.Topic).Equal(topic)
+                        g.Assert(m.Data).Equal(message)
+                    case <-time.After(1 * time.Second):
+                        g.Fail("all listeners did not receive the message in time")
+                        i = 3
+                    }
+                }
+                done <- struct{}{}
+            }()
+            bus.Publish(topic, message)
+            <-done
+
+            // Cleanup
+            bus.Off(listener, topic)
+            bus.Off(listener2, topic)
+            bus.Off(listener3, topic)
         })
     })
 }


@@ -11,9 +11,9 @@ import (
"github.com/apex/log" "github.com/apex/log"
"github.com/beevik/etree" "github.com/beevik/etree"
"github.com/buger/jsonparser" "github.com/buger/jsonparser"
"github.com/goccy/go-json"
"github.com/icza/dyno" "github.com/icza/dyno"
"github.com/magiconair/properties" "github.com/magiconair/properties"
"github.com/goccy/go-json"
"gopkg.in/ini.v1" "gopkg.in/ini.v1"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"


@@ -178,7 +178,7 @@ func postServerArchive(c *gin.Context) {
     // Ensure the server is offline. Sometimes a "No such container" error gets through
     // which means the server is already stopped. We can ignore that.
-    if err := s.Environment.WaitForStop(s.Context(), time.Minute, false); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
+    if err := s.Environment.WaitForStop(60, false); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
         sendTransferLog("Failed to stop server, aborting transfer..")
         l.WithField("error", err).Error("failed to stop server")
         return


@@ -7,9 +7,8 @@ import (
"emperror.dev/errors" "emperror.dev/errors"
"github.com/goccy/go-json" "github.com/goccy/go-json"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/server" "github.com/pterodactyl/wings/server"
) )
@@ -89,13 +88,12 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
eventChan := make(chan []byte) eventChan := make(chan events.Event)
logOutput := make(chan []byte, 8) logOutput := make(chan []byte, 8)
installOutput := make(chan []byte, 4) installOutput := make(chan []byte, 4)
h.server.Events().On(eventChan, e...)
h.server.Events().On(eventChan) // TODO: make a sinky h.server.Sink(server.LogSink).On(logOutput)
h.server.Sink(system.LogSink).On(logOutput) h.server.Sink(server.InstallSink).On(installOutput)
h.server.Sink(system.InstallSink).On(installOutput)
onError := func(evt string, err2 error) { onError := func(evt string, err2 error) {
h.Logger().WithField("event", evt).WithField("error", err2).Error("failed to send event over server websocket") h.Logger().WithField("event", evt).WithField("error", err2).Error("failed to send event over server websocket")
@@ -112,23 +110,19 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
select { select {
case <-ctx.Done(): case <-ctx.Done():
break break
case b := <-logOutput: case e := <-logOutput:
sendErr := h.SendJson(Message{Event: server.ConsoleOutputEvent, Args: []string{string(b)}}) sendErr := h.SendJson(Message{Event: server.ConsoleOutputEvent, Args: []string{string(e)}})
if sendErr == nil { if sendErr == nil {
continue continue
} }
onError(server.ConsoleOutputEvent, sendErr) onError(server.ConsoleOutputEvent, sendErr)
case b := <-installOutput: case e := <-installOutput:
sendErr := h.SendJson(Message{Event: server.InstallOutputEvent, Args: []string{string(b)}}) sendErr := h.SendJson(Message{Event: server.InstallOutputEvent, Args: []string{string(e)}})
if sendErr == nil { if sendErr == nil {
continue continue
} }
onError(server.InstallOutputEvent, sendErr) onError(server.InstallOutputEvent, sendErr)
case b := <-eventChan: case e := <-eventChan:
var e events.Event
if err := events.DecodeTo(b, &e); err != nil {
continue
}
var sendErr error var sendErr error
message := Message{Event: e.Topic} message := Message{Event: e.Topic}
if str, ok := e.Data.(string); ok { if str, ok := e.Data.(string); ok {
@@ -154,9 +148,9 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
} }
// These functions will automatically close the channel if it hasn't been already. // These functions will automatically close the channel if it hasn't been already.
h.server.Events().Off(eventChan) h.server.Events().Off(eventChan, e...)
h.server.Sink(system.LogSink).Off(logOutput) h.server.Sink(server.LogSink).Off(logOutput)
h.server.Sink(system.InstallSink).Off(installOutput) h.server.Sink(server.InstallSink).Off(installOutput)
// If the internal context is stopped it is either because the parent context // If the internal context is stopped it is either because the parent context
// got canceled or because we ran into an error. If the "err" variable is nil // got canceled or because we ran into an error. If the "err" variable is nil


@@ -11,10 +11,9 @@ import (
"emperror.dev/errors" "emperror.dev/errors"
"github.com/apex/log" "github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3" "github.com/gbrlsnchs/jwt/v3"
"github.com/goccy/go-json"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
"github.com/pterodactyl/wings/system" "github.com/goccy/go-json"
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
@@ -354,7 +353,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
} }
err := h.server.HandlePowerAction(action) err := h.server.HandlePowerAction(action)
if errors.Is(err, system.ErrLockerLocked) { if errors.Is(err, context.DeadlineExceeded) {
m, _ := h.GetErrorMessage("another power action is currently being processed for this server, please try again later") m, _ := h.GetErrorMessage("another power action is currently being processed for this server, please try again later")
_ = h.SendJson(Message{ _ = h.SendJson(Message{


@@ -142,7 +142,7 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
     // instance, otherwise you'll likely hit all types of write errors due to the
     // server being suspended.
     if s.Environment.State() != environment.ProcessOfflineState {
-        if err = s.Environment.WaitForStop(s.Context(), time.Minute*2, false); err != nil {
+        if err = s.Environment.WaitForStop(120, false); err != nil {
             if !client.IsErrNotFound(err) {
                 return errors.WrapIf(err, "server/backup: restore: failed to wait for container stop")
             }


@@ -6,14 +6,12 @@ import (
"github.com/gammazero/workerpool" "github.com/gammazero/workerpool"
) )
// UpdateConfigurationFiles updates all of the defined configuration files for // Parent function that will update all of the defined configuration files for a server
// a server automatically to ensure that they always use the specified values. // automatically to ensure that they always use the specified values.
func (s *Server) UpdateConfigurationFiles() { func (s *Server) UpdateConfigurationFiles() {
pool := workerpool.New(runtime.NumCPU()) pool := workerpool.New(runtime.NumCPU())
s.Log().Debug("acquiring process configuration files...")
files := s.ProcessConfiguration().ConfigurationFiles files := s.ProcessConfiguration().ConfigurationFiles
s.Log().Debug("acquired process configuration files")
for _, cf := range files { for _, cf := range files {
f := cf f := cf
@@ -28,8 +26,6 @@ func (s *Server) UpdateConfigurationFiles() {
if err := f.Parse(p, false); err != nil { if err := f.Parse(p, false); err != nil {
s.Log().WithField("error", err).Error("failed to parse and update server configuration file") s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
} }
s.Log().WithField("path", f.FileName).Debug("finished processing server configuration file")
}) })
} }


@@ -2,7 +2,6 @@ package server
 import (
     "github.com/pterodactyl/wings/events"
-    "github.com/pterodactyl/wings/system"
 )

 // Defines all of the possible output events for a server.
@@ -21,7 +20,7 @@ const (
     TransferStatusEvent = "transfer status"
 )

-// Events returns the server's emitter instance.
+// Returns the server's emitter instance.
 func (s *Server) Events() *events.Bus {
     s.emitterLock.Lock()
     defer s.emitterLock.Unlock()
@@ -32,24 +31,3 @@ func (s *Server) Events() *events.Bus {
     return s.emitter
 }
-
-// Sink returns the instantiated and named sink for a server. If the sink has
-// not been configured yet this function will cause a panic condition.
-func (s *Server) Sink(name system.SinkName) *system.SinkPool {
-    sink, ok := s.sinks[name]
-    if !ok {
-        s.Log().Fatalf("attempt to access nil sink: %s", name)
-    }
-    return sink
-}
-
-// DestroyAllSinks iterates over all of the sinks configured for the server and
-// destroys their instances. Note that this will cause a panic if you attempt
-// to call Server.Sink() again after. This function is only used when a server
-// is being deleted from the system.
-func (s *Server) DestroyAllSinks() {
-    s.Log().Info("destroying all registered sinks for server instance")
-    for _, sink := range s.sinks {
-        sink.Destroy()
-    }
-}


@@ -10,7 +10,6 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
"time"
"emperror.dev/errors" "emperror.dev/errors"
"github.com/apex/log" "github.com/apex/log"
@@ -18,18 +17,18 @@ import (
"github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/remote" "github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/system" "github.com/pterodactyl/wings/system"
) )
// Install executes the installation stack for a server process. Bubbles any // Executes the installation stack for a server process. Bubbles any errors up to the calling
// errors up to the calling function which should handle contacting the panel to // function which should handle contacting the panel to notify it of the server state.
// notify it of the server state.
// //
// Pass true as the first argument in order to execute a server sync before the // Pass true as the first argument in order to execute a server sync before the process to
// process to ensure the latest information is used. // ensure the latest information is used.
func (s *Server) Install(sync bool) error { func (s *Server) Install(sync bool) error {
if sync { if sync {
s.Log().Info("syncing server state with remote source before executing installation process") s.Log().Info("syncing server state with remote source before executing installation process")
@@ -80,7 +79,7 @@ func (s *Server) Install(sync bool) error {
func (s *Server) Reinstall() error { func (s *Server) Reinstall() error {
if s.Environment.State() != environment.ProcessOfflineState { if s.Environment.State() != environment.ProcessOfflineState {
s.Log().Debug("waiting for server instance to enter a stopped state") s.Log().Debug("waiting for server instance to enter a stopped state")
if err := s.Environment.WaitForStop(s.Context(), time.Second*10, true); err != nil { if err := s.Environment.WaitForStop(10, true); err != nil {
return err return err
} }
} }
@@ -111,7 +110,9 @@ func (s *Server) internalInstall() error {
type InstallationProcess struct { type InstallationProcess struct {
Server *Server Server *Server
Script *remote.InstallationScript Script *remote.InstallationScript
client *client.Client
client *client.Client
context context.Context
} }
// Generates a new installation process struct that will be used to create containers, // Generates a new installation process struct that will be used to create containers,
@@ -126,6 +127,7 @@ func NewInstallationProcess(s *Server, script *remote.InstallationScript) (*Inst
return nil, err return nil, err
} else { } else {
proc.client = c proc.client = c
proc.context = s.Context()
} }
return proc, nil return proc, nil
@@ -155,7 +157,7 @@ func (s *Server) SetRestoring(state bool) {
// Removes the installer container for the server. // Removes the installer container for the server.
func (ip *InstallationProcess) RemoveContainer() error { func (ip *InstallationProcess) RemoveContainer() error {
err := ip.client.ContainerRemove(ip.Server.Context(), ip.Server.ID()+"_installer", types.ContainerRemoveOptions{ err := ip.client.ContainerRemove(ip.context, ip.Server.ID()+"_installer", types.ContainerRemoveOptions{
RemoveVolumes: true, RemoveVolumes: true,
Force: true, Force: true,
}) })
@@ -165,10 +167,11 @@ func (ip *InstallationProcess) RemoveContainer() error {
return nil return nil
} }
// Run runs the installation process, this is done as in a background thread. // Runs the installation process, this is done as in a background thread. This will configure
// This will configure the required environment, and then spin up the // the required environment, and then spin up the installation container.
// installation container. Once the container finishes installing the results //
// are stored in an installation log in the server's configuration directory. // Once the container finishes installing the results will be stored in an installation
// log in the server's configuration directory.
func (ip *InstallationProcess) Run() error { func (ip *InstallationProcess) Run() error {
ip.Server.Log().Debug("acquiring installation process lock") ip.Server.Log().Debug("acquiring installation process lock")
if !ip.Server.installing.SwapIf(true) { if !ip.Server.installing.SwapIf(true) {
@@ -264,9 +267,9 @@ func (ip *InstallationProcess) pullInstallationImage() error {
imagePullOptions.RegistryAuth = b64 imagePullOptions.RegistryAuth = b64
} }
r, err := ip.client.ImagePull(ip.Server.Context(), ip.Script.ContainerImage, imagePullOptions) r, err := ip.client.ImagePull(context.Background(), ip.Script.ContainerImage, imagePullOptions)
if err != nil { if err != nil {
images, ierr := ip.client.ImageList(ip.Server.Context(), types.ImageListOptions{}) images, ierr := ip.client.ImageList(context.Background(), types.ImageListOptions{})
if ierr != nil { if ierr != nil {
// Well damn, something has gone really wrong here, just go ahead and abort there // Well damn, something has gone really wrong here, just go ahead and abort there
// isn't much anything we can do to try and self-recover from this. // isn't much anything we can do to try and self-recover from this.
@@ -309,10 +312,9 @@ func (ip *InstallationProcess) pullInstallationImage() error {
return nil return nil
} }
// BeforeExecute runs before the container is executed. This pulls down the // Runs before the container is executed. This pulls down the required docker container image
// required docker container image as well as writes the installation script to // as well as writes the installation script to the disk. This process is executed in an async
// the disk. This process is executed in an async manner, if either one fails // manner, if either one fails the error is returned.
// the error is returned.
func (ip *InstallationProcess) BeforeExecute() error { func (ip *InstallationProcess) BeforeExecute() error {
if err := ip.writeScriptToDisk(); err != nil { if err := ip.writeScriptToDisk(); err != nil {
return errors.WithMessage(err, "failed to write installation script to disk") return errors.WithMessage(err, "failed to write installation script to disk")
@@ -338,7 +340,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
defer ip.RemoveContainer() defer ip.RemoveContainer()
ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server") ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
reader, err := ip.client.ContainerLogs(ip.Server.Context(), containerId, types.ContainerLogsOptions{ reader, err := ip.client.ContainerLogs(ip.context, containerId, types.ContainerLogsOptions{
ShowStdout: true, ShowStdout: true,
ShowStderr: true, ShowStderr: true,
Follow: false, Follow: false,
@@ -393,13 +395,12 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
return nil return nil
} }
// Execute executes the installation process inside a specially created docker // Executes the installation process inside a specially created docker container.
// container.
func (ip *InstallationProcess) Execute() (string, error) { func (ip *InstallationProcess) Execute() (string, error) {
// Create a child context that is canceled once this function is done running. This // Create a child context that is canceled once this function is done running. This
// will also be canceled if the parent context (from the Server struct) is canceled // will also be canceled if the parent context (from the Server struct) is canceled
// which occurs if the server is deleted. // which occurs if the server is deleted.
ctx, cancel := context.WithCancel(ip.Server.Context()) ctx, cancel := context.WithCancel(ip.context)
defer cancel() defer cancel()
conf := &container.Config{ conf := &container.Config{
@@ -510,15 +511,18 @@ func (ip *InstallationProcess) Execute() (string, error) {
// the server configuration directory, as well as to a websocket listener so // the server configuration directory, as well as to a websocket listener so
// that the process can be viewed in the panel by administrators. // that the process can be viewed in the panel by administrators.
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error { func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true} reader, err := ip.client.ContainerLogs(ctx, id, types.ContainerLogsOptions{
reader, err := ip.client.ContainerLogs(ctx, id, opts) ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil { if err != nil {
return err return err
} }
defer reader.Close() defer reader.Close()
err = system.ScanReader(reader, ip.Server.Sink(system.InstallSink).Push) err = system.ScanReader(reader, ip.Server.Sink(InstallSink).Push)
if err != nil && !errors.Is(err, context.Canceled) { if err != nil {
ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines") ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines")
} }
return nil return nil


@@ -5,13 +5,11 @@ import (
"regexp" "regexp"
"strconv" "strconv"
"sync" "sync"
"time"
"github.com/apex/log" "github.com/apex/log"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote" "github.com/pterodactyl/wings/remote"
) )
@@ -46,7 +44,7 @@ func (dsl *diskSpaceLimiter) Reset() {
func (dsl *diskSpaceLimiter) Trigger() { func (dsl *diskSpaceLimiter) Trigger() {
dsl.o.Do(func() { dsl.o.Do(func() {
dsl.server.PublishConsoleOutputFromDaemon("Server is exceeding the assigned disk space limit, stopping process now.") dsl.server.PublishConsoleOutputFromDaemon("Server is exceeding the assigned disk space limit, stopping process now.")
if err := dsl.server.Environment.WaitForStop(dsl.server.Context(), time.Minute, true); err != nil { if err := dsl.server.Environment.WaitForStop(60, true); err != nil {
dsl.server.Log().WithField("error", err).Error("failed to stop server after exceeding space limit!") dsl.server.Log().WithField("error", err).Error("failed to stop server after exceeding space limit!")
} }
}) })
@@ -74,57 +72,47 @@ func (s *Server) processConsoleOutputEvent(v []byte) {
return return
} }
s.Sink(system.LogSink).Push(v) s.Sink(LogSink).Push(v)
} }
// StartEventListeners adds all the internal event listeners we want to use for // StartEventListeners adds all the internal event listeners we want to use for
// a server. These listeners can only be removed by deleting the server as they // a server. These listeners can only be removed by deleting the server as they
// should last for the duration of the process' lifetime. // should last for the duration of the process' lifetime.
func (s *Server) StartEventListeners() { func (s *Server) StartEventListeners() {
c := make(chan []byte, 8) state := make(chan events.Event)
limit := newDiskLimiter(s) stats := make(chan events.Event)
docker := make(chan events.Event)
s.Log().Debug("registering event listeners: console, state, resources...")
s.Environment.Events().On(c)
s.Environment.SetLogCallback(s.processConsoleOutputEvent)
go func() { go func() {
l := newDiskLimiter(s)
for { for {
select { select {
case v := <-c: case e := <-state:
go func(v []byte, limit *diskSpaceLimiter) { go func() {
var e events.Event // Reset the throttler when the process is started.
if err := events.DecodeTo(v, &e); err != nil { if e.Data == environment.ProcessStartingState {
return l.Reset()
s.Throttler().Reset()
} }
s.OnStateChange()
}()
case e := <-stats:
go func() {
s.resources.UpdateStats(e.Data.(environment.Stats))
// If there is no disk space available at this point, trigger the server
// disk limiter logic which will start to stop the running instance.
if !s.Filesystem().HasSpaceAvailable(true) {
l.Trigger()
}
s.Events().Publish(StatsEvent, s.Proc())
}()
case e := <-docker:
go func() {
switch e.Topic { switch e.Topic {
case environment.ResourceEvent:
{
var stats struct {
Topic string
Data environment.Stats
}
if err := events.DecodeTo(v, &stats); err != nil {
s.Log().WithField("error", err).Warn("failed to decode server resource event")
return
}
s.resources.UpdateStats(stats.Data)
// If there is no disk space available at this point, trigger the server
// disk limiter logic which will start to stop the running instance.
if !s.Filesystem().HasSpaceAvailable(true) {
limit.Trigger()
}
s.Events().Publish(StatsEvent, s.Proc())
}
case environment.StateChangeEvent:
{
// Reset the throttler when the process is started.
if e.Data == environment.ProcessStartingState {
limit.Reset()
s.Throttler().Reset()
}
s.OnStateChange()
}
case environment.DockerImagePullStatus: case environment.DockerImagePullStatus:
s.Events().Publish(InstallOutputEvent, e.Data) s.Events().Publish(InstallOutputEvent, e.Data)
case environment.DockerImagePullStarted: case environment.DockerImagePullStarted:
@@ -132,13 +120,18 @@ func (s *Server) StartEventListeners() {
case environment.DockerImagePullCompleted: case environment.DockerImagePullCompleted:
s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image") s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
default: default:
s.Log().WithField("topic", e.Topic).Error("unhandled docker event topic")
} }
}(v, limit) }()
case <-s.Context().Done():
return
} }
} }
}() }()
s.Log().Debug("registering event listeners: console, state, resources...")
s.Environment.SetLogCallback(s.processConsoleOutputEvent)
s.Environment.Events().On(state, environment.StateChangeEvent)
s.Environment.Events().On(stats, environment.ResourceEvent)
s.Environment.Events().On(docker, dockerEvents...)
} }
var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))") var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))")
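
The rewritten listener above swaps one decode-everything channel for a dedicated channel per topic, all drained by a single dispatcher goroutine. A condensed, self-contained sketch of that fan-in shape with plain channels (the event struct mirrors the shape in this diff; topic strings and handlers are illustrative, not the Wings wiring):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// event mirrors the events.Event shape used above.
type event struct {
	Topic string
	Data  interface{}
}

// dispatch drains one channel per topic in a single goroutine, the same shape
// as the rewritten StartEventListeners above (handlers here just print).
func dispatch(ctx context.Context, state, stats <-chan event) {
	go func() {
		for {
			select {
			case e := <-state:
				fmt.Println("state changed:", e.Data)
			case e := <-stats:
				fmt.Println("stats update:", e.Data)
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	state := make(chan event)
	stats := make(chan event)
	dispatch(ctx, state, stats)

	state <- event{Topic: "state change", Data: "starting"}
	stats <- event{Topic: "resources", Data: map[string]int{"memory": 512}}
	time.Sleep(50 * time.Millisecond) // let the dispatcher finish printing
}
```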


@@ -133,11 +133,11 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
         return s.Environment.Start(s.Context())
     case PowerActionStop:
-        fallthrough
+        // We're specifically waiting for the process to be stopped here, otherwise the lock is released
+        // too soon, and you can rack up all sorts of issues.
+        return s.Environment.WaitForStop(10*60, true)
     case PowerActionRestart:
-        // We're specifically waiting for the process to be stopped here, otherwise the lock is
-        // released too soon, and you can rack up all sorts of issues.
-        if err := s.Environment.WaitForStop(s.Context(), time.Minute*10, true); err != nil {
+        if err := s.Environment.WaitForStop(10*60, true); err != nil {
             // Even timeout errors should be bubbled back up the stack. If the process didn't stop
             // nicely, but the terminate argument was passed then the server is stopped without an
             // error being returned.
@@ -149,10 +149,6 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
             return err
         }

-        if action == PowerActionStop {
-            return nil
-        }
-
         // Now actually try to start the process by executing the normal pre-boot logic.
         if err := s.onBeforeStart(); err != nil {
             return err
@@ -160,7 +156,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
         return s.Environment.Start(s.Context())
     case PowerActionTerminate:
-        return s.Environment.Terminate(s.Context(), os.Kill)
+        return s.Environment.Terminate(os.Kill)
     }

     return errors.New("attempting to handle unknown power action")
@@ -201,19 +197,15 @@ func (s *Server) onBeforeStart() error {
     // we don't need to actively do anything about it at this point, worse comes to worst the
     // server starts in a weird state and the user can manually adjust.
     s.PublishConsoleOutputFromDaemon("Updating process configuration files...")
-    s.Log().Debug("updating server configuration files...")
     s.UpdateConfigurationFiles()
-    s.Log().Debug("updated server configuration files")

     if config.Get().System.CheckPermissionsOnBoot {
         s.PublishConsoleOutputFromDaemon("Ensuring file permissions are set correctly, this could take a few seconds...")
         // Ensure all the server file permissions are set correctly before booting the process.
-        s.Log().Debug("chowning server root directory...")
         if err := s.Filesystem().Chown("/"); err != nil {
             return errors.WithMessage(err, "failed to chown root server directory during pre-boot process")
         }
     }

-    s.Log().Info("completed server preflight, starting boot process...")
     return nil
 }


@@ -70,10 +70,10 @@ type Server struct {
     wsBag       *WebsocketBag
     wsBagLocker sync.Mutex

-    sinks map[system.SinkName]*system.SinkPool
+    sinks map[SinkName]*sinkPool

-    logSink     *system.SinkPool
-    installSink *system.SinkPool
+    logSink     *sinkPool
+    installSink *sinkPool
 }

 // New returns a new server instance with a context and all of the default
@@ -88,9 +88,9 @@ func New(client remote.Client) (*Server, error) {
         transferring: system.NewAtomicBool(false),
         restoring:    system.NewAtomicBool(false),
         powerLock:    system.NewLocker(),
-        sinks: map[system.SinkName]*system.SinkPool{
-            system.LogSink:     system.NewSinkPool(),
-            system.InstallSink: system.NewSinkPool(),
+        sinks: map[SinkName]*sinkPool{
+            LogSink:     newSinkPool(),
+            InstallSink: newSinkPool(),
         },
     }

     if err := defaults.Set(&s); err != nil {


@@ -1,4 +1,4 @@
-package system
+package server

 import (
     "sync"
@@ -16,20 +16,20 @@ const (
     InstallSink SinkName = "install"
 )

-// SinkPool represents a pool with sinks.
-type SinkPool struct {
+// sinkPool represents a pool with sinks.
+type sinkPool struct {
     mu    sync.RWMutex
     sinks []chan []byte
 }

-// NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
+// newSinkPool returns a new empty sinkPool. A sink pool generally lives with a
 // server instance for it's full lifetime.
-func NewSinkPool() *SinkPool {
-    return &SinkPool{}
+func newSinkPool() *sinkPool {
+    return &sinkPool{}
 }

 // On adds a channel to the sink pool instance.
-func (p *SinkPool) On(c chan []byte) {
+func (p *sinkPool) On(c chan []byte) {
     p.mu.Lock()
     p.sinks = append(p.sinks, c)
     p.mu.Unlock()
@@ -37,7 +37,7 @@ func (p *SinkPool) On(c chan []byte) {
 // Off removes a given channel from the sink pool. If no matching sink is found
 // this function is a no-op. If a matching channel is found, it will be removed.
-func (p *SinkPool) Off(c chan []byte) {
+func (p *sinkPool) Off(c chan []byte) {
     p.mu.Lock()
     defer p.mu.Unlock()
@@ -66,7 +66,7 @@ func (p *SinkPool) Off(c chan []byte) {
 // Destroy destroys the pool by removing and closing all sinks and destroying
 // all of the channels that are present.
-func (p *SinkPool) Destroy() {
+func (p *sinkPool) Destroy() {
     p.mu.Lock()
     defer p.mu.Unlock()
@@ -95,7 +95,7 @@ func (p *SinkPool) Destroy() {
 // likely the best option anyways. This uses waitgroups to allow every channel
 // to attempt its send concurrently thus making the total blocking time of this
 // function "O(1)" instead of "O(n)".
-func (p *SinkPool) Push(data []byte) {
+func (p *sinkPool) Push(data []byte) {
     p.mu.RLock()
     defer p.mu.RUnlock()

     var wg sync.WaitGroup
@@ -119,3 +119,24 @@ func (p *SinkPool) Push(data []byte) {
     }
     wg.Wait()
 }
+
+// Sink returns the instantiated and named sink for a server. If the sink has
+// not been configured yet this function will cause a panic condition.
+func (s *Server) Sink(name SinkName) *sinkPool {
+    sink, ok := s.sinks[name]
+    if !ok {
+        s.Log().Fatalf("attempt to access nil sink: %s", name)
+    }
+    return sink
+}
+
+// DestroyAllSinks iterates over all of the sinks configured for the server and
+// destroys their instances. Note that this will cause a panic if you attempt
+// to call Server.Sink() again after. This function is only used when a server
+// is being deleted from the system.
+func (s *Server) DestroyAllSinks() {
+    s.Log().Info("destroying all registered sinks for server instance")
+    for _, sink := range s.sinks {
+        sink.Destroy()
+    }
+}
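
The sink pool moving into the server package above is essentially a guarded fan-out over byte-slice channels, where Push sends to every subscriber concurrently rather than one after another. A small self-contained sketch of the same pattern (standalone types, not the Wings ones, and without the send-timeout handling the real pool has):

```go
package main

import (
	"fmt"
	"sync"
)

// pool mirrors the sinkPool idea: a mutex-guarded slice of subscriber channels.
type pool struct {
	mu    sync.RWMutex
	sinks []chan []byte
}

// On registers another subscriber channel.
func (p *pool) On(c chan []byte) {
	p.mu.Lock()
	p.sinks = append(p.sinks, c)
	p.mu.Unlock()
}

// Push delivers data to every sink concurrently, so the slowest reader bounds
// the total blocking time instead of the readers adding up serially.
func (p *pool) Push(data []byte) {
	p.mu.RLock()
	defer p.mu.RUnlock()

	var wg sync.WaitGroup
	for _, c := range p.sinks {
		wg.Add(1)
		go func(c chan []byte) {
			defer wg.Done()
			c <- data
		}(c)
	}
	wg.Wait()
}

func main() {
	p := &pool{}
	out := make(chan []byte, 1)
	p.On(out)
	p.Push([]byte("console line"))
	fmt.Println(string(<-out))
}
```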


@@ -1,4 +1,4 @@
-package system
+package server

 import (
     "fmt"
@@ -23,7 +23,7 @@ func TestSink(t *testing.T) {
     g.Describe("SinkPool#On", func() {
         g.It("pushes additional channels to a sink", func() {
-            pool := &SinkPool{}
+            pool := &sinkPool{}

             g.Assert(pool.sinks).IsZero()
@@ -36,9 +36,9 @@ func TestSink(t *testing.T) {
     })

     g.Describe("SinkPool#Off", func() {
-        var pool *SinkPool
+        var pool *sinkPool
         g.BeforeEach(func() {
-            pool = &SinkPool{}
+            pool = &sinkPool{}
         })

         g.It("works when no sinks are registered", func() {
@@ -97,9 +97,9 @@ func TestSink(t *testing.T) {
     })

     g.Describe("SinkPool#Push", func() {
-        var pool *SinkPool
+        var pool *sinkPool
         g.BeforeEach(func() {
-            pool = &SinkPool{}
+            pool = &sinkPool{}
         })

         g.It("works when no sinks are registered", func() {
@@ -190,9 +190,9 @@ func TestSink(t *testing.T) {
     })

     g.Describe("SinkPool#Destroy", func() {
-        var pool *SinkPool
+        var pool *sinkPool
         g.BeforeEach(func() {
-            pool = &SinkPool{}
+            pool = &sinkPool{}
         })

         g.It("works if no sinks are registered", func() {


@@ -1,8 +1,6 @@
 package server

 import (
-    "time"
-
     "github.com/pterodactyl/wings/environment/docker"

     "github.com/pterodactyl/wings/environment"
@@ -60,7 +58,7 @@ func (s *Server) SyncWithEnvironment() {
             s.Log().Info("server suspended with running process state, terminating now")

             go func(s *Server) {
-                if err := s.Environment.WaitForStop(s.Context(), time.Minute, true); err != nil {
+                if err := s.Environment.WaitForStop(60, true); err != nil {
                     s.Log().WithField("error", err).Warn("failed to terminate server environment after suspension")
                 }
             }(s)


@@ -1,3 +1,3 @@
 package system

-var Version = "1.6.1"
+var Version = "1.6.0"


@@ -51,9 +51,7 @@ func (l *Locker) TryAcquire(ctx context.Context) error {
         return nil
     case <-ctx.Done():
         if err := ctx.Err(); err != nil {
-            if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
-                return ErrLockerLocked
-            }
+            return err
         }
         return nil
     }
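
With the change above, TryAcquire surfaces the context error directly (context.DeadlineExceeded or context.Canceled) instead of mapping it to a dedicated ErrLockerLocked value, which is what the websocket handler and the test below now check for. A hedged sketch of a buffered-channel locker behaving that way (the ch field follows the test below, which inspects l.ch; everything else is illustrative):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// locker is a sketch: a one-slot buffered channel acts as the lock.
type locker struct {
	ch chan bool
}

func newLocker() *locker { return &locker{ch: make(chan bool, 1)} }

// tryAcquire blocks until the slot frees up or the context ends, and simply
// returns ctx.Err() on timeout or cancellation.
func (l *locker) tryAcquire(ctx context.Context) error {
	select {
	case l.ch <- true:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	l := newLocker()
	_ = l.tryAcquire(context.Background()) // first acquire succeeds

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	err := l.tryAcquire(ctx) // lock already held, so the deadline is hit
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
```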


@@ -81,7 +81,7 @@ func TestPower(t *testing.T) {
             err := l.TryAcquire(ctx)
             g.Assert(err).IsNotNil()
-            g.Assert(errors.Is(err, ErrLockerLocked)).IsTrue()
+            g.Assert(errors.Is(err, context.DeadlineExceeded)).IsTrue()
             g.Assert(cap(l.ch)).Equal(1)
             g.Assert(len(l.ch)).Equal(1)
             g.Assert(l.IsLocked()).IsTrue()


@@ -3,10 +3,12 @@ package system
 import (
     "bufio"
     "bytes"
+    "context"
     "fmt"
     "io"
     "strconv"
     "sync"
+    "time"

     "emperror.dev/errors"
     "github.com/goccy/go-json"
@@ -88,16 +90,16 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
         } else {
             buf.Write(line)
         }
-        // If we encountered an error with something in ReadLine that was not an
-        // EOF just abort the entire process here.
-        if err != nil && err != io.EOF {
-            return err
-        }
         // Finish this loop and begin outputting the line if there is no prefix
         // (the line fit into the default buffer), or if we hit the end of the line.
         if !isPrefix || err == io.EOF {
             break
         }
+        // If we encountered an error with something in ReadLine that was not an
+        // EOF just abort the entire process here.
+        if err != nil {
+            return err
+        }
     }

     // Send the full buffer length over to the event handler to be emitted in
@@ -120,6 +122,22 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
     return nil
 }

+// Runs a given work function every "d" duration until the provided context is canceled.
+func Every(ctx context.Context, d time.Duration, work func(t time.Time)) {
+    ticker := time.NewTicker(d)
+    go func() {
+        for {
+            select {
+            case <-ctx.Done():
+                ticker.Stop()
+                return
+            case t := <-ticker.C:
+                work(t)
+            }
+        }
+    }()
+}
+
 func FormatBytes(b int64) string {
     if b < 1024 {
         return fmt.Sprintf("%d B", b)
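
The Every helper added above starts a ticker goroutine and runs the callback until the supplied context is canceled. A short usage sketch, assuming the system package as shown in this hunk:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/pterodactyl/wings/system"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Print a heartbeat once per second until the context is canceled.
	system.Every(ctx, time.Second, func(t time.Time) {
		fmt.Println("tick at", t.Format(time.RFC3339))
	})

	time.Sleep(3 * time.Second)
	cancel() // stops the ticker goroutine started by Every
}
```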