Server Event Optimizations (#116)

Matthew Penner
2022-01-17 20:23:29 -07:00
committed by GitHub
parent 521cc2aef2
commit 649dc9663e
18 changed files with 551 additions and 282 deletions

View File

@@ -79,7 +79,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {
 		s.Log().WithField("backup", b.Identifier()).Info("notified panel of failed backup state")
 	}
-	_ = s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
+	s.Events().Publish(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
 		"uuid":          b.Identifier(),
 		"is_successful": false,
 		"checksum":      "",
@@ -103,7 +103,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {
 	// Emit an event over the socket so we can update the backup in realtime on
 	// the frontend for the server.
-	_ = s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
+	s.Events().Publish(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
 		"uuid":          b.Identifier(),
 		"is_successful": true,
 		"checksum":      ad.Checksum,

View File

@@ -21,12 +21,12 @@ const (
 )
 // Returns the server's emitter instance.
-func (s *Server) Events() *events.EventBus {
+func (s *Server) Events() *events.Bus {
 	s.emitterLock.Lock()
 	defer s.emitterLock.Unlock()
 	if s.emitter == nil {
-		s.emitter = events.New()
+		s.emitter = events.NewBus()
 	}
 	return s.emitter
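
The emitter type changes from events.EventBus to events.Bus. As the listener registrations later in this diff show, the new bus delivers events to subscriber channels of events.Event instead of invoking callback functions. A minimal subscription sketch under that assumption, written as if inside the server package with an fmt import; Off is assumed to mirror On's signature, which is not shown in this diff:

// watchStats is a hypothetical subscriber: it attaches a channel to the bus for a
// single topic and prints every event it receives until the channel is closed.
func watchStats(s *Server) {
	c := make(chan events.Event)
	s.Events().On(c, StatsEvent) // subscribe the channel to a topic
	defer s.Events().Off(c, StatsEvent)
	for e := range c {
		fmt.Printf("topic=%s data=%+v\n", e.Topic, e.Data)
	}
}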

View File

@@ -521,10 +521,7 @@ func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) erro
 	}
 	defer reader.Close()
-	evts := ip.Server.Events()
-	err = system.ScanReader(reader, func(line string) {
-		evts.Publish(InstallOutputEvent, line)
-	})
+	err = system.ScanReader(reader, ip.Server.InstallSink().Push)
 	if err != nil {
 		ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines")
 	}

View File

@@ -1,7 +1,6 @@
 package server

 import (
-	"encoding/json"
 	"regexp"
 	"strconv"
 	"sync"
@@ -51,98 +50,103 @@ func (dsl *diskSpaceLimiter) Trigger() {
 	})
 }

+func (s *Server) processConsoleOutputEvent(v []byte) {
+	t := s.Throttler()
+	err := t.Increment(func() {
+		s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.")
+	})
+	// An error is only returned if the server has breached the thresholds set.
+	if err != nil {
+		// If the process is already stopping, just let it continue with that action rather than attempting
+		// to terminate again.
+		if s.Environment.State() != environment.ProcessStoppingState {
+			s.Environment.SetState(environment.ProcessStoppingState)
+			go func() {
+				s.Log().Warn("stopping server instance, violating throttle limits")
+				s.PublishConsoleOutputFromDaemon("Your server is being stopped for outputting too much data in a short period of time.")
+				// Completely skip over server power actions and terminate the running instance. This gives the
+				// server 15 seconds to finish stopping gracefully before it is forcefully terminated.
+				if err := s.Environment.WaitForStop(config.Get().Throttles.StopGracePeriod, true); err != nil {
+					// If there is an error set the process back to running so that this throttler is called
+					// again and hopefully kills the server.
+					if s.Environment.State() != environment.ProcessOfflineState {
+						s.Environment.SetState(environment.ProcessRunningState)
+					}
+					s.Log().WithField("error", err).Error("failed to terminate environment after triggering throttle")
+				}
+			}()
+		}
+	}
+	// If we are not throttled, go ahead and output the data.
+	if !t.Throttled() {
+		s.LogSink().Push(v)
+	}
+	// Also pass the data along to the console output channel.
+	s.onConsoleOutput(string(v))
+}

 // StartEventListeners adds all the internal event listeners we want to use for a server. These listeners can only be
 // removed by deleting the server as they should last for the duration of the process' lifetime.
 func (s *Server) StartEventListeners() {
-	console := func(e events.Event) {
-		t := s.Throttler()
-		err := t.Increment(func() {
-			s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.")
-		})
-		// An error is only returned if the server has breached the thresholds set.
-		if err != nil {
-			// If the process is already stopping, just let it continue with that action rather than attempting
-			// to terminate again.
-			if s.Environment.State() != environment.ProcessStoppingState {
-				s.Environment.SetState(environment.ProcessStoppingState)
-				go func() {
-					s.Log().Warn("stopping server instance, violating throttle limits")
-					s.PublishConsoleOutputFromDaemon("Your server is being stopped for outputting too much data in a short period of time.")
-					// Completely skip over server power actions and terminate the running instance. This gives the
-					// server 15 seconds to finish stopping gracefully before it is forcefully terminated.
-					if err := s.Environment.WaitForStop(config.Get().Throttles.StopGracePeriod, true); err != nil {
-						// If there is an error set the process back to running so that this throttler is called
-						// again and hopefully kills the server.
-						if s.Environment.State() != environment.ProcessOfflineState {
-							s.Environment.SetState(environment.ProcessRunningState)
-						}
-						s.Log().WithField("error", err).Error("failed to terminate environment after triggering throttle")
-					}
-				}()
-			}
-		}
-		// If we are not throttled, go ahead and output the data.
-		if !t.Throttled() {
-			s.Events().Publish(ConsoleOutputEvent, e.Data)
-		}
-		// Also pass the data along to the console output channel.
-		s.onConsoleOutput(e.Data)
-	}
-	l := newDiskLimiter(s)
-	state := func(e events.Event) {
-		// Reset the throttler when the process is started.
-		if e.Data == environment.ProcessStartingState {
-			l.Reset()
-			s.Throttler().Reset()
-		}
-		s.OnStateChange()
-	}
-	stats := func(e events.Event) {
-		var st environment.Stats
-		if err := json.Unmarshal([]byte(e.Data), &st); err != nil {
-			s.Log().WithField("error", err).Warn("failed to unmarshal server environment stats")
-			return
-		}
-		// Update the server resource tracking object with the resources we got here.
-		s.resources.mu.Lock()
-		s.resources.Stats = st
-		s.resources.mu.Unlock()
-		// If there is no disk space available at this point, trigger the server disk limiter logic
-		// which will start to stop the running instance.
-		if !s.Filesystem().HasSpaceAvailable(true) {
-			l.Trigger()
-		}
-		s.emitProcUsage()
-	}
-	docker := func(e events.Event) {
-		if e.Topic == environment.DockerImagePullStatus {
-			s.Events().Publish(InstallOutputEvent, e.Data)
-		} else if e.Topic == environment.DockerImagePullStarted {
-			s.PublishConsoleOutputFromDaemon("Pulling Docker container image, this could take a few minutes to complete...")
-		} else {
-			s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
-		}
-	}
+	state := make(chan events.Event)
+	stats := make(chan events.Event)
+	docker := make(chan events.Event)
+	go func() {
+		l := newDiskLimiter(s)
+		for {
+			select {
+			case e := <-state:
+				go func() {
+					// Reset the throttler when the process is started.
+					if e.Data == environment.ProcessStartingState {
+						l.Reset()
+						s.Throttler().Reset()
+					}
+					s.OnStateChange()
+				}()
+			case e := <-stats:
+				go func() {
+					// Update the server resource tracking object with the resources we got here.
+					s.resources.mu.Lock()
+					s.resources.Stats = e.Data.(environment.Stats)
+					s.resources.mu.Unlock()
+					// If there is no disk space available at this point, trigger the server disk limiter logic
+					// which will start to stop the running instance.
+					if !s.Filesystem().HasSpaceAvailable(true) {
+						l.Trigger()
+					}
+					s.emitProcUsage()
+				}()
+			case e := <-docker:
+				go func() {
+					switch e.Topic {
+					case environment.DockerImagePullStatus:
+						s.Events().Publish(InstallOutputEvent, e.Data)
+					case environment.DockerImagePullStarted:
+						s.PublishConsoleOutputFromDaemon("Pulling Docker container image, this could take a few minutes to complete...")
+					default:
+						s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
+					}
+				}()
+			}
+		}
+	}()
 	s.Log().Debug("registering event listeners: console, state, resources...")
-	s.Environment.Events().On(environment.ConsoleOutputEvent, &console)
-	s.Environment.Events().On(environment.StateChangeEvent, &state)
-	s.Environment.Events().On(environment.ResourceEvent, &stats)
-	for _, evt := range dockerEvents {
-		s.Environment.Events().On(evt, &docker)
-	}
+	s.Environment.SetLogCallback(s.processConsoleOutputEvent)
+	s.Environment.Events().On(state, environment.StateChangeEvent)
+	s.Environment.Events().On(stats, environment.ResourceEvent)
+	s.Environment.Events().On(docker, dockerEvents...)
 }
var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))")
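
The encoding/json import and the json.Unmarshal call disappear because the new bus hands listeners typed Go values: the resource listener receives an environment.Stats value on e.Data and type-asserts it, and state changes arrive the same way. The commit asserts directly; a defensive drop-in fragment for that stats case, if a listener wanted to guard against an unexpected payload type, would look like this:

// Defensive variant of the assertion in the stats case above: ok reports whether
// the payload really is an environment.Stats value, avoiding a panic on mismatch.
if st, ok := e.Data.(environment.Stats); ok {
	s.resources.mu.Lock()
	s.resources.Stats = st
	s.resources.mu.Unlock()
}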

View File

@@ -52,7 +52,5 @@ func (ru *ResourceUsage) Reset() {
 }

 func (s *Server) emitProcUsage() {
-	if err := s.Events().PublishJson(StatsEvent, s.Proc()); err != nil {
-		s.Log().WithField("error", err).Warn("error while emitting server resource usage to listeners")
-	}
+	s.Events().Publish(StatsEvent, s.Proc())
 }

View File

@@ -49,7 +49,7 @@ type Server struct {
 	fs *filesystem.Filesystem

 	// Events emitted by the server instance.
-	emitter *events.EventBus
+	emitter *events.Bus

 	// Defines the process configuration for the server instance. This is dynamically
 	// fetched from the Pterodactyl Server instance each time the server process is
@@ -70,6 +70,9 @@ type Server struct {
 	// Tracks open websocket connections for the server.
 	wsBag       *WebsocketBag
 	wsBagLocker sync.Mutex
+
+	logSink     *sinkPool
+	installSink *sinkPool
 }
// New returns a new server instance with a context and all of the default
@@ -83,6 +86,9 @@ func New(client remote.Client) (*Server, error) {
 		installing:   system.NewAtomicBool(false),
 		transferring: system.NewAtomicBool(false),
 		restoring:    system.NewAtomicBool(false),
+		logSink:      newSinkPool(),
+		installSink:  newSinkPool(),
 	}
 	if err := defaults.Set(&s); err != nil {
 		return nil, errors.Wrap(err, "server: could not set default values for struct")
@@ -349,3 +355,11 @@ func (s *Server) ToAPIResponse() APIResponse {
 		Configuration: *s.Config(),
 	}
 }
+
+func (s *Server) LogSink() *sinkPool {
+	return s.logSink
+}
+
+func (s *Server) InstallSink() *sinkPool {
+	return s.installSink
+}
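
LogSink and InstallSink expose the two sinkPool instances created in New(): the log sink is fed by processConsoleOutputEvent and the install sink by the installation process. A typical consumer attaches a channel for the lifetime of a connection and detaches when done; a hedged sketch of that wiring (the websocket side is not part of this hunk), assuming context and io imports:

// streamConsole is a hypothetical connection handler: it mirrors console output
// to w until ctx is cancelled or the pool is destroyed, then detaches its channel.
func streamConsole(ctx context.Context, s *Server, w io.Writer) {
	c := make(chan []byte, 32)
	s.LogSink().On(c)
	defer s.LogSink().Off(c)
	for {
		select {
		case line, ok := <-c:
			if !ok { // the pool was destroyed and closed our channel
				return
			}
			_, _ = w.Write(append(line, '\n'))
		case <-ctx.Done():
			return
		}
	}
}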

server/sink.go (new file, 71 lines)
View File

@@ -0,0 +1,71 @@
package server

import (
	"sync"
	"time"
)

// sinkPool represents a pool of sinks.
type sinkPool struct {
	mx    sync.RWMutex
	sinks []chan []byte
}

// newSinkPool returns a new empty sinkPool.
func newSinkPool() *sinkPool {
	return &sinkPool{}
}

// On adds a sink to the pool.
func (p *sinkPool) On(c chan []byte) {
	p.mx.Lock()
	defer p.mx.Unlock()

	p.sinks = append(p.sinks, c)
}

// Off removes a sink from the pool.
func (p *sinkPool) Off(c chan []byte) {
	p.mx.Lock()
	defer p.mx.Unlock()

	sinks := p.sinks
	for i, sink := range sinks {
		if c != sink {
			continue
		}

		copy(sinks[i:], sinks[i+1:])
		sinks[len(sinks)-1] = nil
		sinks = sinks[:len(sinks)-1]
		p.sinks = sinks
		return
	}
}

// Destroy destroys the pool by removing and closing all sinks.
func (p *sinkPool) Destroy() {
	p.mx.Lock()
	defer p.mx.Unlock()

	for _, c := range p.sinks {
		close(c)
	}

	p.sinks = nil
}

// Push pushes a message to all registered sinks.
func (p *sinkPool) Push(v []byte) {
	p.mx.RLock()
	for _, c := range p.sinks {
		// TODO: should this be done in parallel?
		select {
		// Send the message to the channel.
		case c <- v:
		// Time out after 100 milliseconds; this causes the pending write to the channel to be dropped.
		case <-time.After(100 * time.Millisecond):
		}
	}
	p.mx.RUnlock()
}