Compare commits


11 Commits

Author | SHA1 | Message | Date
Dane Everitt | de04e73e82 | Reduce the size of the buffered reader to improve CPU performance | 2022-01-23 18:31:53 -05:00
Dane Everitt | d701b35954 | Update CHANGELOG.md | 2022-01-23 17:23:02 -05:00
Dane Everitt | 34ecf20467 | Re-implement ContainerInspect call in Wings to use more performant json encoder (#119) | 2022-01-23 14:13:49 -08:00
    * First pass at re-implementing the Docker inspect call to use a more efficient json parser
    * Improve logic
Dane Everitt | 34c0db9dff | Replace encoding/json with goccy/go-json for CPU and memory usage improvement | 2022-01-23 15:17:40 -05:00
    This new package has significantly better resource usage, and we do a _lot_ of JSON parsing in this application, so any amount of improvement becomes significant.
Dane Everitt | 301788805c | Ensure a file uploaded using SFTP is properly owned at the end; closes pterodactyl/panel#3689 | 2022-01-23 13:14:02 -05:00
Dane Everitt | 4c8f5c21a3 | Improve power lock logic (#118) | 2022-01-23 09:49:35 -08:00
Dane Everitt | c52db4eec0 | Add test coverage for sinks; prevent panic on nil channels | 2022-01-23 10:41:12 -05:00
Dane Everitt | a4904365c9 | Sink pool cleanup and organization; better future support when we add more sinks | 2022-01-23 09:57:25 -05:00
Dane Everitt | 2a9c9e893e | Add test for scan reader | 2022-01-22 14:52:24 -05:00
Dane Everitt | 1591d86e23 | Quick note about the importance of the copy | 2022-01-22 14:33:49 -05:00
Dane Everitt | b5536dfc77 | Prevent excessive memory usage when large lines are sent over the console | 2022-01-22 14:33:03 -05:00
33 changed files with 865 additions and 170 deletions

View File

@@ -1,5 +1,15 @@
# Changelog
## v1.5.6
### Fixed
* Rewrote handler logic for the power actions lock to hopefully address issues people have been having when a server crashes and they're unable to start it again until restarting Wings.
* Fixes files uploaded with SFTP not being owned by the Pterodactyl user.
* Fixes excessive memory usage when large lines are sent through the console event handler.
### Changed
* Replaced usage of `encoding/json` throughout the codebase with a more performant encoder (`goccy/go-json`) to hopefully improve overall performance for JSON operations.
* Added custom `ContainerInspect` function to handle direct calls to Docker's CLI and make use of the new JSON encoder logic. This should reduce the total number of memory allocations and be more performant overall in a hot pathway.
## v1.5.5
### Fixed
* Fixes sending to a closed channel when sending server logs over the websocket
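The encoding/json to goccy/go-json swap noted under v1.5.6 is close to a drop-in because the new package mirrors the standard library's API. A minimal standalone sketch (the struct is invented for illustration, not taken from Wings):

```go
package main

import (
	"fmt"

	// goccy/go-json declares package name "json", so existing call sites
	// compile unchanged once the import path is swapped.
	"github.com/goccy/go-json"
)

// stats is an illustrative struct, not a type from the Wings codebase.
type stats struct {
	Memory uint64  `json:"memory"`
	CPU    float64 `json:"cpu"`
}

func main() {
	b, err := json.Marshal(stats{Memory: 1024, CPU: 2.5})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"memory":1024,"cpu":2.5}

	var out stats
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Memory:1024 CPU:2.5}
}
```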

View File

@@ -2,7 +2,6 @@ package cmd
import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -14,6 +13,7 @@ import (
"github.com/AlecAivazis/survey/v2"
"github.com/AlecAivazis/survey/v2/terminal"
"github.com/goccy/go-json"
"github.com/spf13/cobra"
"github.com/pterodactyl/wings/config"

View File

@@ -2,7 +2,6 @@ package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
@@ -20,6 +19,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/goccy/go-json"
"github.com/spf13/cobra"
"github.com/pterodactyl/wings/config"

View File

@@ -2,10 +2,10 @@ package config
import (
"encoding/base64"
"encoding/json"
"sort"
"github.com/docker/docker/api/types"
"github.com/goccy/go-json"
)
type dockerNetworkInterfaces struct {
@@ -75,6 +75,8 @@ type DockerConfiguration struct {
// Overhead controls the memory overhead given to all containers to circumvent certain
// software such as the JVM not staying below the maximum memory limit.
Overhead Overhead `json:"overhead" yaml:"overhead"`
UsePerformantInspect bool `default:"true" json:"use_performant_inspect" yaml:"use_performant_inspect"`
}
// RegistryConfiguration defines the authentication credentials for a given

environment/docker/api.go (new file, 116 lines)
View File

@@ -0,0 +1,116 @@
package docker
import (
"context"
"io"
"net/http"
"reflect"
"strings"
"sync"
"emperror.dev/errors"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
)
var (
o sync.Once
cli cliSettings
fastEnabled bool
)
type cliSettings struct {
enabled bool
proto string
host string
scheme string
version string
}
func configure(c *client.Client) {
o.Do(func() {
fastEnabled = config.Get().Docker.UsePerformantInspect
r := reflect.ValueOf(c).Elem()
cli.proto = r.FieldByName("proto").String()
cli.host = r.FieldByName("addr").String()
cli.scheme = r.FieldByName("scheme").String()
cli.version = r.FieldByName("version").String()
})
}
// ContainerInspect is a rough equivalent of Docker's client.ContainerInspect()
// but re-written to use a more performant JSON decoder. This is important since
// a large number of requests to this endpoint are spawned by Wings, and the
// standard "encoding/json" shows its performance woes badly even with single
// containers running.
func (e *Environment) ContainerInspect(ctx context.Context) (types.ContainerJSON, error) {
configure(e.client)
// Support feature flagging of this functionality so that if something goes
// wrong for now it is easy enough for people to switch back to the older method
// of fetching stats.
if !fastEnabled {
return e.client.ContainerInspect(ctx, e.Id)
}
var st types.ContainerJSON
req, err := http.NewRequestWithContext(ctx, http.MethodGet, "/containers/"+e.Id+"/json", nil)
if err != nil {
return st, errors.WithStack(err)
}
if cli.proto == "unix" || cli.proto == "npipe" {
req.Host = "docker"
}
req.URL.Host = cli.host
req.URL.Scheme = cli.scheme
res, err := e.client.HTTPClient().Do(req)
if err != nil {
if res == nil {
// No response was returned at all (e.g. the daemon socket is unreachable),
// so there is no status code to map; just wrap and return the error.
return st, errors.WithStack(err)
}
return st, errdefs.FromStatusCode(err, res.StatusCode)
}
defer res.Body.Close()
body, err := io.ReadAll(res.Body)
if err != nil {
return st, errors.Wrap(err, "failed to read response body from Docker")
}
if err := parseErrorFromResponse(res, body); err != nil {
return st, errdefs.FromStatusCode(err, res.StatusCode)
}
if err := json.Unmarshal(body, &st); err != nil {
return st, errors.WithStack(err)
}
return st, nil
}
// parseErrorFromResponse is a re-implementation of Docker's
// client.checkResponseErr() function.
func parseErrorFromResponse(res *http.Response, body []byte) error {
if res.StatusCode >= 200 && res.StatusCode < 400 {
return nil
}
var ct string
if res.Header != nil {
ct = res.Header.Get("Content-Type")
}
var emsg string
if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
var errResp types.ErrorResponse
if err := json.Unmarshal(body, &errResp); err != nil {
return errors.WithStack(err)
}
emsg = strings.TrimSpace(errResp.Message)
} else {
emsg = strings.TrimSpace(string(body))
}
return errors.Wrap(errors.New(emsg), "Error response from daemon")
}
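For orientation, this is how the wrapper gets consumed at the call sites further down in this diff: every client.ContainerInspect(ctx, e.Id) becomes e.ContainerInspect(ctx). A hypothetical helper in the same package, sketching the pattern:

```go
// isRunning is an illustrative free function, not part of this commit. The
// wrapper above transparently falls back to the stock Docker client when the
// use_performant_inspect flag is disabled, so callers need no feature checks.
func isRunning(ctx context.Context, e *Environment) (bool, error) {
	st, err := e.ContainerInspect(ctx)
	if err != nil {
		return false, err
	}
	return st.State != nil && st.State.Running, nil
}
```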

View File

@@ -3,7 +3,6 @@ package docker
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"strconv"
@@ -12,6 +11,7 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/buger/jsonparser"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
@@ -118,7 +118,7 @@ func (e *Environment) InSituUpdate() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
if _, err := e.client.ContainerInspect(ctx, e.Id); err != nil {
if _, err := e.ContainerInspect(ctx); err != nil {
// If the container doesn't exist for some reason there really isn't anything
// we can do to fix that in this process (it doesn't make sense at least). In those
// cases just return without doing anything since we still want to save the configuration
@@ -150,7 +150,7 @@ func (e *Environment) Create() error {
// If the container already exists don't hit the user with an error, just return
// the current information about it which is what we would do when creating the
// container anyways.
if _, err := e.client.ContainerInspect(context.Background(), e.Id); err == nil {
if _, err := e.ContainerInspect(context.Background()); err == nil {
return nil
} else if !client.IsErrNotFound(err) {
return errors.Wrap(err, "environment/docker: failed to inspect container")
@@ -364,11 +364,6 @@ func (e *Environment) scanOutput(reader io.ReadCloser) {
go e.followOutput()
}
type imagePullStatus struct {
Status string `json:"status"`
Progress string `json:"progress"`
}
// Pulls the image from Docker. If there is an error while pulling the image
// from the source but the image already exists locally, we will report that
// error to the logger but continue with the process.
@@ -454,12 +449,11 @@ func (e *Environment) ensureImageExists(image string) error {
scanner := bufio.NewScanner(out)
for scanner.Scan() {
s := imagePullStatus{}
fmt.Println(scanner.Text())
b := scanner.Bytes()
status, _ := jsonparser.GetString(b, "status")
progress, _ := jsonparser.GetString(b, "progress")
if err := json.Unmarshal(scanner.Bytes(), &s); err == nil {
e.Events().Publish(environment.DockerImagePullStatus, s.Status+" "+s.Progress)
}
e.Events().Publish(environment.DockerImagePullStatus, status+" "+progress)
}
if err := scanner.Err(); err != nil {

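The hunk above also drops the per-line imagePullStatus struct: instead of unmarshaling every pull-status line into a struct, buger/jsonparser pulls the two wanted fields straight out of the raw bytes. The same pattern in isolation, with a payload shaped like Docker's pull-progress events:

```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

func main() {
	line := []byte(`{"status":"Downloading","progress":"[==>  ] 12MB/48MB"}`)

	// GetString walks the raw bytes directly; nothing is allocated for
	// fields we do not care about.
	status, _ := jsonparser.GetString(line, "status")
	progress, _ := jsonparser.GetString(line, "progress")

	fmt.Println(status + " " + progress) // Downloading [==>  ] 12MB/48MB
}
```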
View File

@@ -10,7 +10,6 @@ import (
"github.com/apex/log"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote"
@@ -116,7 +115,7 @@ func (e *Environment) Events() *events.Bus {
// will work fine when using the container name as the lookup parameter in addition to the longer
// ID auto-assigned when the container is created.
func (e *Environment) Exists() (bool, error) {
_, err := e.client.ContainerInspect(context.Background(), e.Id)
_, err := e.ContainerInspect(context.Background())
if err != nil {
// If this error is because the container instance wasn't found via Docker we
// can safely ignore the error and just return false.
@@ -140,7 +139,7 @@ func (e *Environment) Exists() (bool, error) {
//
// @see docker/client/errors.go
func (e *Environment) IsRunning(ctx context.Context) (bool, error) {
c, err := e.client.ContainerInspect(ctx, e.Id)
c, err := e.ContainerInspect(ctx)
if err != nil {
return false, err
}
@@ -150,7 +149,7 @@ func (e *Environment) IsRunning(ctx context.Context) (bool, error) {
// Determine the container exit state and return the exit code and whether or not
// the container was killed by the OOM killer.
func (e *Environment) ExitState() (uint32, bool, error) {
c, err := e.client.ContainerInspect(context.Background(), e.Id)
c, err := e.ContainerInspect(context.Background())
if err != nil {
// I'm not entirely sure how this can happen to be honest. I tried deleting a
// container _while_ a server was running and wings gracefully saw the crash and

View File

@@ -66,7 +66,7 @@ func (e *Environment) Start(ctx context.Context) error {
}
}()
if c, err := e.client.ContainerInspect(ctx, e.Id); err != nil {
if c, err := e.ContainerInspect(ctx); err != nil {
// Do nothing if the container is not found, we just don't want to continue
// to the next block of code here. This check was inlined here to guard against
// a nil-pointer when checking c.State below.
@@ -235,7 +235,7 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
// Terminate forcefully terminates the container using the signal provided.
func (e *Environment) Terminate(signal os.Signal) error {
c, err := e.client.ContainerInspect(context.Background(), e.Id)
c, err := e.ContainerInspect(context.Background())
if err != nil {
// Treat missing containers as an okay error state, means it is obviously
// already terminated at this point.

View File

@@ -2,13 +2,13 @@ package docker
import (
"context"
"encoding/json"
"io"
"math"
"time"
"emperror.dev/errors"
"github.com/docker/docker/api/types"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/environment"
)
@@ -16,7 +16,7 @@ import (
// Uptime returns the current uptime of the container in milliseconds. If the
// container is not currently running this will return 0.
func (e *Environment) Uptime(ctx context.Context) (int64, error) {
ins, err := e.client.ContainerInspect(ctx, e.Id)
ins, err := e.ContainerInspect(ctx)
if err != nil {
return 0, errors.Wrap(err, "environment: could not inspect container")
}

go.mod (5 changed lines)
View File

@@ -45,6 +45,10 @@ require (
gopkg.in/yaml.v2 v2.4.0
)
require github.com/goccy/go-json v0.9.4
require golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 // indirect
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
@@ -102,7 +106,6 @@ require (
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 // indirect
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect

go.sum (5 changed lines)
View File

@@ -371,6 +371,8 @@ github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn
github.com/go-playground/validator/v10 v10.8.0 h1:1kAa0fCrnpv+QYdkdcRzrRM7AyYs5o8+jZdJCz9xj6k=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/goccy/go-json v0.9.4 h1:L8MLKG2mvVXiQu07qB6hmfqeSYQdOnqPot2GhsIwIaI=
github.com/goccy/go-json v0.9.4/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
@@ -1111,8 +1113,9 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 h1:WecRHqgE09JBkh/584XIE6PMz5KKE/vER4izNUi30AQ=
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=

View File

@@ -2,7 +2,6 @@ package parser
import (
"bufio"
"encoding/json"
"os"
"path/filepath"
"strconv"
@@ -14,6 +13,7 @@ import (
"github.com/buger/jsonparser"
"github.com/icza/dyno"
"github.com/magiconair/properties"
"github.com/goccy/go-json"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v2"
@@ -80,8 +80,8 @@ func (cp ConfigurationParser) String() string {
return string(cp)
}
// Defines a configuration file for the server startup. These will be looped over
// and modified before the server finishes booting.
// ConfigurationFile defines a configuration file for the server startup. These
// will be looped over and modified before the server finishes booting.
type ConfigurationFile struct {
FileName string `json:"file"`
Parser ConfigurationParser `json:"parser"`
@@ -92,12 +92,10 @@ type ConfigurationFile struct {
configuration []byte
}
// Custom unmarshaler for configuration files. If there is an error while parsing out the
// replacements, don't fail the entire operation, just log a global warning so someone can
// find the issue, and return an empty array of replacements.
//
// I imagine people will notice configuration replacement isn't working correctly and then
// the logs should help better expose that issue.
// UnmarshalJSON is a custom unmarshaler for configuration files. If there is an
// error while parsing out the replacements, don't fail the entire operation,
// just log a global warning so someone can find the issue, and return an empty
// array of replacements.
func (f *ConfigurationFile) UnmarshalJSON(data []byte) error {
var m map[string]*json.RawMessage
if err := json.Unmarshal(data, &m); err != nil {

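The unmarshaler above decodes the document into map[string]*json.RawMessage first so that a malformed replacements block can be logged and skipped without failing the whole file. The same lenient-decode pattern in isolation (type and field names are invented):

```go
package main

import (
	"fmt"
	"log"

	"github.com/goccy/go-json"
)

type document struct {
	Name     string
	Sections []string
}

// UnmarshalJSON parses each section individually so one malformed section
// degrades to a logged warning rather than an error for the entire decode.
func (d *document) UnmarshalJSON(data []byte) error {
	var m map[string]*json.RawMessage
	if err := json.Unmarshal(data, &m); err != nil {
		return err
	}
	if raw, ok := m["name"]; ok {
		if err := json.Unmarshal(*raw, &d.Name); err != nil {
			return err
		}
	}
	if raw, ok := m["sections"]; ok {
		if err := json.Unmarshal(*raw, &d.Sections); err != nil {
			log.Printf("could not parse sections, continuing without them: %v", err)
			d.Sections = nil
		}
	}
	return nil
}

func main() {
	var d document
	// "sections" is deliberately malformed; the decode still succeeds.
	_ = json.Unmarshal([]byte(`{"name":"demo","sections":123}`), &d)
	fmt.Printf("%+v\n", d) // {Name:demo Sections:[]}
}
```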
View File

@@ -3,7 +3,6 @@ package remote
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -14,6 +13,7 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/cenkalti/backoff/v4"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
)

View File

@@ -1,11 +1,11 @@
package remote
import (
"encoding/json"
"regexp"
"strings"
"github.com/apex/log"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/parser"
)

View File

@@ -2,7 +2,6 @@ package downloader
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
@@ -15,6 +14,7 @@ import (
"emperror.dev/errors"
"github.com/google/uuid"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/server"
)

View File

@@ -53,6 +53,7 @@ func postServerPower(c *gin.Context) {
var data struct {
Action server.PowerAction `json:"action"`
WaitSeconds int `json:"wait_seconds"`
}
if err := c.BindJSON(&data); err != nil {
@@ -83,12 +84,16 @@ func postServerPower(c *gin.Context) {
// we can immediately return a response from the server. Some of these actions
// can take quite some time, especially stopping or restarting.
go func(s *server.Server) {
if err := s.HandlePowerAction(data.Action, 30); err != nil {
if data.WaitSeconds < 0 || data.WaitSeconds > 300 {
data.WaitSeconds = 30
}
if err := s.HandlePowerAction(data.Action, data.WaitSeconds); err != nil {
if errors.Is(err, context.DeadlineExceeded) {
s.Log().WithField("action", data.Action).
Warn("could not acquire a lock while attempting to perform a power action")
s.Log().WithField("action", data.Action).WithField("error", err).Warn("could not process server power action")
} else if errors.Is(err, server.ErrIsRunning) {
// Do nothing, this isn't something we care about for logging,
} else {
s.Log().WithFields(log.Fields{"action": data, "error": err}).
s.Log().WithFields(log.Fields{"action": data.Action, "wait_seconds": data.WaitSeconds, "error": err}).
Error("encountered error processing a server power action in the background")
}
}
@@ -182,15 +187,7 @@ func deleteServer(c *gin.Context) {
// Immediately suspend the server to prevent a user from attempting
// to start it while this process is running.
s.Config().SetSuspended(true)
// Stop all running background tasks for this server that are using the context on
// the server struct. This will cancel any running install processes for the server
// as well.
s.CtxCancel()
s.Events().Destroy()
s.LogSink().Destroy()
s.InstallSink().Destroy()
s.Websockets().CancelAll()
s.CleanupForDestroy()
// Remove any pending remote file downloads for the server.
for _, dl := range downloader.ByServer(s.ID()) {

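With the WaitSeconds field wired through, API consumers can now say how long Wings should wait on the power lock; anything below 0 or above 300 is clamped back to the previous hard-coded 30 seconds. A hypothetical client call (route, port, and token are assumptions based on Wings' defaults, not part of this diff):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Placeholders: substitute a real node address, server ID, and token.
	const serverID = "00000000-0000-0000-0000-000000000000"
	const token = "example-token"

	body := bytes.NewBufferString(`{"action":"restart","wait_seconds":60}`)
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8080/api/servers/"+serverID+"/power", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)
	req.Header.Set("Content-Type", "application/json")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	// The action itself runs in a background goroutine on the node.
	fmt.Println(res.Status)
}
```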
View File

@@ -2,11 +2,11 @@ package router
import (
"context"
"encoding/json"
"time"
"github.com/gin-gonic/gin"
ws "github.com/gorilla/websocket"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/websocket"

View File

@@ -1,13 +1,13 @@
package tokens
import (
"encoding/json"
"strings"
"sync"
"time"
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/goccy/go-json"
)
// The time at which Wings was booted. No JWT's created before this time are allowed to

View File

@@ -2,11 +2,12 @@ package websocket
import (
"context"
"encoding/json"
"sync"
"time"
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/server"
)
@@ -91,8 +92,8 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
logOutput := make(chan []byte)
installOutput := make(chan []byte)
h.server.Events().On(eventChan, e...)
h.server.LogSink().On(logOutput)
h.server.InstallSink().On(installOutput)
h.server.Sink(server.LogSink).On(logOutput)
h.server.Sink(server.InstallSink).On(installOutput)
onError := func(evt string, err2 error) {
h.Logger().WithField("event", evt).WithField("error", err2).Error("failed to send event over server websocket")
@@ -148,8 +149,8 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
// These functions will automatically close the channel if it hasn't been already.
h.server.Events().Off(eventChan, e...)
h.server.LogSink().Off(logOutput)
h.server.InstallSink().Off(installOutput)
h.server.Sink(server.LogSink).Off(logOutput)
h.server.Sink(server.InstallSink).Off(installOutput)
// If the internal context is stopped it is either because the parent context
// got canceled or because we ran into an error. If the "err" variable is nil

View File

@@ -2,7 +2,6 @@ package websocket
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
@@ -14,6 +13,7 @@ import (
"github.com/gbrlsnchs/jwt/v3"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"

View File

@@ -1,12 +1,12 @@
package filesystem
import (
"encoding/json"
"os"
"strconv"
"time"
"github.com/gabriel-vasile/mimetype"
"github.com/goccy/go-json"
)
type Stat struct {

View File

@@ -507,9 +507,9 @@ func (ip *InstallationProcess) Execute() (string, error) {
return r.ID, nil
}
// Streams the output of the installation process to a log file in the server configuration
// directory, as well as to a websocket listener so that the process can be viewed in
// the panel by administrators.
// StreamOutput streams the output of the installation process to a log file in
// the server configuration directory, as well as to a websocket listener so
// that the process can be viewed in the panel by administrators.
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
reader, err := ip.client.ContainerLogs(ctx, id, types.ContainerLogsOptions{
ShowStdout: true,
@@ -521,7 +521,7 @@ func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) erro
}
defer reader.Close()
err = system.ScanReader(reader, ip.Server.InstallSink().Push)
err = system.ScanReader(reader, ip.Server.Sink(InstallSink).Push)
if err != nil {
ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines")
}

View File

@@ -83,7 +83,7 @@ func (s *Server) processConsoleOutputEvent(v []byte) {
// If we are not throttled, go ahead and output the data.
if !t.Throttled() {
s.LogSink().Push(v)
s.Sink(LogSink).Push(v)
}
// Also pass the data along to the console output channel.
@@ -125,7 +125,7 @@ func (s *Server) StartEventListeners() {
l.Trigger()
}
s.emitProcUsage()
s.Events().Publish(StatsEvent, s.Proc())
}()
case e := <-docker:
go func() {

View File

@@ -2,7 +2,6 @@ package server
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
@@ -14,6 +13,7 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gammazero/workerpool"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"

View File

@@ -2,12 +2,13 @@ package server
import (
"context"
"fmt"
"os"
"sync"
"time"
"emperror.dev/errors"
"golang.org/x/sync/semaphore"
"github.com/google/uuid"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
)
@@ -40,19 +41,85 @@ func (pa PowerAction) IsStart() bool {
return pa == PowerActionStart || pa == PowerActionRestart
}
// ExecutingPowerAction checks if there is currently a power action being processed for the server.
type powerLocker struct {
mu sync.RWMutex
ch chan bool
}
func newPowerLocker() *powerLocker {
return &powerLocker{
ch: make(chan bool, 1),
}
}
type errPowerLockerLocked struct{}
func (e errPowerLockerLocked) Error() string {
return "cannot acquire a lock on the power state: already locked"
}
var ErrPowerLockerLocked error = errPowerLockerLocked{}
// IsLocked returns the current state of the locker channel. If there is
// currently a value in the channel, it is assumed to be locked.
func (pl *powerLocker) IsLocked() bool {
pl.mu.RLock()
defer pl.mu.RUnlock()
return len(pl.ch) == 1
}
// Acquire will acquire the power lock if it is not currently locked. If it is
// already locked, acquire will fail to acquire the lock, and will return false.
func (pl *powerLocker) Acquire() error {
pl.mu.Lock()
defer pl.mu.Unlock()
if len(pl.ch) == 1 {
return errors.WithStack(ErrPowerLockerLocked)
}
pl.ch <- true
return nil
}
// TryAcquire will attempt to acquire the power lock, blocking until it is
// available or until the provided context is canceled.
func (pl *powerLocker) TryAcquire(ctx context.Context) error {
select {
case pl.ch <- true:
return nil
case <-ctx.Done():
if err := ctx.Err(); err != nil {
return errors.WithStack(err)
}
return nil
}
}
// Release will drain the locker channel so that we can properly re-acquire it
// at a later time.
func (pl *powerLocker) Release() {
pl.mu.Lock()
if len(pl.ch) == 1 {
<-pl.ch
}
pl.mu.Unlock()
}
// Destroy cleans up the power locker by closing the channel.
func (pl *powerLocker) Destroy() {
pl.mu.Lock()
if pl.ch != nil {
if len(pl.ch) == 1 {
<-pl.ch
}
close(pl.ch)
}
pl.mu.Unlock()
}
// ExecutingPowerAction checks if there is currently a power action being
// processed for the server.
func (s *Server) ExecutingPowerAction() bool {
if s.powerLock == nil {
return false
}
ok := s.powerLock.TryAcquire(1)
if ok {
s.powerLock.Release(1)
}
// Remember, if we acquired a lock it means nothing was running.
return !ok
return s.powerLock.IsLocked()
}
// HandlePowerAction is a helper function that can receive a power action and then process the
@@ -63,22 +130,29 @@ func (s *Server) ExecutingPowerAction() bool {
// function rather than making direct calls to the start/stop/restart functions on the
// environment struct.
func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error {
if s.IsInstalling() {
if s.IsInstalling() || s.IsTransferring() || s.IsRestoring() {
if s.IsRestoring() {
return ErrServerIsRestoring
} else if s.IsTransferring() {
return ErrServerIsTransferring
}
return ErrServerIsInstalling
}
if s.IsTransferring() {
return ErrServerIsTransferring
lockId, _ := uuid.NewUUID()
log := s.Log().WithField("lock_id", lockId.String()).WithField("action", action)
cleanup := func() {
log.Info("releasing exclusive lock for power action")
s.powerLock.Release()
}
if s.IsRestoring() {
return ErrServerIsRestoring
}
if s.powerLock == nil {
s.powerLock = semaphore.NewWeighted(1)
var wait int
if len(waitSeconds) > 0 && waitSeconds[0] > 0 {
wait = waitSeconds[0]
}
log.WithField("wait_seconds", wait).Debug("acquiring power action lock for instance")
// Only attempt to acquire a lock on the process if this is not a termination event. We want to
// just allow those events to pass right through for good reason. If a server is currently trying
// to process a power action but has gotten stuck you still should be able to pass through the
@@ -87,33 +161,38 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
if action != PowerActionTerminate {
// Determines if we should wait for the lock or not. If a value greater than 0 is passed
// into this function we will wait that long for a lock to be acquired.
if len(waitSeconds) > 0 && waitSeconds[0] != 0 {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(waitSeconds[0]))
if wait > 0 {
ctx, cancel := context.WithTimeout(s.ctx, time.Second*time.Duration(wait))
defer cancel()
// Attempt to acquire a lock on the power action lock for up to 30 seconds. If more
// time than that passes an error will be propagated back up the chain and this
// request will be aborted.
if err := s.powerLock.Acquire(ctx, 1); err != nil {
return errors.WithMessage(err, "could not acquire lock on power state")
if err := s.powerLock.TryAcquire(ctx); err != nil {
return errors.Wrap(err, fmt.Sprintf("could not acquire lock on power action after %d seconds", wait))
}
} else {
// If no wait duration was provided we will attempt to immediately acquire the lock
// and bail out with a context deadline error if it is not acquired immediately.
if ok := s.powerLock.TryAcquire(1); !ok {
return errors.WithMessage(context.DeadlineExceeded, "could not acquire lock on power state")
if err := s.powerLock.Acquire(); err != nil {
return errors.Wrap(err, "failed to acquire exclusive lock for power actions")
}
}
// Release the lock once the process being requested has finished executing.
defer s.powerLock.Release(1)
log.Info("acquired exclusive lock on power actions, processing event...")
defer cleanup()
} else {
// Still try to acquire the lock if terminating, and it is available, just so that other power
// actions are blocked until it has completed. However, if it is unavailable we won't stop
// the entire process.
if ok := s.powerLock.TryAcquire(1); ok {
// If we managed to acquire the lock be sure to released it once this process is completed.
defer s.powerLock.Release(1)
// Still try to acquire the lock if terminating, and it is available, just so that
// other power actions are blocked until it has completed. However, if it cannot be
// acquired we won't stop the entire process.
//
// If we did successfully acquire the lock, make sure we release it once we're done
// executing the power actions.
if err := s.powerLock.Acquire(); err == nil {
log.Info("acquired exclusive lock on power actions, processing event...")
defer cleanup()
} else {
log.Warn("failed to acquire exclusive lock, ignoring failure for termination event")
}
}

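The semaphore.Weighted lock is gone; in its place a one-slot buffered channel backs the new powerLocker. A self-contained toy version of the channel trick, showing the three semantics (non-blocking acquire, deadline-bound acquire, idempotent release); the real implementation above additionally guards the channel with a mutex and supports Destroy:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type locker struct{ ch chan bool }

func newLocker() *locker { return &locker{ch: make(chan bool, 1)} }

// Acquire is non-blocking: sending into the one-slot buffer succeeds only
// when the lock is free.
func (l *locker) Acquire() bool {
	select {
	case l.ch <- true:
		return true
	default:
		return false
	}
}

// TryAcquire blocks until the slot frees up or the context ends.
func (l *locker) TryAcquire(ctx context.Context) error {
	select {
	case l.ch <- true:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Release drains the slot; releasing an unheld lock is a no-op.
func (l *locker) Release() {
	select {
	case <-l.ch:
	default:
	}
}

func main() {
	l := newLocker()
	fmt.Println(l.Acquire()) // true: lock was free
	fmt.Println(l.Acquire()) // false: already held

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(l.TryAcquire(ctx)) // context deadline exceeded: still held

	l.Release()
	fmt.Println(l.Acquire()) // true: free again after release
}
```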
server/power_test.go (new file, 158 lines)
View File

@@ -0,0 +1,158 @@
package server
import (
"context"
"testing"
"time"
"emperror.dev/errors"
. "github.com/franela/goblin"
)
func TestPower(t *testing.T) {
g := Goblin(t)
g.Describe("PowerLocker", func() {
var pl *powerLocker
g.BeforeEach(func() {
pl = newPowerLocker()
})
g.Describe("PowerLocker#IsLocked", func() {
g.It("should return false when the channel is empty", func() {
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsFalse()
})
g.It("should return true when the channel is at capacity", func() {
pl.ch <- true
g.Assert(pl.IsLocked()).IsTrue()
<-pl.ch
g.Assert(pl.IsLocked()).IsFalse()
// We don't care what the channel value is, just that there is
// something in it.
pl.ch <- false
g.Assert(pl.IsLocked()).IsTrue()
g.Assert(cap(pl.ch)).Equal(1)
})
})
g.Describe("PowerLocker#Acquire", func() {
g.It("should acquire a lock when channel is empty", func() {
err := pl.Acquire()
g.Assert(err).IsNil()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
})
g.It("should return an error when the channel is full", func() {
pl.ch <- true
err := pl.Acquire()
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, ErrPowerLockerLocked)).IsTrue()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
})
})
g.Describe("PowerLocker#TryAcquire", func() {
g.It("should acquire a lock when channel is empty", func() {
g.Timeout(time.Second)
err := pl.TryAcquire(context.Background())
g.Assert(err).IsNil()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsTrue()
})
g.It("should block until context is canceled if channel is full", func() {
g.Timeout(time.Second)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
defer cancel()
pl.ch <- true
err := pl.TryAcquire(ctx)
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, context.DeadlineExceeded)).IsTrue()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsTrue()
})
g.It("should block until lock can be acquired", func() {
g.Timeout(time.Second)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200)
defer cancel()
pl.Acquire()
go func() {
time.AfterFunc(time.Millisecond*50, func() {
pl.Release()
})
}()
err := pl.TryAcquire(ctx)
g.Assert(err).IsNil()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsTrue()
})
})
g.Describe("PowerLocker#Release", func() {
g.It("should release when channel is full", func() {
pl.Acquire()
g.Assert(pl.IsLocked()).IsTrue()
pl.Release()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(0)
g.Assert(pl.IsLocked()).IsFalse()
})
g.It("should release when channel is empty", func() {
g.Assert(pl.IsLocked()).IsFalse()
pl.Release()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(0)
g.Assert(pl.IsLocked()).IsFalse()
})
})
g.Describe("PowerLocker#Destroy", func() {
g.It("should unlock and close the channel", func() {
pl.Acquire()
g.Assert(pl.IsLocked()).IsTrue()
pl.Destroy()
g.Assert(pl.IsLocked()).IsFalse()
defer func() {
r := recover()
g.Assert(r).IsNotNil()
g.Assert(r.(error).Error()).Equal("send on closed channel")
}()
pl.Acquire()
})
})
})
g.Describe("Server#ExecutingPowerAction", func() {
g.It("should return based on locker status", func() {
s := &Server{powerLock: newPowerLocker()}
g.Assert(s.ExecutingPowerAction()).IsFalse()
s.powerLock.Acquire()
g.Assert(s.ExecutingPowerAction()).IsTrue()
})
})
}

View File

@@ -50,7 +50,3 @@ func (ru *ResourceUsage) Reset() {
ru.Network.TxBytes = 0
ru.Network.RxBytes = 0
}
func (s *Server) emitProcUsage() {
s.Events().Publish(StatsEvent, s.Proc())
}

View File

@@ -2,7 +2,6 @@ package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
@@ -12,7 +11,7 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/creasty/defaults"
"golang.org/x/sync/semaphore"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
@@ -32,7 +31,7 @@ type Server struct {
ctxCancel *context.CancelFunc
emitterLock sync.Mutex
powerLock *semaphore.Weighted
powerLock *powerLocker
throttleOnce sync.Once
// Maintains the configuration for the server. This is the data that gets returned by the Panel
@@ -71,6 +70,8 @@ type Server struct {
wsBag *WebsocketBag
wsBagLocker sync.Mutex
sinks map[SinkName]*sinkPool
logSink *sinkPool
installSink *sinkPool
}
@@ -86,9 +87,11 @@ func New(client remote.Client) (*Server, error) {
installing: system.NewAtomicBool(false),
transferring: system.NewAtomicBool(false),
restoring: system.NewAtomicBool(false),
logSink: newSinkPool(),
installSink: newSinkPool(),
powerLock: newPowerLocker(),
sinks: map[SinkName]*sinkPool{
LogSink: newSinkPool(),
InstallSink: newSinkPool(),
},
}
if err := defaults.Set(&s); err != nil {
return nil, errors.Wrap(err, "server: could not set default values for struct")
@@ -100,6 +103,17 @@ func New(client remote.Client) (*Server, error) {
return &s, nil
}
// CleanupForDestroy stops all running background tasks for this server that are
// using the context on the server struct. This will cancel any running install
// processes for the server as well.
func (s *Server) CleanupForDestroy() {
s.CtxCancel()
s.Events().Destroy()
s.DestroyAllSinks()
s.Websockets().CancelAll()
s.powerLock.Destroy()
}
// ID returns the UUID for the server instance.
func (s *Server) ID() string {
return s.Config().GetUuid()
@@ -299,7 +313,7 @@ func (s *Server) OnStateChange() {
// views in the Panel correctly display 0.
if st == environment.ProcessOfflineState {
s.resources.Reset()
s.emitProcUsage()
s.Events().Publish(StatsEvent, s.Proc())
}
// If server was in an online state, and is now in an offline state we should handle
@@ -355,11 +369,3 @@ func (s *Server) ToAPIResponse() APIResponse {
Configuration: *s.Config(),
}
}
func (s *Server) LogSink() *sinkPool {
return s.logSink
}
func (s *Server) InstallSink() *sinkPool {
return s.installSink
}

View File

@@ -2,71 +2,116 @@ package server
import (
"sync"
"time"
)
// SinkName represents one of the registered sinks for a server.
type SinkName string
const (
// LogSink handles console output for game servers, including messages being
// sent via Wings to the console instance.
LogSink SinkName = "log"
// InstallSink handles installation output for a server.
InstallSink SinkName = "install"
)
// sinkPool represents a pool with sinks.
type sinkPool struct {
mx sync.RWMutex
mu sync.RWMutex
sinks []chan []byte
}
// newSinkPool returns a new empty sinkPool.
// newSinkPool returns a new empty sinkPool. A sink pool generally lives with a
// server instance for its full lifetime.
func newSinkPool() *sinkPool {
return &sinkPool{}
}
// Off removes a sink from the pool.
// On adds a channel to the sink pool instance.
func (p *sinkPool) On(c chan []byte) {
p.mu.Lock()
p.sinks = append(p.sinks, c)
p.mu.Unlock()
}
// Off removes a given channel from the sink pool. If no matching sink is found
// this function is a no-op. If a matching channel is found, it will be removed.
func (p *sinkPool) Off(c chan []byte) {
p.mx.Lock()
defer p.mx.Unlock()
p.mu.Lock()
defer p.mu.Unlock()
sinks := p.sinks
for i, sink := range sinks {
if c != sink {
continue
}
// We need to maintain the order of the sinks in the slice we're tracking,
// so shift everything to the left, rather than changing the order of the
// elements.
copy(sinks[i:], sinks[i+1:])
sinks[len(sinks)-1] = nil
sinks = sinks[:len(sinks)-1]
p.sinks = sinks
// Avoid a panic if the sink channel is nil at this point.
if c != nil {
close(c)
}
return
}
}
// On adds a sink on the pool.
func (p *sinkPool) On(c chan []byte) {
p.mx.Lock()
defer p.mx.Unlock()
p.sinks = append(p.sinks, c)
}
// Destroy destroys the pool by removing and closing all sinks.
// Destroy destroys the pool by removing and closing all sinks and destroying
// all of the channels that are present.
func (p *sinkPool) Destroy() {
p.mx.Lock()
defer p.mx.Unlock()
p.mu.Lock()
defer p.mu.Unlock()
for _, c := range p.sinks {
if c != nil {
close(c)
}
}
p.sinks = nil
}
// Push pushes a message to all registered sinks.
func (p *sinkPool) Push(v []byte) {
p.mx.RLock()
// Push sends a given message to each of the channels registered in the pool.
func (p *sinkPool) Push(data []byte) {
p.mu.RLock()
// Attempt to send the data over to the channels. If the channel buffer is full,
// or otherwise blocked for some reason (such as being a nil channel), just discard
// the event data and move on to the next channel in the slice. If you don't
// implement the "default" on the select you'll block execution until the channel
// becomes unblocked, which is not what we want to do here.
for _, c := range p.sinks {
// TODO: should this be done in parallel?
select {
// Send the log output to the channel
case c <- v:
// Timeout after 100 milliseconds, this will cause the write to the channel to be cancelled.
case <-time.After(100 * time.Millisecond):
case c <- data:
default:
}
}
p.mx.RUnlock()
p.mu.RUnlock()
}
// Sink returns the instantiated and named sink for a server. If the sink has
// not been configured yet this function will cause a panic condition.
func (s *Server) Sink(name SinkName) *sinkPool {
sink, ok := s.sinks[name]
if !ok {
s.Log().Fatalf("attempt to access nil sink: %s", name)
}
return sink
}
// DestroyAllSinks iterates over all of the sinks configured for the server and
// destroys their instances. Note that this will cause a panic if you attempt
// to call Server.Sink() again after. This function is only used when a server
// is being deleted from the system.
func (s *Server) DestroyAllSinks() {
s.Log().Info("destroying all registered sinks for server instance")
for _, sink := range s.sinks {
sink.Destroy()
}
}

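Consumers now reach these pools through the named accessor instead of the old LogSink()/InstallSink() getters, as the websocket and install handler changes earlier in this diff show. A sketch of an external subscriber, assuming s is a *server.Server; the channel should be buffered, because Push silently skips any channel that cannot accept the write immediately:

```go
// streamConsole is a hypothetical subscriber to a server's log sink.
func streamConsole(s *server.Server) {
	out := make(chan []byte, 8)
	s.Sink(server.LogSink).On(out)

	// The loop ends when the channel is closed, i.e. when Off(out) is called
	// elsewhere or the pool is destroyed along with the server.
	for line := range out {
		fmt.Println(string(line))
	}
}
```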
server/sink_test.go (new file, 189 lines)
View File

@@ -0,0 +1,189 @@
package server
import (
"reflect"
"sync"
"testing"
. "github.com/franela/goblin"
)
func MutexLocked(m *sync.RWMutex) bool {
v := reflect.ValueOf(m).Elem()
state := v.FieldByName("w").FieldByName("state")
return state.Int()&1 == 1 || v.FieldByName("readerCount").Int() > 0
}
func TestSink(t *testing.T) {
g := Goblin(t)
g.Describe("SinkPool#On", func() {
g.It("pushes additional channels to a sink", func() {
pool := &sinkPool{}
g.Assert(pool.sinks).IsZero()
c1 := make(chan []byte, 1)
pool.On(c1)
g.Assert(len(pool.sinks)).Equal(1)
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
})
g.Describe("SinkPool#Off", func() {
var pool *sinkPool
g.BeforeEach(func() {
pool = &sinkPool{}
})
g.It("works when no sinks are registered", func() {
ch := make(chan []byte, 1)
g.Assert(pool.sinks).IsZero()
pool.Off(ch)
g.Assert(pool.sinks).IsZero()
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
g.It("does not remove any sinks when the channel does not match", func() {
ch := make(chan []byte, 1)
ch2 := make(chan []byte, 1)
pool.On(ch)
g.Assert(len(pool.sinks)).Equal(1)
pool.Off(ch2)
g.Assert(len(pool.sinks)).Equal(1)
g.Assert(pool.sinks[0]).Equal(ch)
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
g.It("removes a channel and maintains the order", func() {
channels := make([]chan []byte, 8)
for i := 0; i < len(channels); i++ {
channels[i] = make(chan []byte, 1)
pool.On(channels[i])
}
g.Assert(len(pool.sinks)).Equal(8)
pool.Off(channels[2])
g.Assert(len(pool.sinks)).Equal(7)
g.Assert(pool.sinks[1]).Equal(channels[1])
g.Assert(pool.sinks[2]).Equal(channels[3])
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
g.It("does not panic if a nil channel is provided", func() {
ch := make([]chan []byte, 1)
defer func() {
if r := recover(); r != nil {
g.Fail("removing a nil channel should not cause a panic")
}
}()
pool.On(ch[0])
pool.Off(ch[0])
g.Assert(len(pool.sinks)).Equal(0)
})
})
g.Describe("SinkPool#Push", func() {
var pool *sinkPool
g.BeforeEach(func() {
pool = &sinkPool{}
})
g.It("works when no sinks are registered", func() {
g.Assert(len(pool.sinks)).IsZero()
pool.Push([]byte("test"))
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
g.It("sends data to every registered sink", func() {
ch1 := make(chan []byte, 1)
ch2 := make(chan []byte, 1)
pool.On(ch1)
pool.On(ch2)
g.Assert(len(pool.sinks)).Equal(2)
b := []byte("test")
pool.Push(b)
g.Assert(MutexLocked(&pool.mu)).IsFalse()
g.Assert(<-ch1).Equal(b)
g.Assert(<-ch2).Equal(b)
g.Assert(len(pool.sinks)).Equal(2)
})
g.It("does not block if a channel is nil or otherwise full", func() {
ch := make([]chan []byte, 2)
ch[1] = make(chan []byte, 1)
ch[1] <- []byte("test")
pool.On(ch[0])
pool.On(ch[1])
pool.Push([]byte("testing"))
g.Assert(MutexLocked(&pool.mu)).IsFalse()
g.Assert(<-ch[1]).Equal([]byte("test"))
pool.Push([]byte("test2"))
g.Assert(<-ch[1]).Equal([]byte("test2"))
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
})
g.Describe("SinkPool#Destroy", func() {
var pool *sinkPool
g.BeforeEach(func() {
pool = &sinkPool{}
})
g.It("works if no sinks are registered", func() {
pool.Destroy()
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
g.It("closes all channels fully", func() {
ch1 := make(chan []byte, 1)
ch2 := make(chan []byte, 1)
pool.On(ch1)
pool.On(ch2)
g.Assert(len(pool.sinks)).Equal(2)
pool.Destroy()
g.Assert(pool.sinks).IsZero()
defer func() {
r := recover()
g.Assert(r).IsNotNil()
g.Assert(r.(error).Error()).Equal("send on closed channel")
}()
ch1 <- []byte("test")
})
g.It("works when a sink channel is nil", func() {
ch := make([]chan []byte, 2)
pool.On(ch[0])
pool.On(ch[1])
pool.Destroy()
g.Assert(MutexLocked(&pool.mu)).IsFalse()
})
})
}

View File

@@ -119,6 +119,9 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system")
return nil, sftp.ErrSSHFxFailure
}
// Chown may or may not have been called in the touch function, so always do
// it at this point to avoid the file being improperly owned.
_ = h.server.Filesystem().Chown(request.Filepath)
return f, nil
}

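The fix above boils down to a simple rule: after opening or creating a file on a user's behalf, normalize its ownership unconditionally, because an earlier code path (here, the touch helper) may or may not have done so already. A generic sketch of the pattern, with uid/gid standing in for the daemon's configured user:

```go
package main

import (
	"log"
	"os"
)

// openOwned opens or creates path for writing, then forces ownership,
// mirroring the always-chown-after-open pattern of the SFTP fix above.
func openOwned(path string, uid, gid int) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o644)
	if err != nil {
		return nil, err
	}
	// The chown may be redundant, but its cost is trivial next to the
	// upload itself and it guarantees consistent ownership.
	if err := os.Chown(path, uid, gid); err != nil {
		log.Printf("could not chown %s: %v", path, err)
	}
	return f, nil
}

func main() {
	f, err := openOwned("/tmp/example.txt", os.Getuid(), os.Getgid())
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
}
```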
View File

@@ -4,7 +4,6 @@ import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"strconv"
@@ -12,6 +11,7 @@ import (
"time"
"emperror.dev/errors"
"github.com/goccy/go-json"
)
var (
@@ -19,6 +19,11 @@ var (
crr = []byte("\r\n")
)
// The maximum size of the buffer used to send output over the console to
// clients. Once this length is reached, the line will be truncated and sent
// as is.
var maxBufferSize = 64 * 1024
// FirstNotEmpty returns the first string passed in that is not an empty value.
func FirstNotEmpty(v ...string) string {
for _, val := range v {
@@ -37,12 +42,22 @@ func MustInt(v string) int {
return i
}
// ScanReader reads up to 64KB per line from the reader and emits each line
// through the callback. If a line exceeds that size it is truncated and only
// the first 64KB are sent over.
func ScanReader(r io.Reader, callback func(line []byte)) error {
br := bufio.NewReader(r)
// Based on benchmarking this seems to be the best size for the reader buffer
// to maintain fast enough workflows without hammering the CPU for allocations.
//
// Additionally, most games are outputting a high-frequency of smaller lines,
// rather than a bunch of massive lines. This allocation amount is the total
// number of bytes being output for each call to ReadLine() before it moves on
// to the next data pull.
br := bufio.NewReaderSize(r, 256)
// Avoid constantly re-allocating memory when we're flooding lines through this
// function by using the same buffer for the duration of the call and just truncating
// the value back to 0 every loop.
buf := &bytes.Buffer{}
var buf bytes.Buffer
for {
buf.Reset()
var err error
@@ -52,32 +67,54 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
for {
// Read the line and write it to the buffer.
line, isPrefix, err = br.ReadLine()
// Certain games like Minecraft output absolutely random carriage returns in the output seemingly
// in line with that it thinks is the terminal size. Those returns break a lot of output handling,
// so we'll just replace them with proper new-lines and then split it later and send each line as
// its own event in the response.
buf.Write(bytes.Replace(line, cr, crr, -1))
// Finish this loop and begin outputting the line if there is no prefix (the line fit into
// the default buffer), or if we hit the end of the line.
line = bytes.Replace(line, cr, crr, -1)
ns := buf.Len() + len(line)
// If the length of the line value and the current value in the buffer will
// exceed the maximum buffer size, chop it down to hit the maximum size and
// then send that data over the socket before ending this loop.
//
// This ensures that we send as much data as possible, without allowing very
// long lines to grow the buffer size excessively and potentially DOS the Wings
// instance. If the line is not too long, just store the whole value into the
// buffer. This is kind of a re-implementation of the bufio.Scanner.Scan() logic
// without triggering an error when you exceed this buffer size.
if ns > maxBufferSize {
buf.Write(line[:len(line)-(ns-maxBufferSize)])
break
} else {
buf.Write(line)
}
// Finish this loop and begin outputting the line if there is no prefix
// (the line fit into the default buffer), or if we hit the end of the line.
if !isPrefix || err == io.EOF {
break
}
// If we encountered an error with something in ReadLine that was not an EOF just abort
// the entire process here.
// If we encountered an error with something in ReadLine that was not an
// EOF just abort the entire process here.
if err != nil {
return err
}
}
// Ensure that the scanner is always able to read the last line.
_, _ = buf.Write([]byte("\r\n"))
// Publish the line for this loop. Break on new-line characters so every line is sent as a single
// output event, otherwise you get funky handling in the browser console.
s := bufio.NewScanner(buf)
for s.Scan() {
callback(s.Bytes())
// Send the full buffer contents over to the event handler to be emitted on
// the websocket. The front-end can handle line breaks in the middle of the
// output; it simply expects each emitted event to end with a newline.
if buf.Len() > 0 {
// You need to make a copy of the buffer here because the callback will encounter
// a race condition since "buf.Bytes()" is going to be by-reference if passed directly.
c := make([]byte, buf.Len())
copy(c, buf.Bytes())
callback(c)
}
// If the error we got previously that lead to the line being output is an io.EOF we want to
// exit the entire looping process.
// If the error we got previously that led to the line being output is
// an io.EOF we want to exit the entire looping process.
if err == io.EOF {
break
}

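Putting the reworked ScanReader together: each callback invocation now receives at most maxBufferSize bytes per line, and, per the "importance of the copy" commit, the slice is a private copy that is safe to retain or hand to another goroutine. A small driver:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/pterodactyl/wings/system"
)

func main() {
	r := strings.NewReader("first line\nsecond\rline\nthird line\n")
	err := system.ScanReader(r, func(line []byte) {
		// line is a copy of the internal buffer, so retaining it is safe;
		// stray \r bytes have been rewritten to \r\n by this point.
		fmt.Printf("console: %q\n", line)
	})
	if err != nil {
		fmt.Println("scan error:", err)
	}
}
```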
system/utils_test.go (new file, 59 lines)
View File

@@ -0,0 +1,59 @@
package system
import (
"math/rand"
"strings"
"testing"
"time"
. "github.com/franela/goblin"
)
func Test_Utils(t *testing.T) {
g := Goblin(t)
g.Describe("ScanReader", func() {
g.BeforeEach(func() {
maxBufferSize = 10
})
g.It("should truncate and return long lines", func() {
reader := strings.NewReader("hello world this is a long line\nof text that should be truncated\nnot here\nbut definitely on this line")
var lines []string
err := ScanReader(reader, func(line []byte) {
lines = append(lines, string(line))
})
g.Assert(err).IsNil()
g.Assert(lines).Equal([]string{"hello worl", "of text th", "not here", "but defini"})
})
g.It("should replace cariage returns with newlines", func() {
reader := strings.NewReader("test\rstring\r\nanother\rline\nhodor\r\r\rheld the door\nmaterial gourl\n")
var lines []string
err := ScanReader(reader, func(line []byte) {
lines = append(lines, string(line))
})
g.Assert(err).IsNil()
g.Assert(lines).Equal([]string{"test\rstrin", "another\rli", "hodor\r\r\rhe", "material g"})
})
})
}
func Benchmark_ScanReader(b *testing.B) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
var str string
for i := 0; i < 10; i++ {
str += strings.Repeat("hello \rworld", r.Intn(2000)) + "\n"
}
reader := strings.NewReader(str)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = ScanReader(reader, func(line []byte) {
// no op
})
}
}