Compare commits

...

15 Commits

Author SHA1 Message Date
Pterodactyl CI
be5ad761ea bump version for release 2022-01-31 01:31:09 +00:00
Dane Everitt
4b702052c7 Update CHANGELOG.md 2022-01-30 20:27:26 -05:00
Dane Everitt
7ee6c48fb0 Use a more efficient logging format for containers
The JSON log driver adds a huge amount of overhead in Docker when we're trying to process large amounts of log data. It makes more sense to just use a better format.
2022-01-30 19:51:23 -05:00
Dane Everitt
2b2b5200eb Rewrite console throttling logic; drop complex timer usage and use a very simple throttle
This also removes the server process termination logic that kicked in when a server breached the output limits. Wings now simply continues to throttle the console output efficiently.
2022-01-30 19:31:04 -05:00
Dane Everitt
fb73d5dbbf Always run pprof when running debug through makefile 2022-01-30 15:11:17 -05:00
Dane Everitt
fd7ec2aaac Generate normal and debug artifacts 2022-01-30 15:06:56 -05:00
Dane Everitt
c3df8d2309 Add support for proper use of pprof 2022-01-30 14:50:37 -05:00
Dane Everitt
1965e68a78 Include debug symbols in non-release binaries 2022-01-30 14:05:55 -05:00
Dane Everitt
3208b8579b Add test coverage 2022-01-30 13:58:36 -05:00
Dane Everitt
c4ee82c4dc Code cleanup, providing better commentary to decisions 2022-01-30 12:56:25 -05:00
Dane Everitt
0ec0fffa4d Handle future scenarios where we forgot to add a listener 2022-01-30 11:58:53 -05:00
Dane Everitt
57daf0889a Cleanup logic for updating stats to avoid calling mutex outside of file 2022-01-30 11:55:59 -05:00
Dane Everitt
d7c7155802 Make the powerlocker logic a little more idiomatic 2022-01-30 11:46:27 -05:00
Dane Everitt
11ae5e69ed Improve performance of console output watcher; work directly with bytes rather than string conversions 2022-01-30 11:28:06 -05:00
Dane Everitt
fab88a380e Use buffered channels and ring-buffer logic when processing console data
This change fixes pterodactyl/panel#3921 by implementing logic to drop the oldest message in a channel and push the newest message onto the channel when the channel buffer is full.

This is distinctly different from the previous implementation, which just dropped the newest messages, leading to confusing behavior on the client side when a large amount of data was sent over the connection.

Up to 10ms per channel is allowed for blocking before falling back to the drop logic.
2022-01-30 10:55:45 -05:00
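
For context, the drop-oldest behaviour this commit describes comes down to a small piece of channel logic. The sketch below is illustrative only (the real implementation lives in server/sinks.go further down in this diff, where it runs once per registered channel under a WaitGroup); the push/main names are made up here, but the 10ms grace period and the evict-then-send fallback mirror the description above.

package main

import (
	"fmt"
	"time"
)

// push attempts to deliver data to a buffered channel without blocking the
// producer for more than roughly 10ms. If the send cannot complete in time,
// the oldest queued message is dropped and the new one is enqueued instead,
// turning the channel into a rolling buffer.
func push(c chan []byte, data []byte) {
	select {
	case c <- data:
		// Fast path: the channel had free capacity.
	case <-time.After(10 * time.Millisecond):
		// Nothing to evict (e.g. an unbuffered channel with no reader):
		// skip the message rather than block the producer.
		if len(c) == 0 {
			return
		}
		// Drop the oldest message, then enqueue the newest one.
		<-c
		c <- data
	}
}

func main() {
	ch := make(chan []byte, 1)
	push(ch, []byte("first"))
	push(ch, []byte("second")) // evicts "first" after the 10ms grace period
	fmt.Println(string(<-ch))  // prints "second"
}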
28 changed files with 785 additions and 513 deletions

View File

@@ -56,16 +56,21 @@ jobs:
CGO_ENABLED: 0 CGO_ENABLED: 0
SRC_PATH: github.com/pterodactyl/wings SRC_PATH: github.com/pterodactyl/wings
run: | run: |
go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${{ matrix.goos }}_${{ matrix.goarch }} wings.go go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH} wings.go
upx build/wings_${{ matrix.goos }}_${{ matrix.goarch }} go build -v -trimpath -ldflags="-X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${GOOS}_${GOARCH}_debug wings.go
chmod +x build/wings_${{ matrix.goos }}_${{ matrix.goarch }} upx build/wings_${GOOS}_${{ matrix.goarch }}
chmod +x build/*
- name: Tests - name: Tests
run: go test ./...
- name: Tests (Race)
run: go test -race ./... run: go test -race ./...
- name: Upload Artifact - name: Upload Release Artifact
uses: actions/upload-artifact@v2 uses: actions/upload-artifact@v2
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }} if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
with: with:
name: wings_${{ matrix.goos }}_${{ matrix.goarch }} name: wings_linux_${{ matrix.goarch }}
path: build/wings_${{ matrix.goos }}_${{ matrix.goarch }} path: build/wings_linux_${{ matrix.goarch }}
- name: Upload Debug Artifact
uses: actions/upload-artifact@v2
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
with:
name: wings_linux_${{ matrix.goarch }}_debug
path: build/wings_linux_${{ matrix.goarch }}_debug

.gitignore (vendored, 1 line changed)
View File

@@ -49,3 +49,4 @@ debug
.DS_Store .DS_Store
*.pprof *.pprof
*.pdf *.pdf
pprof.*

View File

@@ -1,5 +1,15 @@
# Changelog # Changelog
## v1.6.0
### Fixed
* Internal logic for processing a server start event has been adjusted to attach to the Docker container before attempting to start the container. This should fix issues where a server would get stuck after pulling the container image.
* Fixes a bug in the console output that was dropping console lines when a large number of lines were sent at once.
### Changed
* Removed the console throttle logic that would terminate a server instance that was sending too much data. This has been replaced with simpler logic that only throttles the console; it does not attempt to terminate the server. In addition, this change reduces the number of goroutines needed by the application and dramatically simplifies the internal logic.
* Removed the `--profiler` flag and replaced it with `--pprof`, which starts an internal server listening on `localhost:6060`, allowing you to use Go's standard `pprof` tooling.
* Replaced the `json-file` log driver for Docker containers with `local` to reduce overhead when streaming logs from instances.
## v1.5.6 ## v1.5.6
### Fixed ### Fixed
* Rewrote handler logic for the power actions lock to hopefully address issues people have been having when a server crashes and they're unable to start it again until restarting Wings. * Rewrote handler logic for the power actions lock to hopefully address issues people have been having when a server crashes and they're unable to start it again until restarting Wings.

View File

@@ -5,8 +5,8 @@ build:
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go
debug: debug:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
sudo ./wings --debug --ignore-certificate-errors --config config.yml sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof
# Runs a remotely debuggable session for Wings allowing an IDE to connect and target # Runs a remotely debuggable session for Wings allowing an IDE to connect and target
# different breakpoints. # different breakpoints.

View File

@@ -7,6 +7,7 @@ import (
"fmt" "fmt"
log2 "log" log2 "log"
"net/http" "net/http"
_ "net/http/pprof"
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
@@ -20,7 +21,6 @@ import (
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/gammazero/workerpool" "github.com/gammazero/workerpool"
"github.com/mitchellh/colorstring" "github.com/mitchellh/colorstring"
"github.com/pkg/profile"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"golang.org/x/crypto/acme" "golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert" "golang.org/x/crypto/acme/autocert"
@@ -75,7 +75,8 @@ func init() {
rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode") rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
// Flags specifically used when running the API. // Flags specifically used when running the API.
rootCommand.Flags().String("profiler", "", "the profiler to run for this instance") rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt") rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate") rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls") rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
@@ -86,25 +87,6 @@ func init() {
} }
func rootCmdRun(cmd *cobra.Command, _ []string) { func rootCmdRun(cmd *cobra.Command, _ []string) {
switch cmd.Flag("profiler").Value.String() {
case "cpu":
defer profile.Start(profile.CPUProfile).Stop()
case "mem":
defer profile.Start(profile.MemProfile).Stop()
case "alloc":
defer profile.Start(profile.MemProfile, profile.MemProfileAllocs).Stop()
case "heap":
defer profile.Start(profile.MemProfile, profile.MemProfileHeap).Stop()
case "routines":
defer profile.Start(profile.GoroutineProfile).Stop()
case "mutex":
defer profile.Start(profile.MutexProfile).Stop()
case "threads":
defer profile.Start(profile.ThreadcreationProfile).Stop()
case "block":
defer profile.Start(profile.BlockProfile).Stop()
}
printLogo() printLogo()
log.Debug("running in debug mode") log.Debug("running in debug mode")
log.WithField("config_file", configPath).Info("loading configuration from file") log.WithField("config_file", configPath).Info("loading configuration from file")
@@ -325,6 +307,14 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
TLSConfig: config.DefaultTLSConfig, TLSConfig: config.DefaultTLSConfig,
} }
profile, _ := cmd.Flags().GetBool("pprof")
if profile {
profilePort, _ := cmd.Flags().GetInt("pprof-port")
go func() {
http.ListenAndServe(fmt.Sprintf("localhost:%d", profilePort), nil)
}()
}
// Check if the server should run with TLS but using autocert. // Check if the server should run with TLS but using autocert.
if autotls { if autotls {
m := autocert.Manager{ m := autocert.Manager{
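
For reference, the --pprof wiring above follows the standard net/http/pprof pattern. A minimal standalone sketch, assuming only the default localhost:6060 bind used by the new flag; everything outside the blank import and the ListenAndServe call is illustrative.

package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers the /debug/pprof/* handlers on the default mux
)

func main() {
	// Serve the pprof endpoints on localhost only, mirroring the default
	// localhost:6060 used by the new --pprof flag.
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()

	// ... the rest of the application runs here; profiles can then be pulled
	// with `go tool pprof http://localhost:6060/debug/pprof/heap`.
	select {}
}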

View File

@@ -222,26 +222,14 @@ type ConsoleThrottles struct {
// Whether or not the throttler is enabled for this instance. // Whether or not the throttler is enabled for this instance.
Enabled bool `json:"enabled" yaml:"enabled" default:"true"` Enabled bool `json:"enabled" yaml:"enabled" default:"true"`
// The total number of lines that can be output in a given LineResetInterval period before // The total number of lines that can be output in a given Period period before
// a warning is triggered and counted against the server. // a warning is triggered and counted against the server.
Lines uint64 `json:"lines" yaml:"lines" default:"2000"` Lines uint64 `json:"lines" yaml:"lines" default:"2000"`
// The total number of throttle activations that can accumulate before a server is considered
// to be breaching and will be stopped. This value is decremented by one every DecayInterval.
MaximumTriggerCount uint64 `json:"maximum_trigger_count" yaml:"maximum_trigger_count" default:"5"`
// The amount of time after which the number of lines processed is reset to 0. This runs in // The amount of time after which the number of lines processed is reset to 0. This runs in
// a constant loop and is not affected by the current console output volumes. By default, this // a constant loop and is not affected by the current console output volumes. By default, this
// will reset the processed line count back to 0 every 100ms. // will reset the processed line count back to 0 every 100ms.
LineResetInterval uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"` Period uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
// The amount of time in milliseconds that must pass without an output warning being triggered
// before a throttle activation is decremented.
DecayInterval uint64 `json:"decay_interval" yaml:"decay_interval" default:"10000"`
// The amount of time that a server is allowed to be stopping for before it is terminated
// forcefully if it triggers output throttles.
StopGracePeriod uint `json:"stop_grace_period" yaml:"stop_grace_period" default:"15"`
} }
type Configuration struct { type Configuration struct {

View File

@@ -16,7 +16,7 @@ import (
"github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/docker/docker/daemon/logger/jsonfilelog" "github.com/docker/docker/daemon/logger/local"
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
@@ -38,13 +38,13 @@ func (nw noopWriter) Write(b []byte) (int, error) {
} }
// Attach attaches to the docker container itself and ensures that we can pipe // Attach attaches to the docker container itself and ensures that we can pipe
// data in and out of the process stream. This should not be used for reading // data in and out of the process stream. This should always be called before
// console data as you *will* miss important output at the beginning because of // you have started the container, but after you've ensured it exists.
// the time delay with attaching to the output.
// //
// Calling this function will poll resources for the container in the background // Calling this function will poll resources for the container in the background
// until the provided context is canceled by the caller. Failure to cancel said // until the container is stopped. The context provided to this function is used
// context will cause background memory leaks as the goroutine will not exit. // for the purposes of attaching to the container; a second context is created
// within the function for managing polling.
func (e *Environment) Attach(ctx context.Context) error { func (e *Environment) Attach(ctx context.Context) error {
if e.IsAttached() { if e.IsAttached() {
return nil return nil
@@ -216,11 +216,12 @@ func (e *Environment) Create() error {
// since we only need it for the last few hundred lines of output and don't care // since we only need it for the last few hundred lines of output and don't care
// about anything else in it. // about anything else in it.
LogConfig: container.LogConfig{ LogConfig: container.LogConfig{
Type: jsonfilelog.Name, Type: local.Name,
Config: map[string]string{ Config: map[string]string{
"max-size": "5m", "max-size": "5m",
"max-file": "1", "max-file": "1",
"compress": "false", "compress": "false",
"mode": "non-blocking",
}, },
}, },
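
Putting the log-driver change together, the logging section of the container's host configuration ends up looking roughly like the sketch below. The driver name and options are taken from the hunk above (local.Name resolves to "local"); wrapping them in a runnable main is purely illustrative.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/daemon/logger/local"
)

func main() {
	// The "local" driver keeps a single, small, uncompressed log file and
	// writes in non-blocking mode so a slow log writer cannot stall the
	// container's stdout/stderr stream.
	logConfig := container.LogConfig{
		Type: local.Name, // "local"
		Config: map[string]string{
			"max-size": "5m",
			"max-file": "1",
			"compress": "false",
			"mode":     "non-blocking",
		},
	}
	fmt.Printf("%+v\n", logConfig)
}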

View File

@@ -27,7 +27,6 @@ var _ environment.ProcessEnvironment = (*Environment)(nil)
type Environment struct { type Environment struct {
mu sync.RWMutex mu sync.RWMutex
eventMu sync.Once
// The public identifier for this environment. In this case it is the Docker container // The public identifier for this environment. In this case it is the Docker container
// name that will be used for all instances created under it. // name that will be used for all instances created under it.
@@ -73,6 +72,7 @@ func New(id string, m *Metadata, c *environment.Configuration) (*Environment, er
meta: m, meta: m,
client: cli, client: cli,
st: system.NewAtomicString(environment.ProcessOfflineState), st: system.NewAtomicString(environment.ProcessOfflineState),
emitter: events.NewBus(),
} }
return e, nil return e, nil
@@ -86,34 +86,33 @@ func (e *Environment) Type() string {
return "docker" return "docker"
} }
// Set if this process is currently attached to the process. // SetStream sets the current stream value from the Docker client. If a nil
// value is provided we assume that the stream is no longer operational and the
// instance is effectively offline.
func (e *Environment) SetStream(s *types.HijackedResponse) { func (e *Environment) SetStream(s *types.HijackedResponse) {
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock()
e.stream = s e.stream = s
e.mu.Unlock()
} }
// Determine if this process is currently attached to the container. // IsAttached determines if this process is currently attached to the
// container instance by checking if the stream is nil or not.
func (e *Environment) IsAttached() bool { func (e *Environment) IsAttached() bool {
e.mu.RLock() e.mu.RLock()
defer e.mu.RUnlock() defer e.mu.RUnlock()
return e.stream != nil return e.stream != nil
} }
// Events returns an event bus for the environment.
func (e *Environment) Events() *events.Bus { func (e *Environment) Events() *events.Bus {
e.eventMu.Do(func() {
e.emitter = events.NewBus()
})
return e.emitter return e.emitter
} }
// Determines if the container exists in this environment. The ID passed through should be the // Exists determines if the container exists in this environment. The ID passed
// server UUID since containers are created utilizing the server UUID as the name and docker // through should be the server UUID since containers are created utilizing the
// will work fine when using the container name as the lookup parameter in addition to the longer // server UUID as the name and docker will work fine when using the container
// ID auto-assigned when the container is created. // name as the lookup parameter in addition to the longer ID auto-assigned when
// the container is created.
func (e *Environment) Exists() (bool, error) { func (e *Environment) Exists() (bool, error) {
_, err := e.ContainerInspect(context.Background()) _, err := e.ContainerInspect(context.Background())
if err != nil { if err != nil {
@@ -122,10 +121,8 @@ func (e *Environment) Exists() (bool, error) {
if client.IsErrNotFound(err) { if client.IsErrNotFound(err) {
return false, nil return false, nil
} }
return false, err return false, err
} }
return true, nil return true, nil
} }
@@ -146,7 +143,7 @@ func (e *Environment) IsRunning(ctx context.Context) (bool, error) {
return c.State.Running, nil return c.State.Running, nil
} }
// Determine the container exit state and return the exit code and whether or not // ExitState returns the container exit state, the exit code and whether or not
// the container was killed by the OOM killer. // the container was killed by the OOM killer.
func (e *Environment) ExitState() (uint32, bool, error) { func (e *Environment) ExitState() (uint32, bool, error) {
c, err := e.ContainerInspect(context.Background()) c, err := e.ContainerInspect(context.Background())
@@ -163,15 +160,13 @@ func (e *Environment) ExitState() (uint32, bool, error) {
if client.IsErrNotFound(err) { if client.IsErrNotFound(err) {
return 1, false, nil return 1, false, nil
} }
return 0, false, err return 0, false, err
} }
return uint32(c.State.ExitCode), c.State.OOMKilled, nil return uint32(c.State.ExitCode), c.State.OOMKilled, nil
} }
// Returns the environment configuration allowing a process to make modifications of the // Config returns the environment configuration allowing a process to make
// environment on the fly. // modifications of the environment on the fly.
func (e *Environment) Config() *environment.Configuration { func (e *Environment) Config() *environment.Configuration {
e.mu.RLock() e.mu.RLock()
defer e.mu.RUnlock() defer e.mu.RUnlock()
@@ -179,12 +174,11 @@ func (e *Environment) Config() *environment.Configuration {
return e.Configuration return e.Configuration
} }
// Sets the stop configuration for the environment. // SetStopConfiguration sets the stop configuration for the environment.
func (e *Environment) SetStopConfiguration(c remote.ProcessStopConfiguration) { func (e *Environment) SetStopConfiguration(c remote.ProcessStopConfiguration) {
e.mu.Lock() e.mu.Lock()
defer e.mu.Unlock()
e.meta.Stop = c e.meta.Stop = c
e.mu.Unlock()
} }
func (e *Environment) SetImage(i string) { func (e *Environment) SetImage(i string) {

View File

@@ -111,6 +111,13 @@ func (e *Environment) Start(ctx context.Context) error {
actx, cancel := context.WithTimeout(ctx, time.Second*30) actx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel() defer cancel()
// You must attach to the instance _before_ you start the container. If you do this
// in the opposite order you'll enter a deadlock condition where we're attached to
// the instance successfully, but the container has already stopped and you'll get
// the entire program into a very confusing state.
//
// By explicitly attaching to the instance before we start it, we can immediately
// react to errors/output stopping/etc. when starting.
if err := e.Attach(actx); err != nil { if err := e.Attach(actx); err != nil {
return err return err
} }
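
A simplified sketch of the attach-before-start ordering the new comment describes, using the Docker SDK calls directly rather than Wings' own Attach/Start methods; the function name and the attach options shown here are illustrative assumptions, not the project's code.

package example

import (
	"context"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// startContainer attaches to the container's output stream first, then issues
// the start call, so no early output (or an immediate crash) is missed.
func startContainer(ctx context.Context, cli *client.Client, id string) error {
	attachCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	// Attaching to a container that is not yet running simply wires up the
	// hijacked stream that will receive output once the process begins.
	stream, err := cli.ContainerAttach(attachCtx, id, types.ContainerAttachOptions{
		Stream: true,
		Stdin:  true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		return err
	}
	defer stream.Close()

	// Only now start the container; anything it prints is already captured.
	return cli.ContainerStart(ctx, id, types.ContainerStartOptions{})
}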

View File

@@ -1,6 +1,7 @@
package remote package remote
import ( import (
"bytes"
"regexp" "regexp"
"strings" "strings"
@@ -85,37 +86,38 @@ type SftpAuthResponse struct {
type OutputLineMatcher struct { type OutputLineMatcher struct {
// The raw string to match against. This may or may not be prefixed with // The raw string to match against. This may or may not be prefixed with
// regex: which indicates we want to match against the regex expression. // regex: which indicates we want to match against the regex expression.
raw string raw []byte
reg *regexp.Regexp reg *regexp.Regexp
} }
// Matches determines if a given string "s" matches the given line. // Matches determines if the provided byte string matches the given regex or
func (olm *OutputLineMatcher) Matches(s string) bool { // raw string provided to the matcher.
func (olm *OutputLineMatcher) Matches(s []byte) bool {
if olm.reg == nil { if olm.reg == nil {
return strings.Contains(s, olm.raw) return bytes.Contains(s, olm.raw)
} }
return olm.reg.Match(s)
return olm.reg.MatchString(s)
} }
// String returns the matcher's raw comparison string. // String returns the matcher's raw comparison string.
func (olm *OutputLineMatcher) String() string { func (olm *OutputLineMatcher) String() string {
return olm.raw return string(olm.raw)
} }
// UnmarshalJSON unmarshals the startup lines into individual structs for easier // UnmarshalJSON unmarshals the startup lines into individual structs for easier
// matching abilities. // matching abilities.
func (olm *OutputLineMatcher) UnmarshalJSON(data []byte) error { func (olm *OutputLineMatcher) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &olm.raw); err != nil { var r string
if err := json.Unmarshal(data, &r); err != nil {
return err return err
} }
if strings.HasPrefix(olm.raw, "regex:") && len(olm.raw) > 6 { olm.raw = []byte(r)
r, err := regexp.Compile(strings.TrimPrefix(olm.raw, "regex:")) if bytes.HasPrefix(olm.raw, []byte("regex:")) && len(olm.raw) > 6 {
r, err := regexp.Compile(strings.TrimPrefix(string(olm.raw), "regex:"))
if err != nil { if err != nil {
log.WithField("error", err).WithField("raw", olm.raw).Warn("failed to compile output line marked as being regex") log.WithField("error", err).WithField("raw", string(olm.raw)).Warn("failed to compile output line marked as being regex")
} }
olm.reg = r olm.reg = r
} }
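
A usage sketch for the byte-based matcher, assuming (as the listeners.go hunk below suggests) that Startup.Done is a slice of these matchers; the JSON payload and the console line are made-up examples.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/pterodactyl/wings/remote"
)

func main() {
	// Two matchers in the string form UnmarshalJSON accepts: a plain
	// substring and a regex-prefixed pattern.
	var matchers []*remote.OutputLineMatcher
	payload := []byte(`[")! For help, type ", "regex:Done \\(.+\\)!"]`)
	if err := json.Unmarshal(payload, &matchers); err != nil {
		panic(err)
	}

	// Matching now happens directly against the raw byte slice of a console
	// line, avoiding a string conversion per line of output.
	line := []byte(`[12:00:01] [Server thread/INFO]: Done (3.2s)! For help, type "help"`)
	for _, m := range matchers {
		fmt.Printf("%q matches: %v\n", m.String(), m.Matches(line))
	}
}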

View File

@@ -89,8 +89,8 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
defer cancel() defer cancel()
eventChan := make(chan events.Event) eventChan := make(chan events.Event)
logOutput := make(chan []byte) logOutput := make(chan []byte, 8)
installOutput := make(chan []byte) installOutput := make(chan []byte, 4)
h.server.Events().On(eventChan, e...) h.server.Events().On(eventChan, e...)
h.server.Sink(server.LogSink).On(logOutput) h.server.Sink(server.LogSink).On(logOutput)
h.server.Sink(server.InstallSink).On(installOutput) h.server.Sink(server.InstallSink).On(installOutput)

View File

@@ -1,15 +1,11 @@
package server package server
import ( import (
"context"
"fmt" "fmt"
"sync" "sync"
"sync/atomic"
"time" "time"
"emperror.dev/errors"
"github.com/mitchellh/colorstring" "github.com/mitchellh/colorstring"
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system" "github.com/pterodactyl/wings/system"
) )
@@ -18,118 +14,8 @@ import (
// the configuration every time we need to send output along to the websocket for // the configuration every time we need to send output along to the websocket for
// a server. // a server.
var appName string var appName string
var appNameSync sync.Once var appNameSync sync.Once
var ErrTooMuchConsoleData = errors.New("console is outputting too much data")
type ConsoleThrottler struct {
mu sync.Mutex
config.ConsoleThrottles
// The total number of activations that have occurred thus far.
activations uint64
// The total number of lines that have been sent since the last reset timer period.
count uint64
// Whether or not the console output is being throttled. It is up to calling code to
// determine what to do if it is.
isThrottled *system.AtomicBool
// The total number of lines processed so far during the given time period.
timerCancel *context.CancelFunc
}
// Resets the state of the throttler.
func (ct *ConsoleThrottler) Reset() {
atomic.StoreUint64(&ct.count, 0)
atomic.StoreUint64(&ct.activations, 0)
ct.isThrottled.Store(false)
}
// Triggers an activation for a server. You can also decrement the number of activations
// by passing a negative number.
func (ct *ConsoleThrottler) markActivation(increment bool) uint64 {
if !increment {
if atomic.LoadUint64(&ct.activations) == 0 {
return 0
}
// This weird dohicky subtracts 1 from the activation count.
return atomic.AddUint64(&ct.activations, ^uint64(0))
}
return atomic.AddUint64(&ct.activations, 1)
}
// Determines if the console is currently being throttled. Calls to this function can be used to
// determine if output should be funneled along to the websocket processes.
func (ct *ConsoleThrottler) Throttled() bool {
return ct.isThrottled.Load()
}
// Starts a timer that runs in a separate thread and will continually decrement the lines processed
// and number of activations, regardless of the current console message volume. All of the timers
// are canceled if the context passed through is canceled.
func (ct *ConsoleThrottler) StartTimer(ctx context.Context) {
system.Every(ctx, time.Duration(int64(ct.LineResetInterval))*time.Millisecond, func(_ time.Time) {
ct.isThrottled.Store(false)
atomic.StoreUint64(&ct.count, 0)
})
system.Every(ctx, time.Duration(int64(ct.DecayInterval))*time.Millisecond, func(_ time.Time) {
ct.markActivation(false)
})
}
// Handles output from a server's console. This code ensures that a server is not outputting
// an excessive amount of data to the console that could indicate a malicious or run-away process
// and lead to performance issues for other users.
//
// This was much more of a problem for the NodeJS version of the daemon which struggled to handle
// large volumes of output. However, this code is much more performant so I generally feel a lot
// better about its abilities.
//
// However, extreme output is still somewhat of a DoS attack vector against this software since we
// are still logging it to the disk temporarily and will want to avoid dumping a huge amount of
// data all at once. These values are all configurable via the wings configuration file, however the
// defaults have been in the wild for almost two years at the time of this writing, so I feel quite
// confident in them.
//
// This function returns an error if the server should be stopped due to violating throttle constraints
// and a boolean value indicating if a throttle is being violated when it is checked.
func (ct *ConsoleThrottler) Increment(onTrigger func()) error {
if !ct.Enabled {
return nil
}
// Increment the line count and if we have now output more lines than are allowed, trigger a throttle
// activation. Once the throttle is triggered and has passed the kill at value we will trigger a server
// stop automatically.
if atomic.AddUint64(&ct.count, 1) >= ct.Lines && !ct.Throttled() {
ct.isThrottled.Store(true)
if ct.markActivation(true) >= ct.MaximumTriggerCount {
return ErrTooMuchConsoleData
}
onTrigger()
}
return nil
}
// Returns the throttler instance for the server or creates a new one.
func (s *Server) Throttler() *ConsoleThrottler {
s.throttleOnce.Do(func() {
s.throttler = &ConsoleThrottler{
isThrottled: system.NewAtomicBool(false),
ConsoleThrottles: config.Get().Throttles,
}
})
return s.throttler
}
// PublishConsoleOutputFromDaemon sends output to the server console formatted // PublishConsoleOutputFromDaemon sends output to the server console formatted
// to appear correctly as being sent from Wings. // to appear correctly as being sent from Wings.
func (s *Server) PublishConsoleOutputFromDaemon(data string) { func (s *Server) PublishConsoleOutputFromDaemon(data string) {
@@ -141,3 +27,55 @@ func (s *Server) PublishConsoleOutputFromDaemon(data string) {
colorstring.Color(fmt.Sprintf("[yellow][bold][%s Daemon]:[default] %s", appName, data)), colorstring.Color(fmt.Sprintf("[yellow][bold][%s Daemon]:[default] %s", appName, data)),
) )
} }
// Throttler returns the throttler instance for the server or creates a new one.
func (s *Server) Throttler() *ConsoleThrottle {
s.throttleOnce.Do(func() {
throttles := config.Get().Throttles
period := time.Duration(throttles.Period) * time.Millisecond
s.throttler = newConsoleThrottle(throttles.Lines, period)
s.throttler.strike = func() {
s.PublishConsoleOutputFromDaemon("Server is outputting console data too quickly -- throttling...")
}
})
return s.throttler
}
type ConsoleThrottle struct {
limit *system.Rate
lock *system.Locker
strike func()
}
func newConsoleThrottle(lines uint64, period time.Duration) *ConsoleThrottle {
return &ConsoleThrottle{
limit: system.NewRate(lines, period),
lock: system.NewLocker(),
}
}
// Allow checks if the console is allowed to process more output data, or if too
// much has already been sent over the line. If there is too much output the
// strike callback function is triggered, but only if it has not already been
// triggered at this point in the process.
//
// If output is allowed, the lock on the throttler is released and the next time
// it is triggered the strike function will be re-executed.
func (ct *ConsoleThrottle) Allow() bool {
if !ct.limit.Try() {
if err := ct.lock.Acquire(); err == nil {
if ct.strike != nil {
ct.strike()
}
}
return false
}
ct.lock.Release()
return true
}
// Reset resets the console throttler internal rate limiter and overage counter.
func (ct *ConsoleThrottle) Reset() {
ct.limit.Reset()
}

server/console_test.go (new file, 62 lines)
View File

@@ -0,0 +1,62 @@
package server
import (
"testing"
"time"
"github.com/franela/goblin"
)
func TestName(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("ConsoleThrottler", func() {
g.It("keeps count of the number of overages in a time period", func() {
t := newConsoleThrottle(1, time.Second)
g.Assert(t.Allow()).IsTrue()
g.Assert(t.Allow()).IsFalse()
g.Assert(t.Allow()).IsFalse()
})
g.It("calls strike once per time period", func() {
t := newConsoleThrottle(1, time.Millisecond * 20)
var times int
t.strike = func() {
times = times + 1
}
t.Allow()
t.Allow()
t.Allow()
time.Sleep(time.Millisecond * 100)
t.Allow()
t.Reset()
t.Allow()
t.Allow()
t.Allow()
g.Assert(times).Equal(2)
})
g.It("is properly reset", func() {
t := newConsoleThrottle(10, time.Second)
for i := 0; i < 10; i++ {
g.Assert(t.Allow()).IsTrue()
}
g.Assert(t.Allow()).IsFalse()
t.Reset()
g.Assert(t.Allow()).IsTrue()
})
})
}
func BenchmarkConsoleThrottle(b *testing.B) {
t := newConsoleThrottle(10, time.Millisecond * 10)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
t.Allow()
}
}

View File

@@ -1,13 +1,13 @@
package server package server
import ( import (
"bytes"
"regexp" "regexp"
"strconv" "strconv"
"sync" "sync"
"github.com/apex/log" "github.com/apex/log"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events" "github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote" "github.com/pterodactyl/wings/remote"
@@ -50,48 +50,34 @@ func (dsl *diskSpaceLimiter) Trigger() {
}) })
} }
// processConsoleOutputEvent handles output from a server's Docker container
// and runs through different limiting logic to ensure that spam console output
// does not cause negative effects to the system. This will also monitor the
// output lines to determine if the server is started yet, and if the output is
// not being throttled, will send the data over to the websocket.
func (s *Server) processConsoleOutputEvent(v []byte) { func (s *Server) processConsoleOutputEvent(v []byte) {
t := s.Throttler() // Always process the console output, but do this in a separate thread since we
err := t.Increment(func() { // don't really care about side-effects from this call, and don't want it to block
s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.") // the console sending logic.
}) go s.onConsoleOutput(v)
// An error is only returned if the server has breached the thresholds set.
if err != nil {
// If the process is already stopping, just let it continue with that action rather than attempting
// to terminate again.
if s.Environment.State() != environment.ProcessStoppingState {
s.Environment.SetState(environment.ProcessStoppingState)
go func() { // If the console is being throttled, do nothing else with it, we don't want
s.Log().Warn("stopping server instance, violating throttle limits") // to waste time. This code previously terminated server instances after violating
s.PublishConsoleOutputFromDaemon("Your server is being stopped for outputting too much data in a short period of time.") // different throttle limits. That code was clunky and difficult to reason about,
// in addition to being a consistent pain point for users.
// Completely skip over server power actions and terminate the running instance. This gives the //
// server 15 seconds to finish stopping gracefully before it is forcefully terminated. // In the interest of building highly efficient software, that code has been removed
if err := s.Environment.WaitForStop(config.Get().Throttles.StopGracePeriod, true); err != nil { // here, and we'll rely on the host to detect bad actors through their own means.
// If there is an error set the process back to running so that this throttler is called if !s.Throttler().Allow() {
// again and hopefully kills the server. return
if s.Environment.State() != environment.ProcessOfflineState {
s.Environment.SetState(environment.ProcessRunningState)
}
s.Log().WithField("error", err).Error("failed to terminate environment after triggering throttle")
}
}()
}
} }
// If we are not throttled, go ahead and output the data. s.Sink(LogSink).Push(v)
if !t.Throttled() {
s.Sink(LogSink).Push(v)
}
// Also pass the data along to the console output channel.
s.onConsoleOutput(string(v))
} }
// StartEventListeners adds all the internal event listeners we want to use for a server. These listeners can only be // StartEventListeners adds all the internal event listeners we want to use for
// removed by deleting the server as they should last for the duration of the process' lifetime. // a server. These listeners can only be removed by deleting the server as they
// should last for the duration of the process' lifetime.
func (s *Server) StartEventListeners() { func (s *Server) StartEventListeners() {
state := make(chan events.Event) state := make(chan events.Event)
stats := make(chan events.Event) stats := make(chan events.Event)
@@ -114,13 +100,10 @@ func (s *Server) StartEventListeners() {
}() }()
case e := <-stats: case e := <-stats:
go func() { go func() {
// Update the server resource tracking object with the resources we got here. s.resources.UpdateStats(e.Data.(environment.Stats))
s.resources.mu.Lock()
s.resources.Stats = e.Data.(environment.Stats)
s.resources.mu.Unlock()
// If there is no disk space available at this point, trigger the server disk limiter logic // If there is no disk space available at this point, trigger the server
// which will start to stop the running instance. // disk limiter logic which will start to stop the running instance.
if !s.Filesystem().HasSpaceAvailable(true) { if !s.Filesystem().HasSpaceAvailable(true) {
l.Trigger() l.Trigger()
} }
@@ -134,8 +117,10 @@ func (s *Server) StartEventListeners() {
s.Events().Publish(InstallOutputEvent, e.Data) s.Events().Publish(InstallOutputEvent, e.Data)
case environment.DockerImagePullStarted: case environment.DockerImagePullStarted:
s.PublishConsoleOutputFromDaemon("Pulling Docker container image, this could take a few minutes to complete...") s.PublishConsoleOutputFromDaemon("Pulling Docker container image, this could take a few minutes to complete...")
default: case environment.DockerImagePullCompleted:
s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image") s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
default:
s.Log().WithField("topic", e.Topic).Error("unhandled docker event topic")
} }
}() }()
} }
@@ -153,27 +138,34 @@ var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-z
// Custom listener for console output events that will check if the given line // Custom listener for console output events that will check if the given line
// of output matches one that should mark the server as started or not. // of output matches one that should mark the server as started or not.
func (s *Server) onConsoleOutput(data string) { func (s *Server) onConsoleOutput(data []byte) {
// Get the server's process configuration. if s.Environment.State() != environment.ProcessStartingState && !s.IsRunning() {
return
}
processConfiguration := s.ProcessConfiguration() processConfiguration := s.ProcessConfiguration()
// Make a copy of the data provided since it is by reference, otherwise you'll
// potentially introduce a race condition by modifying the value.
v := make([]byte, len(data))
copy(v, data)
// Check if the server is currently starting. // Check if the server is currently starting.
if s.Environment.State() == environment.ProcessStartingState { if s.Environment.State() == environment.ProcessStartingState {
// Check if we should strip ansi color codes. // Check if we should strip ansi color codes.
if processConfiguration.Startup.StripAnsi { if processConfiguration.Startup.StripAnsi {
// Strip ansi color codes from the data string. v = stripAnsiRegex.ReplaceAll(v, []byte(""))
data = stripAnsiRegex.ReplaceAllString(data, "")
} }
// Iterate over all the done lines. // Iterate over all the done lines.
for _, l := range processConfiguration.Startup.Done { for _, l := range processConfiguration.Startup.Done {
if !l.Matches(data) { if !l.Matches(v) {
continue continue
} }
s.Log().WithFields(log.Fields{ s.Log().WithFields(log.Fields{
"match": l.String(), "match": l.String(),
"against": strconv.QuoteToASCII(data), "against": strconv.QuoteToASCII(string(v)),
}).Debug("detected server in running state based on console line output") }).Debug("detected server in running state based on console line output")
// If the specific line of output is one that would mark the server as started, // If the specific line of output is one that would mark the server as started,
@@ -190,7 +182,7 @@ func (s *Server) onConsoleOutput(data string) {
if s.IsRunning() { if s.IsRunning() {
stop := processConfiguration.Stop stop := processConfiguration.Stop
if stop.Type == remote.ProcessStopCommand && data == stop.Value { if stop.Type == remote.ProcessStopCommand && bytes.Equal(v, []byte(stop.Value)) {
s.Environment.SetState(environment.ProcessOfflineState) s.Environment.SetState(environment.ProcessOfflineState)
} }
} }

View File

@@ -199,7 +199,6 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
} else { } else {
s.Environment = env s.Environment = env
s.StartEventListeners() s.StartEventListeners()
s.Throttler().StartTimer(s.Context())
} }
// If the server's data directory exists, force disk usage calculation. // If the server's data directory exists, force disk usage calculation.

View File

@@ -4,7 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"os" "os"
"sync"
"time" "time"
"emperror.dev/errors" "emperror.dev/errors"
@@ -41,81 +40,6 @@ func (pa PowerAction) IsStart() bool {
return pa == PowerActionStart || pa == PowerActionRestart return pa == PowerActionStart || pa == PowerActionRestart
} }
type powerLocker struct {
mu sync.RWMutex
ch chan bool
}
func newPowerLocker() *powerLocker {
return &powerLocker{
ch: make(chan bool, 1),
}
}
type errPowerLockerLocked struct{}
func (e errPowerLockerLocked) Error() string {
return "cannot acquire a lock on the power state: already locked"
}
var ErrPowerLockerLocked error = errPowerLockerLocked{}
// IsLocked returns the current state of the locker channel. If there is
// currently a value in the channel, it is assumed to be locked.
func (pl *powerLocker) IsLocked() bool {
pl.mu.RLock()
defer pl.mu.RUnlock()
return len(pl.ch) == 1
}
// Acquire will acquire the power lock if it is not currently locked. If it is
// already locked, acquire will fail to acquire the lock, and will return false.
func (pl *powerLocker) Acquire() error {
pl.mu.Lock()
defer pl.mu.Unlock()
if len(pl.ch) == 1 {
return errors.WithStack(ErrPowerLockerLocked)
}
pl.ch <- true
return nil
}
// TryAcquire will attempt to acquire a power-lock until the context provided
// is canceled.
func (pl *powerLocker) TryAcquire(ctx context.Context) error {
select {
case pl.ch <- true:
return nil
case <-ctx.Done():
if err := ctx.Err(); err != nil {
return errors.WithStack(err)
}
return nil
}
}
// Release will drain the locker channel so that we can properly re-acquire it
// at a later time.
func (pl *powerLocker) Release() {
pl.mu.Lock()
if len(pl.ch) == 1 {
<-pl.ch
}
pl.mu.Unlock()
}
// Destroy cleans up the power locker by closing the channel.
func (pl *powerLocker) Destroy() {
pl.mu.Lock()
if pl.ch != nil {
if len(pl.ch) == 1 {
<-pl.ch
}
close(pl.ch)
}
pl.mu.Unlock()
}
// ExecutingPowerAction checks if there is currently a power action being // ExecutingPowerAction checks if there is currently a power action being
// processed for the server. // processed for the server.
func (s *Server) ExecutingPowerAction() bool { func (s *Server) ExecutingPowerAction() bool {

View File

@@ -1,154 +1,18 @@
package server package server
import ( import (
"context"
"testing" "testing"
"time"
"emperror.dev/errors"
. "github.com/franela/goblin" . "github.com/franela/goblin"
"github.com/pterodactyl/wings/system"
) )
func TestPower(t *testing.T) { func TestPower(t *testing.T) {
g := Goblin(t) g := Goblin(t)
g.Describe("PowerLocker", func() {
var pl *powerLocker
g.BeforeEach(func() {
pl = newPowerLocker()
})
g.Describe("PowerLocker#IsLocked", func() {
g.It("should return false when the channel is empty", func() {
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsFalse()
})
g.It("should return true when the channel is at capacity", func() {
pl.ch <- true
g.Assert(pl.IsLocked()).IsTrue()
<-pl.ch
g.Assert(pl.IsLocked()).IsFalse()
// We don't care what the channel value is, just that there is
// something in it.
pl.ch <- false
g.Assert(pl.IsLocked()).IsTrue()
g.Assert(cap(pl.ch)).Equal(1)
})
})
g.Describe("PowerLocker#Acquire", func() {
g.It("should acquire a lock when channel is empty", func() {
err := pl.Acquire()
g.Assert(err).IsNil()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
})
g.It("should return an error when the channel is full", func() {
pl.ch <- true
err := pl.Acquire()
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, ErrPowerLockerLocked)).IsTrue()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
})
})
g.Describe("PowerLocker#TryAcquire", func() {
g.It("should acquire a lock when channel is empty", func() {
g.Timeout(time.Second)
err := pl.TryAcquire(context.Background())
g.Assert(err).IsNil()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsTrue()
})
g.It("should block until context is canceled if channel is full", func() {
g.Timeout(time.Second)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
defer cancel()
pl.ch <- true
err := pl.TryAcquire(ctx)
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, context.DeadlineExceeded)).IsTrue()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsTrue()
})
g.It("should block until lock can be acquired", func() {
g.Timeout(time.Second)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200)
defer cancel()
pl.Acquire()
go func() {
time.AfterFunc(time.Millisecond * 50, func() {
pl.Release()
})
}()
err := pl.TryAcquire(ctx)
g.Assert(err).IsNil()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(1)
g.Assert(pl.IsLocked()).IsTrue()
})
})
g.Describe("PowerLocker#Release", func() {
g.It("should release when channel is full", func() {
pl.Acquire()
g.Assert(pl.IsLocked()).IsTrue()
pl.Release()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(0)
g.Assert(pl.IsLocked()).IsFalse()
})
g.It("should release when channel is empty", func() {
g.Assert(pl.IsLocked()).IsFalse()
pl.Release()
g.Assert(cap(pl.ch)).Equal(1)
g.Assert(len(pl.ch)).Equal(0)
g.Assert(pl.IsLocked()).IsFalse()
})
})
g.Describe("PowerLocker#Destroy", func() {
g.It("should unlock and close the channel", func() {
pl.Acquire()
g.Assert(pl.IsLocked()).IsTrue()
pl.Destroy()
g.Assert(pl.IsLocked()).IsFalse()
defer func() {
r := recover()
g.Assert(r).IsNotNil()
g.Assert(r.(error).Error()).Equal("send on closed channel")
}()
pl.Acquire()
})
})
})
g.Describe("Server#ExecutingPowerAction", func() { g.Describe("Server#ExecutingPowerAction", func() {
g.It("should return based on locker status", func() { g.It("should return based on locker status", func() {
s := &Server{powerLock: newPowerLocker()} s := &Server{powerLock: system.NewLocker()}
g.Assert(s.ExecutingPowerAction()).IsFalse() g.Assert(s.ExecutingPowerAction()).IsFalse()
s.powerLock.Acquire() s.powerLock.Acquire()

View File

@@ -38,6 +38,13 @@ func (s *Server) Proc() ResourceUsage {
return s.resources return s.resources
} }
// UpdateStats updates the current stats for the server's resource usage.
func (ru *ResourceUsage) UpdateStats(stats environment.Stats) {
ru.mu.Lock()
ru.Stats = stats
ru.mu.Unlock()
}
// Reset resets the usages values to zero, used when a server is stopped to ensure we don't hold // Reset resets the usages values to zero, used when a server is stopped to ensure we don't hold
// onto any values incorrectly. // onto any values incorrectly.
func (ru *ResourceUsage) Reset() { func (ru *ResourceUsage) Reset() {

View File

@@ -30,9 +30,8 @@ type Server struct {
ctx context.Context ctx context.Context
ctxCancel *context.CancelFunc ctxCancel *context.CancelFunc
emitterLock sync.Mutex emitterLock sync.Mutex
powerLock *powerLocker powerLock *system.Locker
throttleOnce sync.Once
// Maintains the configuration for the server. This is the data that gets returned by the Panel // Maintains the configuration for the server. This is the data that gets returned by the Panel
// such as build settings and container images. // such as build settings and container images.
@@ -64,7 +63,8 @@ type Server struct {
restoring *system.AtomicBool restoring *system.AtomicBool
// The console throttler instance used to control outputs. // The console throttler instance used to control outputs.
throttler *ConsoleThrottler throttler *ConsoleThrottle
throttleOnce sync.Once
// Tracks open websocket connections for the server. // Tracks open websocket connections for the server.
wsBag *WebsocketBag wsBag *WebsocketBag
@@ -87,7 +87,7 @@ func New(client remote.Client) (*Server, error) {
installing: system.NewAtomicBool(false), installing: system.NewAtomicBool(false),
transferring: system.NewAtomicBool(false), transferring: system.NewAtomicBool(false),
restoring: system.NewAtomicBool(false), restoring: system.NewAtomicBool(false),
powerLock: newPowerLocker(), powerLock: system.NewLocker(),
sinks: map[SinkName]*sinkPool{ sinks: map[SinkName]*sinkPool{
LogSink: newSinkPool(), LogSink: newSinkPool(),
InstallSink: newSinkPool(), InstallSink: newSinkPool(),
@@ -239,14 +239,6 @@ func (s *Server) ReadLogfile(len int) ([]string, error) {
return s.Environment.Readlog(len) return s.Environment.Readlog(len)
} }
// Determine if the server is bootable in it's current state or not. This will not
// indicate why a server is not bootable, only if it is.
func (s *Server) IsBootable() bool {
exists, _ := s.Environment.Exists()
return exists
}
// Initializes a server instance. This will run through and ensure that the environment // Initializes a server instance. This will run through and ensure that the environment
// for the server is setup, and that all of the necessary files are created. // for the server is setup, and that all of the necessary files are created.
func (s *Server) CreateEnvironment() error { func (s *Server) CreateEnvironment() error {

View File

@@ -2,6 +2,7 @@ package server
import ( import (
"sync" "sync"
"time"
) )
// SinkName represents one of the registered sinks for a server. // SinkName represents one of the registered sinks for a server.
@@ -79,20 +80,44 @@ func (p *sinkPool) Destroy() {
} }
// Push sends a given message to each of the channels registered in the pool. // Push sends a given message to each of the channels registered in the pool.
// This will use a Ring Buffer channel in order to avoid blocking the channel
// sends, and attempt to push through the most recent messages in the queue in
// favor of the oldest messages.
//
// If the channel becomes full and isn't being drained fast enough, this
// function will remove the oldest message in the channel, and then push the
// message that it got onto the end, effectively making the channel a rolling
// buffer.
//
// There is a potential for data to be lost when passing it through this
// function, but only in instances where the channel buffer is full and the
// channel is not drained fast enough, in which case dropping messages is most
// likely the best option anyway. This uses a WaitGroup to allow every channel
// to attempt its send concurrently, thus making the total blocking time of this
// function "O(1)" instead of "O(n)".
func (p *sinkPool) Push(data []byte) { func (p *sinkPool) Push(data []byte) {
p.mu.RLock() p.mu.RLock()
// Attempt to send the data over to the channels. If the channel buffer is full, defer p.mu.RUnlock()
// or otherwise blocked for some reason (such as being a nil channel), just discard var wg sync.WaitGroup
// the event data and move on to the next channel in the slice. If you don't wg.Add(len(p.sinks))
// implement the "default" on the select you'll block execution until the channel
// becomes unblocked, which is not what we want to do here.
for _, c := range p.sinks { for _, c := range p.sinks {
select { go func(c chan []byte) {
case c <- data: defer wg.Done()
default: select {
} case c <- data:
case <-time.After(time.Millisecond * 10):
// If there is nothing in the channel to read, but we also cannot write
// to the channel, just skip over sending data. If we don't do this you'll
// end up blocking the application on the channel read below.
if len(c) == 0 {
break
}
<-c
c <- data
}
}(c)
} }
p.mu.RUnlock() wg.Wait()
} }
// Sink returns the instantiated and named sink for a server. If the sink has // Sink returns the instantiated and named sink for a server. If the sink has

View File

@@ -1,9 +1,11 @@
package server package server
import ( import (
"fmt"
"reflect" "reflect"
"sync" "sync"
"testing" "testing"
"time"
. "github.com/franela/goblin" . "github.com/franela/goblin"
) )
@@ -64,10 +66,10 @@ func TestSink(t *testing.T) {
g.It("removes a channel and maintains the order", func() { g.It("removes a channel and maintains the order", func() {
channels := make([]chan []byte, 8) channels := make([]chan []byte, 8)
for i := 0; i < len(channels); i++ { for i := 0; i < len(channels); i++ {
channels[i] = make(chan []byte, 1) channels[i] = make(chan []byte, 1)
pool.On(channels[i]) pool.On(channels[i])
} }
g.Assert(len(pool.sinks)).Equal(8) g.Assert(len(pool.sinks)).Equal(8)
@@ -81,10 +83,10 @@ func TestSink(t *testing.T) {
g.It("does not panic if a nil channel is provided", func() { g.It("does not panic if a nil channel is provided", func() {
ch := make([]chan []byte, 1) ch := make([]chan []byte, 1)
defer func () { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
g.Fail("removing a nil channel should not cause a panic") g.Fail("removing a nil channel should not cause a panic")
} }
}() }()
pool.On(ch[0]) pool.On(ch[0])
@@ -123,22 +125,67 @@ func TestSink(t *testing.T) {
g.Assert(len(pool.sinks)).Equal(2) g.Assert(len(pool.sinks)).Equal(2)
}) })
g.It("does not block if a channel is nil or otherwise full", func() { g.It("uses a ring-buffer to avoid blocking when the channel is full", func() {
ch := make([]chan []byte, 2) ch1 := make(chan []byte, 1)
ch[1] = make(chan []byte, 1) ch2 := make(chan []byte, 2)
ch[1] <- []byte("test") ch3 := make(chan []byte)
pool.On(ch[0]) // ch1 and ch2 are now full, and would block if the code doesn't account
pool.On(ch[1]) // for a full buffer.
ch1 <- []byte("pre-test")
ch2 <- []byte("pre-test")
ch2 <- []byte("pre-test 2")
pool.On(ch1)
pool.On(ch2)
pool.On(ch3)
pool.Push([]byte("testing")) pool.Push([]byte("testing"))
time.Sleep(time.Millisecond * 20)
g.Assert(MutexLocked(&pool.mu)).IsFalse() g.Assert(MutexLocked(&pool.mu)).IsFalse()
g.Assert(<-ch[1]).Equal([]byte("test")) // We expect that value previously in the channel to have been dumped
// and therefore only the value we pushed will be present. For ch2 we
// expect only the first message was dropped, and the second one is now
// the first in the out queue.
g.Assert(<-ch1).Equal([]byte("testing"))
g.Assert(<-ch2).Equal([]byte("pre-test 2"))
g.Assert(<-ch2).Equal([]byte("testing"))
// Because nothing in this test was listening for ch3, it would have
// blocked for the 10ms duration, and then been skipped over entirely
// because it had no length to try and push onto.
g.Assert(len(ch3)).Equal(0)
// Now, push again and expect similar results.
pool.Push([]byte("testing 2"))
time.Sleep(time.Millisecond * 20)
pool.Push([]byte("test2"))
g.Assert(<-ch[1]).Equal([]byte("test2"))
g.Assert(MutexLocked(&pool.mu)).IsFalse() g.Assert(MutexLocked(&pool.mu)).IsFalse()
g.Assert(<-ch1).Equal([]byte("testing 2"))
g.Assert(<-ch2).Equal([]byte("testing 2"))
})
g.It("can handle concurrent pushes FIFO", func() {
ch := make(chan []byte, 4)
pool.On(ch)
pool.On(make(chan []byte))
for i := 0; i < 100; i++ {
pool.Push([]byte(fmt.Sprintf("iteration %d", i)))
}
time.Sleep(time.Millisecond * 20)
g.Assert(MutexLocked(&pool.mu)).IsFalse()
g.Assert(len(ch)).Equal(4)
g.Timeout(time.Millisecond * 500)
g.Assert(<-ch).Equal([]byte("iteration 96"))
g.Assert(<-ch).Equal([]byte("iteration 97"))
g.Assert(<-ch).Equal([]byte("iteration 98"))
g.Assert(<-ch).Equal([]byte("iteration 99"))
g.Assert(len(ch)).Equal(0)
}) })
}) })

View File

@@ -1,3 +1,3 @@
package system package system
var Version = "develop" var Version = "1.6.0"

system/locker.go (new file, 83 lines)
View File

@@ -0,0 +1,83 @@
package system
import (
"context"
"sync"
"emperror.dev/errors"
)
var ErrLockerLocked = errors.Sentinel("locker: cannot acquire lock, already locked")
type Locker struct {
mu sync.RWMutex
ch chan bool
}
// NewLocker returns a new Locker instance.
func NewLocker() *Locker {
return &Locker{
ch: make(chan bool, 1),
}
}
// IsLocked returns the current state of the locker channel. If there is
// currently a value in the channel, it is assumed to be locked.
func (l *Locker) IsLocked() bool {
l.mu.RLock()
defer l.mu.RUnlock()
return len(l.ch) == 1
}
// Acquire will acquire the lock if it is not currently held. If it is
// already locked, Acquire fails to acquire the lock and returns ErrLockerLocked.
func (l *Locker) Acquire() error {
l.mu.Lock()
defer l.mu.Unlock()
select {
case l.ch <- true:
default:
return ErrLockerLocked
}
return nil
}
// TryAcquire will attempt to acquire the lock until the context provided
// is canceled.
func (l *Locker) TryAcquire(ctx context.Context) error {
select {
case l.ch <- true:
return nil
case <-ctx.Done():
if err := ctx.Err(); err != nil {
return err
}
return nil
}
}
// Release will drain the locker channel so that we can properly re-acquire it
// at a later time. If the channel is not currently locked this function is a
// no-op and will immediately return.
func (l *Locker) Release() {
l.mu.Lock()
select {
case <-l.ch:
default:
}
l.mu.Unlock()
}
// Destroy cleans up the locker by closing the channel.
func (l *Locker) Destroy() {
l.mu.Lock()
if l.ch != nil {
select {
case <-l.ch:
default:
}
close(l.ch)
}
l.mu.Unlock()
}
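
A short usage sketch for the new system.Locker based on the API above; the surrounding program and the 100ms deadline are illustrative.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/pterodactyl/wings/system"
)

func main() {
	lock := system.NewLocker()

	// First acquisition succeeds because nothing holds the lock yet.
	if err := lock.Acquire(); err != nil {
		panic(err)
	}
	fmt.Println("locked:", lock.IsLocked()) // true

	// A second non-blocking attempt fails immediately with ErrLockerLocked.
	err := lock.Acquire()
	fmt.Println("second acquire rejected:", errors.Is(err, system.ErrLockerLocked)) // true

	// A blocking attempt with a deadline waits, then gives up after 100ms
	// because the lock is still held.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	err = lock.TryAcquire(ctx)
	fmt.Println("timed out waiting:", errors.Is(err, context.DeadlineExceeded)) // true

	// Release makes the lock available again; releasing an unlocked locker
	// is a no-op.
	lock.Release()
	fmt.Println("locked:", lock.IsLocked()) // false
}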

system/locker_test.go (new file, 148 lines)
View File

@@ -0,0 +1,148 @@
package system
import (
"context"
"testing"
"time"
"emperror.dev/errors"
. "github.com/franela/goblin"
)
func TestPower(t *testing.T) {
g := Goblin(t)
g.Describe("Locker", func() {
var l *Locker
g.BeforeEach(func() {
l = NewLocker()
})
g.Describe("PowerLocker#IsLocked", func() {
g.It("should return false when the channel is empty", func() {
g.Assert(cap(l.ch)).Equal(1)
g.Assert(l.IsLocked()).IsFalse()
})
g.It("should return true when the channel is at capacity", func() {
l.ch <- true
g.Assert(l.IsLocked()).IsTrue()
<-l.ch
g.Assert(l.IsLocked()).IsFalse()
// We don't care what the channel value is, just that there is
// something in it.
l.ch <- false
g.Assert(l.IsLocked()).IsTrue()
g.Assert(cap(l.ch)).Equal(1)
})
})
g.Describe("PowerLocker#Acquire", func() {
g.It("should acquire a lock when channel is empty", func() {
err := l.Acquire()
g.Assert(err).IsNil()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(1)
})
g.It("should return an error when the channel is full", func() {
l.ch <- true
err := l.Acquire()
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, ErrLockerLocked)).IsTrue()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(1)
})
})
g.Describe("PowerLocker#TryAcquire", func() {
g.It("should acquire a lock when channel is empty", func() {
g.Timeout(time.Second)
err := l.TryAcquire(context.Background())
g.Assert(err).IsNil()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(1)
g.Assert(l.IsLocked()).IsTrue()
})
g.It("should block until context is canceled if channel is full", func() {
g.Timeout(time.Second)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
defer cancel()
l.ch <- true
err := l.TryAcquire(ctx)
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, context.DeadlineExceeded)).IsTrue()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(1)
g.Assert(l.IsLocked()).IsTrue()
})
g.It("should block until lock can be acquired", func() {
g.Timeout(time.Second)
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200)
defer cancel()
l.Acquire()
go func() {
time.AfterFunc(time.Millisecond * 50, func() {
l.Release()
})
}()
err := l.TryAcquire(ctx)
g.Assert(err).IsNil()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(1)
g.Assert(l.IsLocked()).IsTrue()
})
})
g.Describe("PowerLocker#Release", func() {
g.It("should release when channel is full", func() {
l.Acquire()
g.Assert(l.IsLocked()).IsTrue()
l.Release()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(0)
g.Assert(l.IsLocked()).IsFalse()
})
g.It("should release when channel is empty", func() {
g.Assert(l.IsLocked()).IsFalse()
l.Release()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(0)
g.Assert(l.IsLocked()).IsFalse()
})
})
g.Describe("PowerLocker#Destroy", func() {
g.It("should unlock and close the channel", func() {
l.Acquire()
g.Assert(l.IsLocked()).IsTrue()
l.Destroy()
g.Assert(l.IsLocked()).IsFalse()
defer func() {
r := recover()
g.Assert(r).IsNotNil()
g.Assert(r.(error).Error()).Equal("send on closed channel")
}()
l.Acquire()
})
})
})
}

system/rate.go Normal file

@@ -0,0 +1,50 @@
package system
import (
"sync"
"time"
)
// Rate defines a rate limiter of n items (limit) per duration of time.
type Rate struct {
mu sync.Mutex
limit uint64
duration time.Duration
count uint64
last time.Time
}
func NewRate(limit uint64, duration time.Duration) *Rate {
return &Rate{
limit: limit,
duration: duration,
last: time.Now(),
}
}
// Try returns true if under the rate limit defined, or false if the rate limit
// has been exceeded for the current duration.
func (r *Rate) Try() bool {
r.mu.Lock()
defer r.mu.Unlock()
now := time.Now()
// If it has been more than the duration, reset the timer and count.
if now.Sub(r.last) > r.duration {
r.count = 0
r.last = now
}
if (r.count + 1) > r.limit {
return false
}
// Hit this once, and return.
r.count = r.count + 1
return true
}
// Reset resets the internal state of the rate limiter back to zero.
func (r *Rate) Reset() {
r.mu.Lock()
r.count = 0
r.last = time.Now()
r.mu.Unlock()
}

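Rate is a fixed-window counter: each call to Try increments the count for the current window, the window resets once the configured duration has elapsed, and Try reports false once the window's budget is spent. Below is a small illustrative sketch of throttling a stream of lines with it; the limit, window, and loop are placeholder values, not what the daemon actually configures.

package main

import (
	"fmt"
	"time"

	"github.com/pterodactyl/wings/system"
)

func main() {
	// Placeholder values: allow at most 5 lines per 100ms window.
	limit := system.NewRate(5, 100*time.Millisecond)

	for i := 0; i < 20; i++ {
		if limit.Try() {
			fmt.Printf("line %d forwarded\n", i)
		} else {
			fmt.Printf("line %d throttled\n", i) // over budget for this window
		}
		time.Sleep(10 * time.Millisecond)
	}
}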
system/rate_test.go Normal file

@@ -0,0 +1,67 @@
package system
import (
"testing"
"time"
. "github.com/franela/goblin"
)
func TestRate(t *testing.T) {
g := Goblin(t)
g.Describe("Rate", func() {
g.It("properly rate limits a bucket", func() {
r := NewRate(10, time.Millisecond*100)
for i := 0; i < 100; i++ {
ok := r.Try()
if i < 10 && !ok {
g.Failf("should not have allowed take on try %d", i)
} else if i >= 10 && ok {
g.Failf("should have blocked take on try %d", i)
}
}
})
g.It("handles rate limiting in chunks", func() {
var out []int
r := NewRate(12, time.Millisecond*10)
for i := 0; i < 100; i++ {
if i%20 == 0 {
// Give it time to recover.
time.Sleep(time.Millisecond * 10)
}
if r.Try() {
out = append(out, i)
}
}
g.Assert(len(out)).Equal(60)
g.Assert(out[0]).Equal(0)
g.Assert(out[12]).Equal(20)
g.Assert(out[len(out)-1]).Equal(91)
})
g.It("resets back to zero when called", func() {
r := NewRate(10, time.Second)
for i := 0; i < 100; i++ {
if i % 10 == 0 {
r.Reset()
}
g.Assert(r.Try()).IsTrue()
}
g.Assert(r.Try()).IsFalse("final attempt should not allow taking")
})
})
}
func BenchmarkRate_Try(b *testing.B) {
r := NewRate(10, time.Millisecond*100)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
r.Try()
}
}


@@ -165,9 +165,9 @@ func (ab *AtomicBool) Store(v bool) {
ab.mu.Unlock()
}
// SwapIf stores the value "v" if the current value stored in the AtomicBool is
// the opposite boolean value. If successfully swapped, the response is "true",
// otherwise "false" is returned.
func (ab *AtomicBool) SwapIf(v bool) bool {
ab.mu.Lock()
defer ab.mu.Unlock()

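SwapIf behaves like a small compare-and-set: the write only happens when the stored value is the opposite of the one supplied, and the return value tells the caller whether it won the swap. A hedged example of the one-shot pattern this enables follows; the crash-handling framing and names are purely illustrative.

package main

import "github.com/pterodactyl/wings/system"

// handled guards a one-time action; only the goroutine that flips the value
// from false to true runs the body, every other caller returns early.
var handled = system.NewAtomicBool(false)

func handleCrash() {
	if !handled.SwapIf(true) {
		return // another goroutine already won the swap
	}
	// ... one-time cleanup or restart logic ...
}

func main() {
	handleCrash()
	handleCrash() // second call is a no-op
}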

@@ -3,10 +3,12 @@ package system
import (
"math/rand"
"strings"
"sync"
"testing"
"time"
. "github.com/franela/goblin"
"github.com/goccy/go-json"
)
func Test_Utils(t *testing.T) {
@@ -40,6 +42,80 @@ func Test_Utils(t *testing.T) {
g.Assert(lines).Equal([]string{"test\rstrin", "another\rli", "hodor\r\r\rhe", "material g"})
})
})
g.Describe("AtomicBool", func() {
var b *AtomicBool
g.BeforeEach(func() {
b = NewAtomicBool(false)
})
g.It("initalizes with the provided start value", func() {
b = NewAtomicBool(true)
g.Assert(b.Load()).IsTrue()
b = NewAtomicBool(false)
g.Assert(b.Load()).IsFalse()
})
g.Describe("AtomicBool#Store", func() {
g.It("stores the provided value", func() {
g.Assert(b.Load()).IsFalse()
b.Store(true)
g.Assert(b.Load()).IsTrue()
})
// This test makes no assertions, it just expects to not hit a race condition
// by having multiple things writing at the same time.
g.It("handles contention from multiple routines", func() {
var wg sync.WaitGroup
wg.Add(100)
for i := 0; i < 100; i++ {
go func(i int) {
b.Store(i%2 == 0)
wg.Done()
}(i)
}
wg.Wait()
})
})
g.Describe("AtomicBool#SwapIf", func() {
g.It("swaps the value out if different than what is stored", func() {
o := b.SwapIf(false)
g.Assert(o).IsFalse()
g.Assert(b.Load()).IsFalse()
o = b.SwapIf(true)
g.Assert(o).IsTrue()
g.Assert(b.Load()).IsTrue()
o = b.SwapIf(true)
g.Assert(o).IsFalse()
g.Assert(b.Load()).IsTrue()
o = b.SwapIf(false)
g.Assert(o).IsTrue()
g.Assert(b.Load()).IsFalse()
})
})
g.Describe("can be marshaled with JSON", func() {
type testStruct struct {
Value AtomicBool `json:"value"`
}
var o testStruct
err := json.Unmarshal([]byte(`{"value":true}`), &o)
g.Assert(err).IsNil()
g.Assert(o.Value.Load()).IsTrue()
b, err2 := json.Marshal(&o)
g.Assert(err2).IsNil()
g.Assert(b).Equal([]byte(`{"value":true}`))
})
})
}
func Benchmark_ScanReader(b *testing.B) {