Compare commits

...

31 Commits
v1.6.0 ... v2

Author SHA1 Message Date
Matthew Penner
ee280f1deb Merge branch 'develop' into v2 2022-04-01 09:48:24 -06:00
Matthew Penner
8bd1ebe360 go: update dependencies 2022-03-25 10:04:57 -06:00
Matthew Penner
93664fd112 router: add additional fields to remote file pull 2022-02-23 15:03:15 -07:00
Matthew Penner
3a738e44d6 run gofumpt 2022-02-23 15:02:19 -07:00
Noah van der Aa
067ca5bb60 Actually enforce upload file size limit (#122) 2022-02-21 14:59:28 -08:00
Dane Everitt
355c10c1e4 Update lockfile 2022-02-13 13:17:26 -05:00
Dane Everitt
04933c153b Merge branch 'develop' into v2 2022-02-13 13:10:36 -05:00
Dane Everitt
f85509a0c7 Support a custom tmp directory location 2022-02-13 11:59:53 -05:00
Dane Everitt
225a89be72 Update CHANGELOG.md 2022-02-05 12:41:53 -05:00
Dane Everitt
5d1d3cc9e6 Fix panic conditions 2022-02-05 12:11:00 -05:00
Dane Everitt
9f985ae044 Check for error before prefix; fixes abandoned routine; closes pterodactyl/panel#3911
Due to the order of the previous logic in ScanReader, an error not caused by EOF would effectively get ignored since an error will always be returned with `isPrefix` equal to false, thus triggering the first break, and error checking is not performed beyond that point.

Thus, canceling an installation process for a server while this process was running would hang the routine and cause the loop to run endlessly, even with a canceled context.
2022-02-05 11:56:17 -05:00
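A hedged sketch of the loop shape this fix implies (not the literal wings `system.ScanReader` body): check the `ReadLine` error before acting on `isPrefix`, since `bufio.Reader.ReadLine` reports errors with `isPrefix` equal to false and the old ordering made the error branch unreachable.

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// scanReader is illustrative only: the error is inspected first, so a
// non-EOF error (for example a read canceled mid-install) ends the loop
// instead of being masked by the isPrefix == false break.
func scanReader(r io.Reader, onLine func(string)) error {
	br := bufio.NewReader(r)
	var buf []byte
	for {
		line, isPrefix, err := br.ReadLine()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err // previously this path was effectively unreachable
		}
		buf = append(buf, line...)
		if isPrefix {
			continue // line exceeded the buffer; keep accumulating
		}
		onLine(string(buf))
		buf = buf[:0]
	}
}

func main() {
	_ = scanReader(strings.NewReader("hello\nworld\n"), func(s string) { fmt.Println(s) })
}
```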
Dane Everitt
1372eba84e Remove unused function 2022-02-05 11:14:48 -05:00
Dane Everitt
879dcd8df5 Don't trigger a panic condition decoding event stats; closes pterodactyl/panel#3941 2022-02-05 11:06:11 -05:00
Dane Everitt
72476c61ec Simplify the event bus system; address pterodactyl/panel#3903
If my debugging is correct, this should address pterodactyl/panel#3903 in its entirety by addressing a few areas where it was possible for a channel to lock up and cause everything to block
2022-02-02 21:03:53 -05:00
Dane Everitt
0f2e9fcc0b Move the sink pool to be a shared tool 2022-02-02 19:16:34 -05:00
Dane Everitt
5c3e2c2c94 Fix failing test 2022-01-31 19:33:32 -05:00
Dane Everitt
7051feee01 Add additional debug points to server start process 2022-01-31 19:30:07 -05:00
Dane Everitt
cd67e5fdb9 Fix logic for context based environment stopping
Uses dual contexts to handle stopping using a timed context, and also terminating the entire process loop if the parent context gets canceled.
2022-01-31 19:09:08 -05:00
Dane Everitt
84bbefdadc Pass a context through to the start/stop/terminate actions 2022-01-31 18:40:15 -05:00
Dane Everitt
6a4178648f Return context cancelations as a locker locked error 2022-01-31 18:39:41 -05:00
Dane Everitt
1e52ffef64 Fix panic condition when no response is returned 2022-01-31 18:37:02 -05:00
Dane Everitt
0f9f80c181 Improve support for block/mutex contention in pprof 2022-01-30 21:02:18 -05:00
Matthew Penner
f1344f1a82 Merge branch 'develop' into v2 2022-01-18 13:12:09 -07:00
Matthew Penner
d874af85db tweaks to websocket event handling 2021-11-15 10:13:31 -07:00
Matthew Penner
54b6033392 update dependencies 2021-11-15 10:13:19 -07:00
Matthew Penner
10a2ffc0a7 Merge branch 'develop' into v2 2021-10-28 23:58:12 -06:00
Matthew Penner
ede1cdc76f Merge branch 'develop' into v2 2021-10-05 19:01:34 -06:00
Dane Everitt
9e98287172 Merge branch 'develop' into v2 2021-09-19 11:17:05 -07:00
Matthew Penner
6653466ca8 Merge branch 'develop' into v2 2021-09-01 16:33:32 -06:00
Matthew Penner
f54a736353 add support for systemd notify 2021-08-02 15:17:22 -06:00
Matthew Penner
d3360f0fd9 sftp: add support for ssh keys 2021-08-02 15:17:22 -06:00
41 changed files with 1215 additions and 752 deletions

View File

@@ -1,5 +1,14 @@
# Changelog
## v1.6.1
### Fixed
* Fixes an error that would sometimes occur when starting a server and cause the temporary power action lock to never be released due to a blocked channel.
* Fixes a bug causing the CPU usage of Wings to get stuck at 100% when a server is deleted while the installation process is running.
### Changed
* Cleans up a lot of the logic for handling events between the server and environment process to make it easier to modify down the road.
* Cleans up the `StopAndWait` logic for stopping a server gracefully before terminating the process if it does not respond.
## v1.6.0
### Fixed
* Internal logic for processing a server start event has been adjusted to attach to the Docker container before attempting to start the container. This should fix issues where a server would get stuck after pulling the container image.

View File

@@ -6,7 +6,7 @@ build:
debug:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)"
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof
sudo ./wings --debug --ignore-certificate-errors --config config.yml --pprof --pprof-block-rate 1
# Runs a remotely debuggable session for Wings allowing an IDE to connect and target
# different breakpoints.

View File

@@ -9,10 +9,13 @@ import (
"net/http"
_ "net/http/pprof"
"os"
"os/signal"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/NYTimes/logrotate"
@@ -27,6 +30,7 @@ import (
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/internal/notify"
"github.com/pterodactyl/wings/loggers/cli"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/router"
@@ -76,6 +80,7 @@ func init() {
// Flags specifically used when running the API.
rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
@@ -309,6 +314,12 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
profile, _ := cmd.Flags().GetBool("pprof")
if profile {
if r, _ := cmd.Flags().GetInt("pprof-block-rate"); r > 0 {
runtime.SetBlockProfileRate(r)
}
// Catch at least 1% of mutex contention issues.
runtime.SetMutexProfileFraction(100)
profilePort, _ := cmd.Flags().GetInt("pprof-port")
go func() {
http.ListenAndServe(fmt.Sprintf("localhost:%d", profilePort), nil)
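For reference, the profiler wiring added here boils down to a few standard-library calls; a minimal standalone sketch with the flag values hard-coded (flag parsing elided):

```go
package main

import (
	"fmt"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof handlers on the default mux
	"runtime"
)

func main() {
	// Mirror of the flag logic above: only enable the block profiler when a
	// positive rate is supplied, since it carries a runtime cost.
	blockRate := 1
	if blockRate > 0 {
		runtime.SetBlockProfileRate(blockRate)
	}
	// Sample roughly 1 in 100 mutex contention events.
	runtime.SetMutexProfileFraction(100)
	// Serve pprof on localhost only; inspect with e.g.
	// `go tool pprof http://localhost:6060/debug/pprof/mutex`.
	profilePort := 6060
	_ = http.ListenAndServe(fmt.Sprintf("localhost:%d", profilePort), nil)
}
```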
@@ -316,43 +327,57 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
}
// Check if the server should run with TLS but using autocert.
if autotls {
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
Cache: autocert.DirCache(path.Join(sys.RootDirectory, "/.tls-cache")),
HostPolicy: autocert.HostWhitelist(tlshostname),
}
log.WithField("hostname", tlshostname).Info("webserver is now listening with auto-TLS enabled; certificates will be automatically generated by Let's Encrypt")
// Hook autocert into the main http server.
s.TLSConfig.GetCertificate = m.GetCertificate
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, acme.ALPNProto) // enable tls-alpn ACME challenges
// Start the autocert server.
go func() {
if err := http.ListenAndServe(":http", m.HTTPHandler(nil)); err != nil {
log.WithError(err).Error("failed to serve autocert http server")
go func(s *http.Server, api config.ApiConfiguration, sys config.SystemConfiguration, autotls bool, tlshostname string) {
if autotls {
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
Cache: autocert.DirCache(path.Join(sys.RootDirectory, "/.tls-cache")),
HostPolicy: autocert.HostWhitelist(tlshostname),
}
}()
// Start the main http server with TLS using autocert.
if err := s.ListenAndServeTLS("", ""); err != nil {
log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlshostname, "error": err}).Fatal("failed to configure HTTP server using auto-tls")
log.WithField("hostname", tlshostname).Info("webserver is now listening with auto-TLS enabled; certificates will be automatically generated by Let's Encrypt")
// Hook autocert into the main http server.
s.TLSConfig.GetCertificate = m.GetCertificate
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, acme.ALPNProto) // enable tls-alpn ACME challenges
// Start the autocert server.
go func() {
if err := http.ListenAndServe(":http", m.HTTPHandler(nil)); err != nil {
log.WithError(err).Error("failed to serve autocert http server")
}
}()
// Start the main http server with TLS using autocert.
if err := s.ListenAndServeTLS("", ""); err != nil {
log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlshostname, "error": err}).Fatal("failed to configure HTTP server using auto-tls")
}
return
}
return
// Check if main http server should run with TLS. Otherwise reset the TLS
// config on the server and then serve it over normal HTTP.
if api.Ssl.Enabled {
if err := s.ListenAndServeTLS(api.Ssl.CertificateFile, api.Ssl.KeyFile); err != nil {
log.WithFields(log.Fields{"auto_tls": false, "error": err}).Fatal("failed to configure HTTPS server")
}
return
}
s.TLSConfig = nil
if err := s.ListenAndServe(); err != nil {
log.WithField("error", err).Fatal("failed to configure HTTP server")
}
}(s, api, sys, autotls, tlshostname)
if err := notify.Readiness(); err != nil {
log.WithField("error", err).Error("failed to notify systemd of readiness state")
}
// Check if main http server should run with TLS. Otherwise reset the TLS
// config on the server and then serve it over normal HTTP.
if api.Ssl.Enabled {
if err := s.ListenAndServeTLS(api.Ssl.CertificateFile, api.Ssl.KeyFile); err != nil {
log.WithFields(log.Fields{"auto_tls": false, "error": err}).Fatal("failed to configure HTTPS server")
}
return
}
s.TLSConfig = nil
if err := s.ListenAndServe(); err != nil {
log.WithField("error", err).Fatal("failed to configure HTTP server")
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
<-c
if err := notify.Stopping(); err != nil {
log.WithField("error", err).Error("failed to notify systemd of stopping state")
}
}
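Taken together, this hunk moves the HTTP listeners into a goroutine so the main goroutine can report readiness to systemd and then block for a shutdown signal. A condensed sketch of the resulting flow, with `s` as a stand-in for the configured wings server (TLS branches elided):

```go
package main

import (
	"net/http"
	"os"
	"os/signal"
	"syscall"

	"github.com/apex/log"

	"github.com/pterodactyl/wings/internal/notify"
)

func main() {
	s := &http.Server{Addr: ":8080"} // stand-in for the configured server
	// Serve in the background so the main goroutine is free to manage
	// lifecycle signaling.
	go func() {
		if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.WithField("error", err).Fatal("failed to configure HTTP server")
		}
	}()
	// Tell systemd the daemon is ready (a no-op outside a Type=notify unit).
	if err := notify.Readiness(); err != nil {
		log.WithField("error", err).Error("failed to notify systemd of readiness state")
	}
	// Block until asked to stop, then report the stopping state.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	<-c
	if err := notify.Stopping(); err != nil {
		log.WithField("error", err).Error("failed to notify systemd of stopping state")
	}
}
```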

View File

@@ -89,8 +89,8 @@ type ApiConfiguration struct {
// servers.
DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
// The maximum size for files uploaded through the Panel in bytes.
UploadLimit int `default:"100" json:"upload_limit" yaml:"upload_limit"`
// The maximum size for files uploaded through the Panel in MB.
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
}
// RemoteQueryConfiguration defines the configuration settings for remote requests
@@ -132,6 +132,10 @@ type SystemConfiguration struct {
// Directory where local backups will be stored on the machine.
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
// should be created. This supports environments running docker-in-docker.
TmpDirectory string `default:"/tmp/pterodactyl" yaml:"tmp_directory"`
// The user that should own all of the server files, and be used for containers.
Username string `default:"pterodactyl" yaml:"username"`

View File

@@ -73,6 +73,9 @@ func (e *Environment) ContainerInspect(ctx context.Context) (types.ContainerJSON
res, err := e.client.HTTPClient().Do(req)
if err != nil {
if res == nil {
return st, errdefs.Unknown(err)
}
return st, errdefs.FromStatusCode(err, res.StatusCode)
}

View File

@@ -26,7 +26,7 @@ type Metadata struct {
var _ environment.ProcessEnvironment = (*Environment)(nil)
type Environment struct {
mu sync.RWMutex
mu sync.RWMutex
// The public identifier for this environment. In this case it is the Docker container
// name that will be used for all instances created under it.

View File

@@ -138,9 +138,7 @@ func (e *Environment) Start(ctx context.Context) error {
// You most likely want to be using WaitForStop() rather than this function,
// since this will return as soon as the command is sent, rather than waiting
// for the process to be completely stopped.
//
// TODO: pass context through from the server instance.
func (e *Environment) Stop() error {
func (e *Environment) Stop(ctx context.Context) error {
e.mu.RLock()
s := e.meta.Stop
e.mu.RUnlock()
@@ -164,7 +162,7 @@ func (e *Environment) Stop() error {
case "SIGTERM":
signal = syscall.SIGTERM
}
return e.Terminate(signal)
return e.Terminate(ctx, signal)
}
// If the process is already offline don't switch it back to stopping. Just leave it how
@@ -179,8 +177,10 @@ func (e *Environment) Stop() error {
return e.SendCommand(s.Value)
}
t := time.Second * 30
if err := e.client.ContainerStop(context.Background(), e.Id, &t); err != nil {
// Allow the stop action to run for however long it takes, similar to executing a command
// and using a different logic pathway to wait for the container to stop successfully.
t := time.Duration(-1)
if err := e.client.ContainerStop(ctx, e.Id, &t); err != nil {
// If the container does not exist just mark the process as stopped and return without
// an error.
if client.IsErrNotFound(err) {
@@ -198,45 +198,66 @@ func (e *Environment) Stop() error {
// command. If the server does not stop after seconds have passed, an error will
// be returned, or the instance will be terminated forcefully depending on the
// value of the second argument.
func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
if err := e.Stop(); err != nil {
return err
//
// Calls to Environment.Terminate() in this function use the context passed
// through since we don't want to prevent termination of the server instance
// just because the context.WithTimeout() has expired.
func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, terminate bool) error {
tctx, cancel := context.WithTimeout(context.Background(), duration)
defer cancel()
// If the parent context is canceled, abort the timed context for termination.
go func() {
select {
case <-ctx.Done():
cancel()
case <-tctx.Done():
// When the timed context is canceled, terminate this routine since we no longer
// need to worry about the parent routine being canceled.
break
}
}()
doTermination := func(s string) error {
e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
return e.Terminate(ctx, os.Kill)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second)
defer cancel()
// We pass through the timed context for this stop action so that if one of the
// internal docker calls fails to ever finish before we've exhausted the time limit,
// the resources get cleaned up and the execution is stopped.
if err := e.Stop(tctx); err != nil {
if terminate && errors.Is(err, context.DeadlineExceeded) {
return doTermination("stop")
}
return err
}
// Block the return of this function until the container has been marked as no
// longer running. If this wait does not end by the time seconds have passed,
// attempt to terminate the container, or return an error.
ok, errChan := e.client.ContainerWait(ctx, e.Id, container.WaitConditionNotRunning)
ok, errChan := e.client.ContainerWait(tctx, e.Id, container.WaitConditionNotRunning)
select {
case <-ctx.Done():
if ctxErr := ctx.Err(); ctxErr != nil {
if err := ctx.Err(); err != nil {
if terminate {
log.WithField("container_id", e.Id).Info("server did not stop in time, executing process termination")
return e.Terminate(os.Kill)
return doTermination("parent-context")
}
return ctxErr
return err
}
case err := <-errChan:
// If the error stems from the container not existing there is no point in wasting
// CPU time to then try and terminate it.
if err != nil && !client.IsErrNotFound(err) {
if terminate {
l := log.WithField("container_id", e.Id)
if errors.Is(err, context.DeadlineExceeded) {
l.Warn("deadline exceeded for container stop; terminating process")
} else {
l.WithField("error", err).Warn("error while waiting for container stop; terminating process")
}
return e.Terminate(os.Kill)
}
return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
if err == nil || client.IsErrNotFound(err) {
return nil
}
if terminate {
if !errors.Is(err, context.DeadlineExceeded) {
e.log().WithField("error", err).Warn("error while waiting for container stop; terminating process")
}
return doTermination("wait")
}
return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
case <-ok:
}
@@ -244,8 +265,8 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
}
// Terminate forcefully terminates the container using the signal provided.
func (e *Environment) Terminate(signal os.Signal) error {
c, err := e.ContainerInspect(context.Background())
func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
c, err := e.ContainerInspect(ctx)
if err != nil {
// Treat missing containers as an okay error state, means it is obviously
// already terminated at this point.
@@ -270,7 +291,7 @@ func (e *Environment) Terminate(signal os.Signal) error {
// We set it to stopping then offline to prevent crash detection from being triggered.
e.SetState(environment.ProcessStoppingState)
sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil && !client.IsErrNotFound(err) {
if err := e.client.ContainerKill(ctx, e.Id, sig); err != nil && !client.IsErrNotFound(err) {
return errors.WithStack(err)
}
e.SetState(environment.ProcessOfflineState)
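The dual-context pattern used by the new `WaitForStop` is easy to lose in the diff noise; a minimal self-contained sketch, assuming only that the graceful stop accepts a context:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// waitForStop bounds a graceful stop with a timed context while a helper
// goroutine collapses that context early if the parent (server lifetime)
// context is canceled first.
func waitForStop(ctx context.Context, d time.Duration, stop func(context.Context) error) error {
	tctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	go func() {
		select {
		case <-ctx.Done():
			cancel() // parent canceled: abort the timed wait too
		case <-tctx.Done():
			// the timed context ended on its own; nothing left to do
		}
	}()
	if err := stop(tctx); err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			return fmt.Errorf("stop timed out after %s; this is where Terminate would run", d)
		}
		return err
	}
	return nil
}

func main() {
	err := waitForStop(context.Background(), 50*time.Millisecond, func(c context.Context) error {
		<-c.Done() // simulate a container that never stops on its own
		return c.Err()
	})
	fmt.Println(err)
}
```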

View File

@@ -3,6 +3,7 @@ package environment
import (
"context"
"os"
"time"
"github.com/pterodactyl/wings/events"
)
@@ -58,18 +59,20 @@ type ProcessEnvironment interface {
// can be started an error should be returned.
Start(ctx context.Context) error
// Stops a server instance. If the server is already stopped an error should
// not be returned.
Stop() error
// Stop stops a server instance. If the server is already stopped an error will
// not be returned, this function will act as a no-op.
Stop(ctx context.Context) error
// Waits for a server instance to stop gracefully. If the server is still detected
// as running after seconds, an error will be returned, or the server will be terminated
// depending on the value of the second argument.
WaitForStop(seconds uint, terminate bool) error
// WaitForStop waits for a server instance to stop gracefully. If the server is
// still detected as running after "duration", an error will be returned, or the server
// will be terminated depending on the value of the second argument. If the context
// provided is canceled the underlying wait conditions will be stopped and the
// entire loop will be ended (potentially without stopping or terminating).
WaitForStop(ctx context.Context, duration time.Duration, terminate bool) error
// Terminates a running server instance using the provided signal. If the server
// is not running no error should be returned.
Terminate(signal os.Signal) error
// Terminate stops a running server instance using the provided signal. This function
// is a no-op if the server is already stopped.
Terminate(ctx context.Context, signal os.Signal) error
// Destroys the environment removing any containers that were created (in Docker
// environments at least).

View File

@@ -2,10 +2,11 @@ package events
import (
"strings"
"sync"
)
type Listener chan Event
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
)
// Event represents an Event sent over a Bus.
type Event struct {
@@ -15,137 +16,55 @@ type Event struct {
// Bus represents an Event Bus.
type Bus struct {
listenersMx sync.Mutex
listeners map[string][]Listener
*system.SinkPool
}
// NewBus returns a new empty Event Bus.
// NewBus returns a new empty Bus. This is simply a nicer wrapper around the
// system.SinkPool implementation that allows for more simplistic usage within
// the codebase.
//
// All of the events emitted out of this bus are byte slices that can be decoded
// back into an events.Event interface.
func NewBus() *Bus {
return &Bus{
listeners: make(map[string][]Listener),
}
}
// Off unregisters a listener from the specified topics on the Bus.
func (b *Bus) Off(listener Listener, topics ...string) {
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
var closed bool
for _, topic := range topics {
ok := b.off(topic, listener)
if !closed && ok {
close(listener)
closed = true
}
}
}
func (b *Bus) off(topic string, listener Listener) bool {
listeners, ok := b.listeners[topic]
if !ok {
return false
}
for i, l := range listeners {
if l != listener {
continue
}
listeners = append(listeners[:i], listeners[i+1:]...)
b.listeners[topic] = listeners
return true
}
return false
}
// On registers a listener to the specified topics on the Bus.
func (b *Bus) On(listener Listener, topics ...string) {
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
for _, topic := range topics {
b.on(topic, listener)
}
}
func (b *Bus) on(topic string, listener Listener) {
listeners, ok := b.listeners[topic]
if !ok {
b.listeners[topic] = []Listener{listener}
} else {
b.listeners[topic] = append(listeners, listener)
system.NewSinkPool(),
}
}
// Publish publishes a message to the Bus.
func (b *Bus) Publish(topic string, data interface{}) {
// Some of our topics for the socket support passing a more specific namespace,
// Some of our actions for the socket support passing a more specific namespace,
// such as "backup completed:1234" to indicate which specific backup was completed.
//
// In these cases, we still need to send the event using the standard listener
// name of "backup completed".
if strings.Contains(topic, ":") {
parts := strings.SplitN(topic, ":", 2)
if len(parts) == 2 {
topic = parts[0]
}
}
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
listeners, ok := b.listeners[topic]
if !ok {
return
enc, err := json.Marshal(Event{Topic: topic, Data: data})
if err != nil {
panic(errors.WithStack(err))
}
if len(listeners) < 1 {
return
}
var wg sync.WaitGroup
event := Event{Topic: topic, Data: data}
for _, listener := range listeners {
l := listener
wg.Add(1)
go func(l Listener, event Event) {
defer wg.Done()
l <- event
}(l, event)
}
wg.Wait()
b.Push(enc)
}
// Destroy destroys the Event Bus by unregistering and closing all listeners.
func (b *Bus) Destroy() {
b.listenersMx.Lock()
defer b.listenersMx.Unlock()
// Track what listeners have already been closed. Because the same listener
// can be listening on multiple topics, we need a way to essentially
// "de-duplicate" all the listeners across all the topics.
var closed []Listener
for _, listeners := range b.listeners {
for _, listener := range listeners {
if contains(closed, listener) {
continue
}
close(listener)
closed = append(closed, listener)
}
// MustDecode decodes the event byte slice back into an events.Event struct or
// panics if an error is encountered during this process.
func MustDecode(data []byte) (e Event) {
if err := DecodeTo(data, &e); err != nil {
panic(err)
}
b.listeners = make(map[string][]Listener)
return
}
func contains(closed []Listener, listener Listener) bool {
for _, c := range closed {
if c == listener {
return true
}
// DecodeTo decodes a byte slice of event data into the given interface.
func DecodeTo(data []byte, v interface{}) error {
if err := json.Unmarshal(data, &v); err != nil {
return errors.Wrap(err, "events: failed to decode byte slice")
}
return false
return nil
}
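A hedged usage sketch of the reworked Bus, using only the calls visible in this diff and the tests below (`NewBus`, `On`, `Publish`, `MustDecode`, `Off`): listeners are now plain `chan []byte` subscribers that decode the payload back into an `events.Event`.

```go
package main

import (
	"fmt"

	"github.com/pterodactyl/wings/events"
)

func main() {
	bus := events.NewBus()
	ch := make(chan []byte)
	bus.On(ch) // subscribe; listeners receive JSON-encoded byte slices
	done := make(chan struct{})
	go func() {
		e := events.MustDecode(<-ch) // decode back into an events.Event
		fmt.Println(e.Topic, e.Data)
		close(done)
	}()
	// The ":1234" suffix is stripped before publishing, as in Publish above,
	// so the decoded Topic is "backup completed".
	bus.Publish("backup completed:1234", "ok")
	<-done
	bus.Off(ch) // also closes the channel if it hasn't been already
}
```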

View File

@@ -9,162 +9,90 @@ import (
func TestNewBus(t *testing.T) {
g := Goblin(t)
bus := NewBus()
g.Describe("NewBus", func() {
g.It("is not nil", func() {
g.Assert(bus).IsNotNil("Bus expected to not be nil")
g.Assert(bus.listeners).IsNotNil("Bus#listeners expected to not be nil")
})
})
}
func TestBus_Off(t *testing.T) {
g := Goblin(t)
const topic = "test"
g.Describe("Off", func() {
g.It("unregisters listener", func() {
bus := NewBus()
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
bus.On(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
bus.Off(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(0, "Topic still has one or more listeners")
g.Describe("Events", func() {
var bus *Bus
g.BeforeEach(func() {
bus = NewBus()
})
g.It("unregisters correct listener", func() {
bus := NewBus()
listener := make(chan Event)
listener2 := make(chan Event)
listener3 := make(chan Event)
bus.On(listener, topic)
bus.On(listener2, topic)
bus.On(listener3, topic)
g.Assert(len(bus.listeners[topic])).Equal(3, "Listeners were not registered")
bus.Off(listener, topic)
bus.Off(listener3, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Expected 1 listener to remain")
if bus.listeners[topic][0] != listener2 {
// A normal Assert does not properly compare channels.
g.Fail("wrong listener unregistered")
}
// Cleanup
bus.Off(listener2, topic)
})
})
}
func TestBus_On(t *testing.T) {
g := Goblin(t)
const topic = "test"
g.Describe("On", func() {
g.It("registers listener", func() {
bus := NewBus()
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
bus.On(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
if bus.listeners[topic][0] != listener {
// A normal Assert does not properly compare channels.
g.Fail("wrong listener registered")
}
// Cleanup
bus.Off(listener, topic)
})
})
}
func TestBus_Publish(t *testing.T) {
g := Goblin(t)
const topic = "test"
const message = "this is a test message!"
g.Describe("Publish", func() {
g.It("publishes message", func() {
bus := NewBus()
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
bus.On(listener, topic)
g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")
done := make(chan struct{}, 1)
go func() {
select {
case m := <-listener:
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case <-time.After(1 * time.Second):
g.Fail("listener did not receive message in time")
}
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
// Cleanup
bus.Off(listener, topic)
g.Describe("NewBus", func() {
g.It("is not nil", func() {
g.Assert(bus).IsNotNil("Bus expected to not be nil")
})
})
g.It("publishes message to all listeners", func() {
bus := NewBus()
g.Describe("Publish", func() {
const topic = "test"
const message = "this is a test message!"
g.Assert(bus.listeners[topic]).IsNotNil()
g.Assert(len(bus.listeners[topic])).IsZero()
listener := make(chan Event)
listener2 := make(chan Event)
listener3 := make(chan Event)
bus.On(listener, topic)
bus.On(listener2, topic)
bus.On(listener3, topic)
g.Assert(len(bus.listeners[topic])).Equal(3, "Listener was not registered")
g.It("publishes message", func() {
bus := NewBus()
done := make(chan struct{}, 1)
go func() {
for i := 0; i < 3; i++ {
listener := make(chan []byte)
bus.On(listener)
done := make(chan struct{}, 1)
go func() {
select {
case m := <-listener:
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case m := <-listener2:
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case m := <-listener3:
case v := <-listener:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case <-time.After(1 * time.Second):
g.Fail("all listeners did not receive the message in time")
i = 3
g.Fail("listener did not receive message in time")
}
}
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
// Cleanup
bus.Off(listener)
})
// Cleanup
bus.Off(listener, topic)
bus.Off(listener2, topic)
bus.Off(listener3, topic)
g.It("publishes message to all listeners", func() {
bus := NewBus()
listener := make(chan []byte)
listener2 := make(chan []byte)
listener3 := make(chan []byte)
bus.On(listener)
bus.On(listener2)
bus.On(listener3)
done := make(chan struct{}, 1)
go func() {
for i := 0; i < 3; i++ {
select {
case v := <-listener:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case v := <-listener2:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case v := <-listener3:
m := MustDecode(v)
g.Assert(m.Topic).Equal(topic)
g.Assert(m.Data).Equal(message)
case <-time.After(1 * time.Second):
g.Fail("all listeners did not receive the message in time")
i = 3
}
}
done <- struct{}{}
}()
bus.Publish(topic, message)
<-done
// Cleanup
bus.Off(listener)
bus.Off(listener2)
bus.Off(listener3)
})
})
})
}

105
go.mod
View File

@@ -3,116 +3,113 @@ module github.com/pterodactyl/wings
go 1.17
require (
emperror.dev/errors v0.8.0
github.com/AlecAivazis/survey/v2 v2.2.15
emperror.dev/errors v0.8.1
github.com/AlecAivazis/survey/v2 v2.3.4
github.com/Jeffail/gabs/v2 v2.6.1
github.com/NYTimes/logrotate v1.0.0
github.com/apex/log v1.9.0
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
github.com/beevik/etree v1.1.0
github.com/buger/jsonparser v1.1.1
github.com/cenkalti/backoff/v4 v4.1.1
github.com/cenkalti/backoff/v4 v4.1.2
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
github.com/creasty/defaults v1.5.1
github.com/docker/docker v20.10.7+incompatible
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.14+incompatible
github.com/docker/go-connections v0.4.0
github.com/fatih/color v1.12.0
github.com/fatih/color v1.13.0
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
github.com/gabriel-vasile/mimetype v1.3.1
github.com/gabriel-vasile/mimetype v1.4.0
github.com/gammazero/workerpool v1.1.2
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gin-gonic/gin v1.7.2
github.com/gin-gonic/gin v1.7.7
github.com/google/uuid v1.3.0
github.com/gorilla/websocket v1.4.2
github.com/gorilla/websocket v1.5.0
github.com/iancoleman/strcase v0.2.0
github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
github.com/juju/ratelimit v1.0.1
github.com/karrick/godirwalk v1.16.1
github.com/klauspost/pgzip v1.2.5
github.com/magiconair/properties v1.8.5
github.com/mattn/go-colorable v0.1.8
github.com/mholt/archiver/v3 v3.5.0
github.com/magiconair/properties v1.8.6
github.com/mattn/go-colorable v0.1.12
github.com/mholt/archiver/v3 v3.5.1
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/profile v1.6.0
github.com/pkg/sftp v1.13.2
github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f
github.com/spf13/cobra v1.2.1
github.com/pkg/sftp v1.13.4
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/spf13/cobra v1.4.0
github.com/stretchr/testify v1.7.0
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
gopkg.in/ini.v1 v1.62.0
gopkg.in/ini.v1 v1.66.4
gopkg.in/yaml.v2 v2.4.0
)
require github.com/goccy/go-json v0.9.4
require github.com/goccy/go-json v0.9.6
require golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 // indirect
require golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/Microsoft/hcsshim v0.8.20 // indirect
github.com/andybalholm/brotli v1.0.3 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Microsoft/hcsshim v0.9.2 // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/containerd/containerd v1.5.5 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/containerd v1.6.2 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/gammazero/deque v0.1.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/gammazero/deque v0.1.1 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-playground/locales v0.13.0 // indirect
github.com/go-playground/universal-translator v0.17.0 // indirect
github.com/go-playground/validator/v10 v10.8.0 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.10.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/gorilla/mux v1.7.4 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/compress v1.13.2 // indirect
github.com/klauspost/compress v1.15.1 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/magefile/mage v1.11.0 // indirect
github.com/mattn/go-isatty v0.0.13 // indirect
github.com/magefile/mage v1.13.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/nwaples/rardecode v1.1.1 // indirect
github.com/nwaples/rardecode v1.1.3 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/pierrec/lz4/v4 v4.1.8 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pierrec/lz4/v4 v4.1.14 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/prometheus/procfs v0.7.1 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/ugorji/go/codec v1.1.7 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20210729151513-df9385d47c1b // indirect
google.golang.org/grpc v1.39.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)

474
go.sum

File diff suppressed because it is too large

19
internal/notify/notify.go Normal file
View File

@@ -0,0 +1,19 @@
// Package notify handles notifying the operating system of the program's state.
//
// For linux based operating systems, this is done through the systemd socket
// set by "NOTIFY_SOCKET" environment variable.
//
// Currently, no other operating systems are supported.
package notify
func Readiness() error {
return readiness()
}
func Reloading() error {
return reloading()
}
func Stopping() error {
return stopping()
}

View File

@@ -0,0 +1,48 @@
package notify
import (
"io"
"net"
"os"
"strings"
)
func notify(path string, r io.Reader) error {
s := &net.UnixAddr{
Name: path,
Net: "unixgram",
}
c, err := net.DialUnix(s.Net, nil, s)
if err != nil {
return err
}
defer c.Close()
if _, err := io.Copy(c, r); err != nil {
return err
}
return nil
}
func socketNotify(payload string) error {
v, ok := os.LookupEnv("NOTIFY_SOCKET")
if !ok || v == "" {
return nil
}
if err := notify(v, strings.NewReader(payload)); err != nil {
return err
}
return nil
}
func readiness() error {
return socketNotify("READY=1")
}
func reloading() error {
return socketNotify("RELOADING=1")
}
func stopping() error {
return socketNotify("STOPPING=1")
}
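The sd_notify protocol this package speaks is just a datagram written to the unix socket systemd advertises through `NOTIFY_SOCKET` (only set for services with `Type=notify`). A standalone sketch of the same mechanism:

```go
package main

import (
	"net"
	"os"
)

// sdNotify writes a single state datagram to the systemd notification
// socket, mirroring socketNotify above. It is a no-op outside systemd.
func sdNotify(state string) error {
	path := os.Getenv("NOTIFY_SOCKET")
	if path == "" {
		return nil
	}
	addr := &net.UnixAddr{Name: path, Net: "unixgram"}
	conn, err := net.DialUnix(addr.Net, nil, addr)
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte(state))
	return err
}

func main() {
	_ = sdNotify("READY=1") // tell systemd the service finished starting
}
```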

View File

@@ -0,0 +1,16 @@
//go:build !linux
// +build !linux
package notify
func readiness() error {
return nil
}
func reloading() error {
return nil
}
func stopping() error {
return nil
}

View File

@@ -11,9 +11,9 @@ import (
"github.com/apex/log"
"github.com/beevik/etree"
"github.com/buger/jsonparser"
"github.com/goccy/go-json"
"github.com/icza/dyno"
"github.com/magiconair/properties"
"github.com/goccy/go-json"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v2"

View File

@@ -71,6 +71,7 @@ type SftpAuthRequest struct {
IP string `json:"ip"`
SessionID []byte `json:"session_id"`
ClientVersion []byte `json:"client_version"`
Type string `json:"type"`
}
// SftpAuthResponse is returned by the Panel when a pair of SFTP credentials
@@ -78,8 +79,8 @@ type SftpAuthRequest struct {
// matched as well as the permissions that are assigned to the authenticated
// user for the SFTP subsystem.
type SftpAuthResponse struct {
SSHKeys []string `json:"ssh_keys"`
Server string `json:"server"`
Token string `json:"token"`
Permissions []string `json:"permissions"`
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
"mime"
"net"
"net/http"
"net/url"
@@ -13,8 +14,8 @@ import (
"time"
"emperror.dev/errors"
"github.com/google/uuid"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/pterodactyl/wings/server"
)
@@ -77,10 +78,13 @@ func (c *Counter) Write(p []byte) (int, error) {
type DownloadRequest struct {
Directory string
URL *url.URL
FileName string
UseHeader bool
}
type Download struct {
Identifier string
path string
mu sync.RWMutex
req DownloadRequest
server *server.Server
@@ -172,8 +176,28 @@ func (dl *Download) Execute() error {
}
}
fnameparts := strings.Split(dl.req.URL.Path, "/")
p := filepath.Join(dl.req.Directory, fnameparts[len(fnameparts)-1])
if dl.req.UseHeader {
if contentDisposition := res.Header.Get("Content-Disposition"); contentDisposition != "" {
_, params, err := mime.ParseMediaType(contentDisposition)
if err != nil {
return errors.WrapIf(err, "downloader: invalid \"Content-Disposition\" header")
}
if v, ok := params["filename"]; ok {
dl.path = v
}
}
}
if dl.path == "" {
if dl.req.FileName != "" {
dl.path = dl.req.FileName
} else {
parts := strings.Split(dl.req.URL.Path, "/")
dl.path = parts[len(parts)-1]
}
}
p := dl.Path()
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
@@ -205,6 +229,10 @@ func (dl *Download) Progress() float64 {
return dl.progress
}
func (dl *Download) Path() string {
return filepath.Join(dl.req.Directory, dl.path)
}
// Handles a write event by updating the progress completed percentage and firing off
// events to the server websocket as needed.
func (dl *Download) counter(contentLength int64) *Counter {
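The filename precedence this hunk introduces (Content-Disposition when `use_header` is set, then an explicit `file_name`, then the final URL path segment) condenses to the hedged sketch below; note the real `Execute()` returns an error on a malformed header instead of falling through.

```go
package main

import (
	"fmt"
	"mime"
	"net/http"
	"net/url"
	"strings"
)

// resolveName is illustrative only; it mirrors the ordering shown above.
func resolveName(res *http.Response, useHeader bool, fileName string, u *url.URL) string {
	if useHeader {
		if cd := res.Header.Get("Content-Disposition"); cd != "" {
			if _, params, err := mime.ParseMediaType(cd); err == nil {
				if v, ok := params["filename"]; ok {
					return v
				}
			}
		}
	}
	if fileName != "" {
		return fileName
	}
	parts := strings.Split(u.Path, "/")
	return parts[len(parts)-1]
}

func main() {
	u, _ := url.Parse("https://example.com/files/world.zip")
	res := &http.Response{Header: http.Header{"Content-Disposition": {`attachment; filename="hello.zip"`}}}
	fmt.Println(resolveName(res, true, "", u))  // hello.zip
	fmt.Println(resolveName(res, false, "", u)) // world.zip
}
```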

View File

@@ -13,6 +13,8 @@ import (
"strconv"
"strings"
"github.com/pterodactyl/wings/config"
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gin-gonic/gin"
@@ -255,9 +257,12 @@ func postServerPullRemoteFile(c *gin.Context) {
s := ExtractServer(c)
var data struct {
// Deprecated
Directory string `binding:"required_without=RootPath,omitempty" json:"directory"`
RootPath string `binding:"required_without=Directory,omitempty" json:"root"`
URL string `binding:"required" json:"url"`
Directory string `binding:"required_without=RootPath,omitempty" json:"directory"`
RootPath string `binding:"required_without=Directory,omitempty" json:"root"`
URL string `binding:"required" json:"url"`
FileName string `json:"file_name"`
UseHeader bool `json:"use_header"`
Foreground bool `json:"foreground"`
}
if err := c.BindJSON(&data); err != nil {
return
@@ -295,21 +300,41 @@ func postServerPullRemoteFile(c *gin.Context) {
dl := downloader.New(s, downloader.DownloadRequest{
Directory: data.RootPath,
URL: u,
FileName: data.FileName,
UseHeader: data.UseHeader,
})
// Execute this pull in a separate thread since it may take a long time to complete.
go func() {
download := func() error {
s.Log().WithField("download_id", dl.Identifier).WithField("url", u.String()).Info("starting pull of remote file to disk")
if err := dl.Execute(); err != nil {
s.Log().WithField("download_id", dl.Identifier).WithField("error", err).Error("failed to pull remote file")
return err
} else {
s.Log().WithField("download_id", dl.Identifier).Info("completed pull of remote file")
}
}()
return nil
}
if !data.Foreground {
go func() {
_ = download()
}()
c.JSON(http.StatusAccepted, gin.H{
"identifier": dl.Identifier,
})
return
}
c.JSON(http.StatusAccepted, gin.H{
"identifier": dl.Identifier,
})
if err := download(); err != nil {
NewServerError(err, s).Abort(c)
return
}
st, err := s.Filesystem().Stat(dl.Path())
if err != nil {
NewServerError(err, s).AbortFilesystemError(c)
return
}
c.JSON(http.StatusOK, &st)
}
// Stops a remote file download if it exists and belongs to this server.
@@ -537,8 +562,16 @@ func postServerUploadFiles(c *gin.Context) {
directory := c.Query("directory")
maxFileSize := config.Get().Api.UploadLimit
maxFileSizeBytes := maxFileSize * 1024 * 1024
var totalSize int64
for _, header := range headers {
if header.Size > maxFileSizeBytes {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "File " + header.Filename + " is larger than the maximum file upload size of " + strconv.FormatInt(maxFileSize, 10) + " MB.",
})
return
}
totalSize += header.Size
}

View File

@@ -5,8 +5,8 @@ import (
"time"
"github.com/gin-gonic/gin"
ws "github.com/gorilla/websocket"
"github.com/goccy/go-json"
ws "github.com/gorilla/websocket"
"github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/websocket"

View File

@@ -178,7 +178,7 @@ func postServerArchive(c *gin.Context) {
// Ensure the server is offline. Sometimes a "No such container" error gets through
// which means the server is already stopped. We can ignore that.
if err := s.Environment.WaitForStop(60, false); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
if err := s.Environment.WaitForStop(s.Context(), time.Minute, false); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
sendTransferLog("Failed to stop server, aborting transfer..")
l.WithField("error", err).Error("failed to stop server")
return

View File

@@ -7,8 +7,9 @@ import (
"emperror.dev/errors"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/server"
)
@@ -88,12 +89,13 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
eventChan := make(chan events.Event)
eventChan := make(chan []byte)
logOutput := make(chan []byte, 8)
installOutput := make(chan []byte, 4)
h.server.Events().On(eventChan, e...)
h.server.Sink(server.LogSink).On(logOutput)
h.server.Sink(server.InstallSink).On(installOutput)
h.server.Events().On(eventChan) // TODO: make a sinky
h.server.Sink(system.LogSink).On(logOutput)
h.server.Sink(system.InstallSink).On(installOutput)
onError := func(evt string, err2 error) {
h.Logger().WithField("event", evt).WithField("error", err2).Error("failed to send event over server websocket")
@@ -110,19 +112,23 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
select {
case <-ctx.Done():
break
case e := <-logOutput:
sendErr := h.SendJson(Message{Event: server.ConsoleOutputEvent, Args: []string{string(e)}})
case b := <-logOutput:
sendErr := h.SendJson(Message{Event: server.ConsoleOutputEvent, Args: []string{string(b)}})
if sendErr == nil {
continue
}
onError(server.ConsoleOutputEvent, sendErr)
case e := <-installOutput:
sendErr := h.SendJson(Message{Event: server.InstallOutputEvent, Args: []string{string(e)}})
case b := <-installOutput:
sendErr := h.SendJson(Message{Event: server.InstallOutputEvent, Args: []string{string(b)}})
if sendErr == nil {
continue
}
onError(server.InstallOutputEvent, sendErr)
case e := <-eventChan:
case b := <-eventChan:
var e events.Event
if err := events.DecodeTo(b, &e); err != nil {
continue
}
var sendErr error
message := Message{Event: e.Topic}
if str, ok := e.Data.(string); ok {
@@ -148,9 +154,9 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
}
// These functions will automatically close the channel if it hasn't been already.
h.server.Events().Off(eventChan, e...)
h.server.Sink(server.LogSink).Off(logOutput)
h.server.Sink(server.InstallSink).Off(installOutput)
h.server.Events().Off(eventChan)
h.server.Sink(system.LogSink).Off(logOutput)
h.server.Sink(system.InstallSink).Off(installOutput)
// If the internal context is stopped it is either because the parent context
// got canceled or because we ran into an error. If the "err" variable is nil
@@ -158,6 +164,5 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
if err != nil {
return errors.WithStack(err)
}
return nil
}

View File

@@ -11,9 +11,10 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/gbrlsnchs/jwt/v3"
"github.com/goccy/go-json"
"github.com/google/uuid"
"github.com/gorilla/websocket"
"github.com/goccy/go-json"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
@@ -353,7 +354,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
}
err := h.server.HandlePowerAction(action)
if errors.Is(err, context.DeadlineExceeded) {
if errors.Is(err, system.ErrLockerLocked) {
m, _ := h.GetErrorMessage("another power action is currently being processed for this server, please try again later")
_ = h.SendJson(Message{

View File

@@ -142,7 +142,7 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
// instance, otherwise you'll likely hit all types of write errors due to the
// server being suspended.
if s.Environment.State() != environment.ProcessOfflineState {
if err = s.Environment.WaitForStop(120, false); err != nil {
if err = s.Environment.WaitForStop(s.Context(), time.Minute*2, false); err != nil {
if !client.IsErrNotFound(err) {
return errors.WrapIf(err, "server/backup: restore: failed to wait for container stop")
}

View File

@@ -6,12 +6,14 @@ import (
"github.com/gammazero/workerpool"
)
// Parent function that will update all of the defined configuration files for a server
// automatically to ensure that they always use the specified values.
// UpdateConfigurationFiles updates all of the defined configuration files for
// a server automatically to ensure that they always use the specified values.
func (s *Server) UpdateConfigurationFiles() {
pool := workerpool.New(runtime.NumCPU())
s.Log().Debug("acquiring process configuration files...")
files := s.ProcessConfiguration().ConfigurationFiles
s.Log().Debug("acquired process configuration files")
for _, cf := range files {
f := cf
@@ -26,6 +28,8 @@ func (s *Server) UpdateConfigurationFiles() {
if err := f.Parse(p, false); err != nil {
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
}
s.Log().WithField("path", f.FileName).Debug("finished processing server configuration file")
})
}

View File

@@ -19,7 +19,7 @@ func TestName(t *testing.T) {
})
g.It("calls strike once per time period", func() {
t := newConsoleThrottle(1, time.Millisecond * 20)
t := newConsoleThrottle(1, time.Millisecond*20)
var times int
t.strike = func() {
@@ -53,10 +53,10 @@ func TestName(t *testing.T) {
}
func BenchmarkConsoleThrottle(b *testing.B) {
t := newConsoleThrottle(10, time.Millisecond * 10)
t := newConsoleThrottle(10, time.Millisecond*10)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
t.Allow()
}
for i := 0; i < b.N; i++ {
t.Allow()
}
}

View File

@@ -2,6 +2,7 @@ package server
import (
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
)
// Defines all of the possible output events for a server.
@@ -20,7 +21,7 @@ const (
TransferStatusEvent = "transfer status"
)
// Returns the server's emitter instance.
// Events returns the server's emitter instance.
func (s *Server) Events() *events.Bus {
s.emitterLock.Lock()
defer s.emitterLock.Unlock()
@@ -31,3 +32,24 @@ func (s *Server) Events() *events.Bus {
return s.emitter
}
// Sink returns the instantiated and named sink for a server. If the sink has
// not been configured yet this function will cause a panic condition.
func (s *Server) Sink(name system.SinkName) *system.SinkPool {
sink, ok := s.sinks[name]
if !ok {
s.Log().Fatalf("attempt to access nil sink: %s", name)
}
return sink
}
// DestroyAllSinks iterates over all of the sinks configured for the server and
// destroys their instances. Note that this will cause a panic if you attempt
// to call Server.Sink() again after. This function is only used when a server
// is being deleted from the system.
func (s *Server) DestroyAllSinks() {
s.Log().Info("destroying all registered sinks for server instance")
for _, sink := range s.sinks {
sink.Destroy()
}
}
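These named sinks are thin handles over the shared `system.SinkPool` moved earlier in this diff; a small sketch using only the pool calls that appear here (`NewSinkPool`, `On`, `Push`, `Off`):

```go
package main

import (
	"fmt"

	"github.com/pterodactyl/wings/system"
)

func main() {
	pool := system.NewSinkPool()
	ch := make(chan []byte, 8)
	pool.On(ch) // subscribe a buffered channel to the pool
	pool.Push([]byte("Server marked as running"))
	fmt.Println(string(<-ch))
	pool.Off(ch) // detach; also closes the channel if it hasn't been already
}
```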

View File

@@ -10,6 +10,7 @@ import (
"path/filepath"
"strconv"
"strings"
"time"
"emperror.dev/errors"
"github.com/apex/log"
@@ -17,23 +18,23 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/system"
)
// Executes the installation stack for a server process. Bubbles any errors up to the calling
// function which should handle contacting the panel to notify it of the server state.
// Install executes the installation stack for a server process. Bubbles any
// errors up to the calling function which should handle contacting the panel to
// notify it of the server state.
//
// Pass true as the first argument in order to execute a server sync before the process to
// ensure the latest information is used.
// Pass true as the first argument in order to execute a server sync before the
// process to ensure the latest information is used.
func (s *Server) Install(sync bool) error {
if sync {
s.Log().Info("syncing server state with remote source before executing installation process")
if err := s.Sync(); err != nil {
return err
return errors.WrapIf(err, "install: failed to sync server state with Panel")
}
}
@@ -57,7 +58,7 @@ func (s *Server) Install(sync bool) error {
// error to this log entry. Otherwise ignore it in this log since whatever is calling
// this function should handle the error and will end up logging the same one.
if err == nil {
l.WithField("error", serr)
l.WithField("error", err)
}
l.Warn("failed to notify panel of server install state")
@@ -71,7 +72,7 @@ func (s *Server) Install(sync bool) error {
// the install is completed.
s.Events().Publish(InstallCompletedEvent, "")
return err
return errors.WithStackIf(err)
}
// Reinstalls a server's software by utilizing the install script for the server egg. This
@@ -79,8 +80,8 @@ func (s *Server) Install(sync bool) error {
func (s *Server) Reinstall() error {
if s.Environment.State() != environment.ProcessOfflineState {
s.Log().Debug("waiting for server instance to enter a stopped state")
if err := s.Environment.WaitForStop(10, true); err != nil {
return err
if err := s.Environment.WaitForStop(s.Context(), time.Second*10, true); err != nil {
return errors.WrapIf(err, "install: failed to stop running environment")
}
}
@@ -110,9 +111,7 @@ func (s *Server) internalInstall() error {
type InstallationProcess struct {
Server *Server
Script *remote.InstallationScript
client *client.Client
context context.Context
client *client.Client
}
// Generates a new installation process struct that will be used to create containers,
@@ -127,7 +126,6 @@ func NewInstallationProcess(s *Server, script *remote.InstallationScript) (*Inst
return nil, err
} else {
proc.client = c
proc.context = s.Context()
}
return proc, nil
@@ -157,7 +155,7 @@ func (s *Server) SetRestoring(state bool) {
// Removes the installer container for the server.
func (ip *InstallationProcess) RemoveContainer() error {
err := ip.client.ContainerRemove(ip.context, ip.Server.ID()+"_installer", types.ContainerRemoveOptions{
err := ip.client.ContainerRemove(ip.Server.Context(), ip.Server.ID()+"_installer", types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
@@ -167,11 +165,10 @@ func (ip *InstallationProcess) RemoveContainer() error {
return nil
}
// Runs the installation process, this is done in a background thread. This will configure
// the required environment, and then spin up the installation container.
//
// Once the container finishes installing the results will be stored in an installation
// log in the server's configuration directory.
// Run runs the installation process, this is done in a background thread.
// This will configure the required environment, and then spin up the
// installation container. Once the container finishes installing the results
// are stored in an installation log in the server's configuration directory.
func (ip *InstallationProcess) Run() error {
ip.Server.Log().Debug("acquiring installation process lock")
if !ip.Server.installing.SwapIf(true) {
@@ -207,7 +204,7 @@ func (ip *InstallationProcess) Run() error {
// Returns the location of the temporary data for the installation process.
func (ip *InstallationProcess) tempDir() string {
return filepath.Join(os.TempDir(), "pterodactyl/", ip.Server.ID())
return filepath.Join(config.Get().System.TmpDirectory, ip.Server.ID())
}
// Writes the installation script to a temporary file on the host machine so that it
@@ -267,9 +264,9 @@ func (ip *InstallationProcess) pullInstallationImage() error {
imagePullOptions.RegistryAuth = b64
}
r, err := ip.client.ImagePull(context.Background(), ip.Script.ContainerImage, imagePullOptions)
r, err := ip.client.ImagePull(ip.Server.Context(), ip.Script.ContainerImage, imagePullOptions)
if err != nil {
images, ierr := ip.client.ImageList(context.Background(), types.ImageListOptions{})
images, ierr := ip.client.ImageList(ip.Server.Context(), types.ImageListOptions{})
if ierr != nil {
// Well damn, something has gone really wrong here, just go ahead and abort there
// isn't much anything we can do to try and self-recover from this.
@@ -312,9 +309,10 @@ func (ip *InstallationProcess) pullInstallationImage() error {
return nil
}
// Runs before the container is executed. This pulls down the required docker container image
// as well as writes the installation script to the disk. This process is executed in an async
// manner, if either one fails the error is returned.
// BeforeExecute runs before the container is executed. This pulls down the
// required docker container image as well as writes the installation script to
// the disk. This process is executed in an async manner, if either one fails
// the error is returned.
func (ip *InstallationProcess) BeforeExecute() error {
if err := ip.writeScriptToDisk(); err != nil {
return errors.WithMessage(err, "failed to write installation script to disk")
@@ -340,7 +338,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
defer ip.RemoveContainer()
ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
reader, err := ip.client.ContainerLogs(ip.context, containerId, types.ContainerLogsOptions{
reader, err := ip.client.ContainerLogs(ip.Server.Context(), containerId, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: false,
@@ -395,12 +393,13 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
return nil
}
// Executes the installation process inside a specially created docker container.
// Execute executes the installation process inside a specially created docker
// container.
func (ip *InstallationProcess) Execute() (string, error) {
// Create a child context that is canceled once this function is done running. This
// will also be canceled if the parent context (from the Server struct) is canceled
// which occurs if the server is deleted.
ctx, cancel := context.WithCancel(ip.context)
ctx, cancel := context.WithCancel(ip.Server.Context())
defer cancel()
conf := &container.Config{
@@ -511,18 +510,15 @@ func (ip *InstallationProcess) Execute() (string, error) {
// the server configuration directory, as well as to a websocket listener so
// that the process can be viewed in the panel by administrators.
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
reader, err := ip.client.ContainerLogs(ctx, id, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
opts := types.ContainerLogsOptions{ShowStdout: true, ShowStderr: true, Follow: true}
reader, err := ip.client.ContainerLogs(ctx, id, opts)
if err != nil {
return err
}
defer reader.Close()
err = system.ScanReader(reader, ip.Server.Sink(InstallSink).Push)
if err != nil {
err = system.ScanReader(reader, ip.Server.Sink(system.InstallSink).Push)
if err != nil && !errors.Is(err, context.Canceled) {
ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines")
}
return nil

View File

@@ -5,11 +5,13 @@ import (
"regexp"
"strconv"
"sync"
"time"
"github.com/apex/log"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/events"
"github.com/pterodactyl/wings/remote"
)
@@ -44,7 +46,7 @@ func (dsl *diskSpaceLimiter) Reset() {
func (dsl *diskSpaceLimiter) Trigger() {
dsl.o.Do(func() {
dsl.server.PublishConsoleOutputFromDaemon("Server is exceeding the assigned disk space limit, stopping process now.")
if err := dsl.server.Environment.WaitForStop(60, true); err != nil {
if err := dsl.server.Environment.WaitForStop(dsl.server.Context(), time.Minute, true); err != nil {
dsl.server.Log().WithField("error", err).Error("failed to stop server after exceeding space limit!")
}
})
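WaitForStop now takes the server's context plus a time.Duration instead of a bare second count. The limiter wrapping it relies on sync.Once so the stop action fires at most once per boot, and Reset re-arms it. A sketch of that one-shot trigger/reset pattern, with illustrative names rather than the real diskSpaceLimiter fields:

package main

import (
	"fmt"
	"sync"
)

// oneShot fires its action at most once between Reset calls, mirroring the
// diskSpaceLimiter's use of sync.Once (the names here are illustrative).
type oneShot struct {
	mu sync.Mutex
	o  sync.Once
}

func (s *oneShot) Trigger(action func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.o.Do(action)
}

// Reset re-arms the trigger by replacing the consumed sync.Once.
func (s *oneShot) Reset() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.o = sync.Once{}
}

func main() {
	var s oneShot
	s.Trigger(func() { fmt.Println("stopping server") }) // fires
	s.Trigger(func() { fmt.Println("stopping server") }) // suppressed
	s.Reset()
	s.Trigger(func() { fmt.Println("stopping server") }) // fires again
}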
@@ -72,47 +74,57 @@ func (s *Server) processConsoleOutputEvent(v []byte) {
return
}
s.Sink(LogSink).Push(v)
s.Sink(system.LogSink).Push(v)
}
// StartEventListeners adds all the internal event listeners we want to use for
// a server. These listeners can only be removed by deleting the server as they
// should last for the duration of the process' lifetime.
func (s *Server) StartEventListeners() {
state := make(chan events.Event)
stats := make(chan events.Event)
docker := make(chan events.Event)
c := make(chan []byte, 8)
limit := newDiskLimiter(s)
s.Log().Debug("registering event listeners: console, state, resources...")
s.Environment.Events().On(c)
s.Environment.SetLogCallback(s.processConsoleOutputEvent)
go func() {
l := newDiskLimiter(s)
for {
select {
case e := <-state:
go func() {
// Reset the throttler when the process is started.
if e.Data == environment.ProcessStartingState {
l.Reset()
s.Throttler().Reset()
case v := <-c:
go func(v []byte, limit *diskSpaceLimiter) {
var e events.Event
if err := events.DecodeTo(v, &e); err != nil {
return
}
s.OnStateChange()
}()
case e := <-stats:
go func() {
s.resources.UpdateStats(e.Data.(environment.Stats))
// If there is no disk space available at this point, trigger the server
// disk limiter logic which will start to stop the running instance.
if !s.Filesystem().HasSpaceAvailable(true) {
l.Trigger()
}
s.Events().Publish(StatsEvent, s.Proc())
}()
case e := <-docker:
go func() {
switch e.Topic {
case environment.ResourceEvent:
{
var stats struct {
Topic string
Data environment.Stats
}
if err := events.DecodeTo(v, &stats); err != nil {
s.Log().WithField("error", err).Warn("failed to decode server resource event")
return
}
s.resources.UpdateStats(stats.Data)
// If there is no disk space available at this point, trigger the server
// disk limiter logic, which will begin stopping the running instance.
if !s.Filesystem().HasSpaceAvailable(true) {
limit.Trigger()
}
s.Events().Publish(StatsEvent, s.Proc())
}
case environment.StateChangeEvent:
{
// Reset the throttler when the process is started.
if e.Data == environment.ProcessStartingState {
limit.Reset()
s.Throttler().Reset()
}
s.OnStateChange()
}
case environment.DockerImagePullStatus:
s.Events().Publish(InstallOutputEvent, e.Data)
case environment.DockerImagePullStarted:
@@ -120,18 +132,13 @@ func (s *Server) StartEventListeners() {
case environment.DockerImagePullCompleted:
s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
default:
s.Log().WithField("topic", e.Topic).Error("unhandled docker event topic")
}
}()
}(v, limit)
case <-s.Context().Done():
return
}
}
}()
s.Log().Debug("registering event listeners: console, state, resources...")
s.Environment.SetLogCallback(s.processConsoleOutputEvent)
s.Environment.Events().On(state, environment.StateChangeEvent)
s.Environment.Events().On(stats, environment.ResourceEvent)
s.Environment.Events().On(docker, dockerEvents...)
}
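The rewritten listener replaces three typed event channels with a single []byte channel and decodes the topic before the payload, so a malformed event is dropped instead of triggering a panic. A self-contained sketch of that two-pass decode loop, using encoding/json and hypothetical topic names in place of the wings events and environment packages:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"
)

// event mirrors the two-pass decode above: the topic is read first, then a
// topic-specific payload. The wings events package uses go-json, but
// encoding/json behaves identically for this sketch.
type event struct {
	Topic string
	Data  json.RawMessage
}

type stats struct {
	Memory uint64
	CPU    float64
}

func listen(ctx context.Context, c <-chan []byte) {
	for {
		select {
		case v := <-c:
			var e event
			if err := json.Unmarshal(v, &e); err != nil {
				continue // malformed events are dropped rather than panicking
			}
			switch e.Topic {
			case "resources":
				var s stats
				if err := json.Unmarshal(e.Data, &s); err != nil {
					fmt.Println("failed to decode server resource event:", err)
					continue
				}
				fmt.Printf("stats: %+v\n", s)
			default:
				fmt.Println("unhandled event topic:", e.Topic)
			}
		case <-ctx.Done():
			return // the listener exits when the server's context is canceled
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	c := make(chan []byte) // unbuffered: the send below blocks until received
	go listen(ctx, c)
	c <- []byte(`{"Topic":"resources","Data":{"Memory":1024,"CPU":1.5}}`)
	cancel()
	time.Sleep(50 * time.Millisecond) // give the goroutine time to print
}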
var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))")

View File

@@ -133,11 +133,11 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
return s.Environment.Start(s.Context())
case PowerActionStop:
// We're specifically waiting for the process to be stopped here, otherwise the lock is released
// too soon, and you can rack up all sorts of issues.
return s.Environment.WaitForStop(10*60, true)
fallthrough
case PowerActionRestart:
if err := s.Environment.WaitForStop(10*60, true); err != nil {
// We're specifically waiting for the process to be stopped here, otherwise the lock is
// released too soon, and you can rack up all sorts of issues.
if err := s.Environment.WaitForStop(s.Context(), time.Minute*10, true); err != nil {
// Even timeout errors should be bubbled back up the stack. If the process didn't stop
// nicely, but the terminate argument was passed then the server is stopped without an
// error being returned.
@@ -149,6 +149,10 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
return err
}
if action == PowerActionStop {
return nil
}
// Now actually try to start the process by executing the normal pre-boot logic.
if err := s.onBeforeStart(); err != nil {
return err
@@ -156,7 +160,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
return s.Environment.Start(s.Context())
case PowerActionTerminate:
return s.Environment.Terminate(os.Kill)
return s.Environment.Terminate(s.Context(), os.Kill)
}
return errors.New("attempting to handle unknown power action")
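The stop and restart branches now share one code path: stop falls through into the restart case, both wait on WaitForStop with the server's context, and a plain stop returns before any boot logic runs. A compact sketch of that control flow, with stub helpers standing in for the real environment calls:

package main

import (
	"errors"
	"fmt"
)

type powerAction string

const (
	actionStart     powerAction = "start"
	actionStop      powerAction = "stop"
	actionRestart   powerAction = "restart"
	actionTerminate powerAction = "kill"
)

// handle sketches the consolidated switch: "stop" falls through into the
// restart branch so both share one wait-for-stop path, and a plain stop
// simply returns before the boot logic runs.
func handle(action powerAction) error {
	switch action {
	case actionStart:
		return start()
	case actionStop:
		fallthrough
	case actionRestart:
		if err := waitForStop(); err != nil {
			return err
		}
		if action == actionStop {
			return nil // a plain stop never reboots
		}
		return start()
	case actionTerminate:
		return terminate()
	}
	return errors.New("attempting to handle unknown power action")
}

func start() error       { fmt.Println("booting"); return nil }
func waitForStop() error { fmt.Println("waiting for stop"); return nil }
func terminate() error   { fmt.Println("terminating"); return nil }

func main() {
	_ = handle(actionStop)
	_ = handle(actionRestart)
}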
@@ -197,15 +201,19 @@ func (s *Server) onBeforeStart() error {
// we don't need to actively do anything about it at this point, worse comes to worst the
// server starts in a weird state and the user can manually adjust.
s.PublishConsoleOutputFromDaemon("Updating process configuration files...")
s.Log().Debug("updating server configuration files...")
s.UpdateConfigurationFiles()
s.Log().Debug("updated server configuration files")
if config.Get().System.CheckPermissionsOnBoot {
s.PublishConsoleOutputFromDaemon("Ensuring file permissions are set correctly, this could take a few seconds...")
// Ensure all the server file permissions are set correctly before booting the process.
s.Log().Debug("chowning server root directory...")
if err := s.Filesystem().Chown("/"); err != nil {
return errors.WithMessage(err, "failed to chown root server directory during pre-boot process")
}
}
s.Log().Info("completed server preflight, starting boot process...")
return nil
}

View File

@@ -70,10 +70,10 @@ type Server struct {
wsBag *WebsocketBag
wsBagLocker sync.Mutex
sinks map[SinkName]*sinkPool
sinks map[system.SinkName]*system.SinkPool
logSink *sinkPool
installSink *sinkPool
logSink *system.SinkPool
installSink *system.SinkPool
}
// New returns a new server instance with a context and all of the default
@@ -88,9 +88,9 @@ func New(client remote.Client) (*Server, error) {
transferring: system.NewAtomicBool(false),
restoring: system.NewAtomicBool(false),
powerLock: system.NewLocker(),
sinks: map[SinkName]*sinkPool{
LogSink: newSinkPool(),
InstallSink: newSinkPool(),
sinks: map[system.SinkName]*system.SinkPool{
system.LogSink: system.NewSinkPool(),
system.InstallSink: system.NewSinkPool(),
},
}
if err := defaults.Set(&s); err != nil {

View File

@@ -1,6 +1,8 @@
package server
import (
"time"
"github.com/pterodactyl/wings/environment/docker"
"github.com/pterodactyl/wings/environment"
@@ -58,7 +60,7 @@ func (s *Server) SyncWithEnvironment() {
s.Log().Info("server suspended with running process state, terminating now")
go func(s *Server) {
if err := s.Environment.WaitForStop(60, true); err != nil {
if err := s.Environment.WaitForStop(s.Context(), time.Minute, true); err != nil {
s.Log().WithField("error", err).Warn("failed to terminate server environment after suspension")
}
}(s)

View File

@@ -125,7 +125,7 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
return f, nil
}
// Filecmd hander for basic SFTP system calls related to files, but not anything to do with reading
// Filecmd handler for basic SFTP system calls related to files, but not anything to do with reading
// or writing to those files.
func (h *Handler) Filecmd(request *sftp.Request) error {
if h.ro {

View File

@@ -1,11 +1,17 @@
package sftp
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"io"
"io/ioutil"
"net"
"os"
"path"
@@ -16,7 +22,6 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/pkg/sftp"
"golang.org/x/crypto/ed25519"
"golang.org/x/crypto/ssh"
"github.com/pterodactyl/wings/config"
@@ -51,37 +56,28 @@ func New(m *server.Manager) *SFTPServer {
// SFTP connections. This will automatically generate RSA, ECDSA, and ED25519
// host keys if they do not already exist on the system, for host key
// verification purposes.
func (c *SFTPServer) Run() error {
if _, err := os.Stat(c.PrivateKeyPath()); os.IsNotExist(err) {
if err := c.generateED25519PrivateKey(); err != nil {
return err
}
} else if err != nil {
return errors.Wrap(err, "sftp: could not stat private key file")
}
pb, err := os.ReadFile(c.PrivateKeyPath())
if err != nil {
return errors.Wrap(err, "sftp: could not read private key file")
}
private, err := ssh.ParsePrivateKey(pb)
keys, err := c.loadPrivateKeys()
if err != nil {
return err
}
conf := &ssh.ServerConfig{
NoClientAuth: false,
MaxAuthTries: 6,
PasswordCallback: c.passwordCallback,
NoClientAuth: false,
MaxAuthTries: 6,
PasswordCallback: c.passwordCallback,
PublicKeyCallback: c.publicKeyCallback,
}
for _, k := range keys {
conf.AddHostKey(k)
}
conf.AddHostKey(private)
listener, err := net.Listen("tcp", c.Listen)
if err != nil {
return err
}
public := string(ssh.MarshalAuthorizedKey(private.PublicKey()))
log.WithField("listen", c.Listen).WithField("public_key", strings.Trim(public, "\n")).Info("sftp server listening for connections")
log.WithField("listen", c.Listen).Info("sftp server listening for connections")
for {
if conn, _ := listener.Accept(); conn != nil {
go func(conn net.Conn) {
@@ -107,7 +103,7 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
// If it's not a session channel we just move on because it's not something we
// know how to handle at this point.
if ch.ChannelType() != "session" {
ch.Reject(ssh.UnknownChannelType, "unknown channel type")
_ = ch.Reject(ssh.UnknownChannelType, "unknown channel type")
continue
}
@@ -121,7 +117,7 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
// Channels have a type that is dependent on the protocol. For SFTP
// this is "subsystem" with a payload that (should) be "sftp". Discard
// anything else we receive ("pty", "shell", etc)
req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
_ = req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
}
}(requests)
@@ -139,6 +135,7 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
return s.ID() == uuid
})
if srv == nil {
_ = conn.Close()
continue
}
@@ -146,37 +143,160 @@ func (c *SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
// them access to the underlying filesystem.
handler := sftp.NewRequestServer(channel, NewHandler(sconn, srv).Handlers())
if err := handler.Serve(); err == io.EOF {
handler.Close()
_ = handler.Close()
}
}
}
// Generates a new ED25519 private key that is used for host authentication when
// a user connects to the SFTP server.
func (c *SFTPServer) generateED25519PrivateKey() error {
_, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return errors.Wrap(err, "sftp: failed to generate ED25519 private key")
func (c *SFTPServer) loadPrivateKeys() ([]ssh.Signer, error) {
if _, err := os.Stat(path.Join(c.BasePath, ".sftp/id_rsa")); err != nil {
if !os.IsNotExist(err) {
return nil, err
}
if err := c.generateRSAPrivateKey(); err != nil {
return nil, err
}
}
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath()), 0o755); err != nil {
return errors.Wrap(err, "sftp: could not create internal sftp data directory")
}
o, err := os.OpenFile(c.PrivateKeyPath(), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
rsaBytes, err := ioutil.ReadFile(path.Join(c.BasePath, ".sftp/id_rsa"))
if err != nil {
return errors.WithStack(err)
return nil, errors.Wrap(err, "sftp/server: could not read private key file")
}
rsaPrivateKey, err := ssh.ParsePrivateKey(rsaBytes)
if err != nil {
return nil, err
}
if _, err := os.Stat(path.Join(c.BasePath, ".sftp/id_ecdsa")); err != nil {
if !os.IsNotExist(err) {
return nil, err
}
if err := c.generateECDSAPrivateKey(); err != nil {
return nil, err
}
}
ecdsaBytes, err := ioutil.ReadFile(path.Join(c.BasePath, ".sftp/id_ecdsa"))
if err != nil {
return nil, errors.Wrap(err, "sftp/server: could not read private key file")
}
ecdsaPrivateKey, err := ssh.ParsePrivateKey(ecdsaBytes)
if err != nil {
return nil, err
}
if _, err := os.Stat(path.Join(c.BasePath, ".sftp/id_ed25519")); err != nil {
if !os.IsNotExist(err) {
return nil, err
}
if err := c.generateEd25519PrivateKey(); err != nil {
return nil, err
}
}
ed25519Bytes, err := ioutil.ReadFile(path.Join(c.BasePath, ".sftp/id_ed25519"))
if err != nil {
return nil, errors.Wrap(err, "sftp/server: could not read private key file")
}
ed25519PrivateKey, err := ssh.ParsePrivateKey(ed25519Bytes)
if err != nil {
return nil, err
}
return []ssh.Signer{
rsaPrivateKey,
ecdsaPrivateKey,
ed25519PrivateKey,
}, nil
}
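loadPrivateKeys repeats the same stat/generate/read/parse stanza for each algorithm; the three stanzas could also be collapsed into a loop. A sketch of that shape, assuming the same .sftp/id_<name> on-disk layout and eliding key generation (the real server writes missing keys first):

package main

import (
	"fmt"
	"os"
	"path"

	"golang.org/x/crypto/ssh"
)

// loadHostKeys is a loop-based variant of loadPrivateKeys: one stanza per
// algorithm collapses into a range over the key names.
func loadHostKeys(base string) ([]ssh.Signer, error) {
	var keys []ssh.Signer
	for _, name := range []string{"rsa", "ecdsa", "ed25519"} {
		b, err := os.ReadFile(path.Join(base, ".sftp", "id_"+name))
		if err != nil {
			return nil, fmt.Errorf("sftp/server: could not read %s key: %w", name, err)
		}
		signer, err := ssh.ParsePrivateKey(b)
		if err != nil {
			return nil, err
		}
		keys = append(keys, signer)
	}
	return keys, nil
}

func main() {
	keys, err := loadHostKeys("/var/lib/pterodactyl")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("loaded", len(keys), "host keys")
}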
// generateRSAPrivateKey generates an RSA-4096 private key that will be used by the SFTP server.
func (c *SFTPServer) generateRSAPrivateKey() error {
key, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return err
}
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("rsa")), 0o755); err != nil {
return errors.Wrap(err, "sftp/server: could not create .sftp directory")
}
o, err := os.OpenFile(c.PrivateKeyPath("rsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
}
defer o.Close()
b, err := x509.MarshalPKCS8PrivateKey(priv)
if err != nil {
return errors.Wrap(err, "sftp: failed to marshal private key into bytes")
}
if err := pem.Encode(o, &pem.Block{Type: "PRIVATE KEY", Bytes: b}); err != nil {
return errors.Wrap(err, "sftp: failed to write ED25519 private key to disk")
if err := pem.Encode(o, &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(key),
}); err != nil {
return err
}
return nil
}
// generateECDSAPrivateKey generates an ECDSA private key on the P-521 curve that will be used by the SFTP server.
func (c *SFTPServer) generateECDSAPrivateKey() error {
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return err
}
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("ecdsa")), 0o755); err != nil {
return errors.Wrap(err, "sftp/server: could not create .sftp directory")
}
o, err := os.OpenFile(c.PrivateKeyPath("ecdsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
}
defer o.Close()
privBytes, err := x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return err
}
if err := pem.Encode(o, &pem.Block{
Type: "PRIVATE KEY",
Bytes: privBytes,
}); err != nil {
return err
}
return nil
}
// generateEd25519PrivateKey generates an ed25519 private key that will be used by the SFTP server.
func (c *SFTPServer) generateEd25519PrivateKey() error {
_, key, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return err
}
if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("ed25519")), 0o755); err != nil {
return errors.Wrap(err, "sftp/server: could not create .sftp directory")
}
o, err := os.OpenFile(c.PrivateKeyPath("ed25519"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
}
defer o.Close()
privBytes, err := x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return err
}
if err := pem.Encode(o, &pem.Block{
Type: "PRIVATE KEY",
Bytes: privBytes,
}); err != nil {
return err
}
return nil
}
// PrivateKeyPath returns the path to the host private key for this server instance.
func (c *SFTPServer) PrivateKeyPath(name string) string {
return path.Join(c.BasePath, ".sftp", "id_"+name)
}
// A function capable of validating user credentials with the Panel API.
func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
request := remote.SftpAuthRequest{
@@ -185,6 +305,7 @@ func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.
IP: conn.RemoteAddr().String(),
SessionID: conn.SessionID(),
ClientVersion: conn.ClientVersion(),
Type: "password",
}
logger := log.WithFields(log.Fields{"subsystem": "sftp", "username": conn.User(), "ip": conn.RemoteAddr().String()})
@@ -195,6 +316,11 @@ func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.
return nil, &remote.SftpInvalidCredentialsError{}
}
if len(pass) < 1 {
logger.Warn("failed to validate user credentials (invalid format)")
return nil, &remote.SftpInvalidCredentialsError{}
}
resp, err := c.manager.Client().ValidateSftpCredentials(context.Background(), request)
if err != nil {
if _, ok := err.(*remote.SftpInvalidCredentialsError); ok {
@@ -217,7 +343,55 @@ func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.
return sshPerm, nil
}
// PrivateKeyPath returns the path the host private key for this server instance.
func (c *SFTPServer) PrivateKeyPath() string {
return path.Join(c.BasePath, ".sftp/id_ed25519")
func (c *SFTPServer) publicKeyCallback(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
request := remote.SftpAuthRequest{
User: conn.User(),
Pass: "KEKW",
IP: conn.RemoteAddr().String(),
SessionID: conn.SessionID(),
ClientVersion: conn.ClientVersion(),
Type: "publicKey",
}
logger := log.WithFields(log.Fields{"subsystem": "sftp", "username": conn.User(), "ip": conn.RemoteAddr().String()})
logger.Debug("validating public key for SFTP connection")
if !validUsernameRegexp.MatchString(request.User) {
logger.Warn("failed to validate user credentials (invalid format)")
return nil, &remote.SftpInvalidCredentialsError{}
}
resp, err := c.manager.Client().ValidateSftpCredentials(context.Background(), request)
if err != nil {
if _, ok := err.(*remote.SftpInvalidCredentialsError); ok {
logger.Warn("failed to validate user credentials (invalid username or password)")
} else {
logger.WithField("error", err).Error("encountered an error while trying to validate user credentials")
}
return nil, err
}
if len(resp.SSHKeys) < 1 {
return nil, &remote.SftpInvalidCredentialsError{}
}
for _, k := range resp.SSHKeys {
storedPublicKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
if err != nil {
return nil, err
}
if !bytes.Equal(key.Marshal(), storedPublicKey.Marshal()) {
continue
}
return &ssh.Permissions{
Extensions: map[string]string{
"uuid": resp.Server,
"user": conn.User(),
"permissions": strings.Join(resp.Permissions, ","),
},
}, nil
}
return nil, &remote.SftpInvalidCredentialsError{}
}
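The public key check marshals both keys to SSH wire format and compares the bytes, which sidesteps comments and whitespace in the stored authorized-key strings. A runnable sketch of that comparison using golang.org/x/crypto/ssh:

package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

// matches reports whether the key offered by the client equals any stored
// authorized key, using the same wire-format comparison as the
// publicKeyCallback above.
func matches(offered ssh.PublicKey, stored []string) bool {
	for _, k := range stored {
		parsed, _, _, _, err := ssh.ParseAuthorizedKey([]byte(k))
		if err != nil {
			continue // skip unparsable entries instead of failing the whole check
		}
		if bytes.Equal(offered.Marshal(), parsed.Marshal()) {
			return true
		}
	}
	return false
}

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		fmt.Println(err)
		return
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		fmt.Println(err)
		return
	}
	stored := []string{string(ssh.MarshalAuthorizedKey(sshPub))}
	fmt.Println(matches(sshPub, stored)) // true
}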

View File

@@ -42,7 +42,6 @@ func (l *Locker) Acquire() error {
return nil
}
// TryAcquire will attempt to acquire a power-lock until the context provided
// is canceled.
func (l *Locker) TryAcquire(ctx context.Context) error {
@@ -51,7 +50,9 @@ func (l *Locker) TryAcquire(ctx context.Context) error {
return nil
case <-ctx.Done():
if err := ctx.Err(); err != nil {
return err
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
return ErrLockerLocked
}
}
return nil
}
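With this change both context.DeadlineExceeded and context.Canceled surface as ErrLockerLocked, so callers match a single sentinel instead of two context errors. A minimal channel-based locker sketch showing the mapping; the real Locker carries more state than this:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var ErrLockerLocked = errors.New("locker: cannot acquire lock, already locked")

// locker is a minimal channel-based mutex in the style of the wings Locker:
// a buffered channel of capacity one holds the lock token.
type locker struct {
	ch chan bool
}

func newLocker() *locker { return &locker{ch: make(chan bool, 1)} }

// tryAcquire blocks until the lock is free or the context ends; context
// cancelation and deadline expiry both surface as ErrLockerLocked.
func (l *locker) tryAcquire(ctx context.Context) error {
	select {
	case l.ch <- true:
		return nil
	case <-ctx.Done():
		if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
			return ErrLockerLocked
		}
		return nil
	}
}

func (l *locker) release() {
	select {
	case <-l.ch:
	default:
	}
}

func main() {
	l := newLocker()
	_ = l.tryAcquire(context.Background()) // first acquire succeeds

	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond)
	defer cancel()
	err := l.tryAcquire(ctx) // second acquire times out while locked
	fmt.Println(errors.Is(err, ErrLockerLocked))

	l.release()
}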

View File

@@ -81,7 +81,7 @@ func TestPower(t *testing.T) {
err := l.TryAcquire(ctx)
g.Assert(err).IsNotNil()
g.Assert(errors.Is(err, context.DeadlineExceeded)).IsTrue()
g.Assert(errors.Is(err, ErrLockerLocked)).IsTrue()
g.Assert(cap(l.ch)).Equal(1)
g.Assert(len(l.ch)).Equal(1)
g.Assert(l.IsLocked()).IsTrue()
@@ -95,7 +95,7 @@ func TestPower(t *testing.T) {
l.Acquire()
go func() {
time.AfterFunc(time.Millisecond * 50, func() {
time.AfterFunc(time.Millisecond*50, func() {
l.Release()
})
}()

View File

@@ -44,7 +44,7 @@ func (r *Rate) Try() bool {
// Reset resets the internal state of the rate limiter back to zero.
func (r *Rate) Reset() {
r.mu.Lock()
r.count = 0
r.last = time.Now()
r.count = 0
r.last = time.Now()
r.mu.Unlock()
}

View File

@@ -47,7 +47,7 @@ func TestRate(t *testing.T) {
g.It("resets back to zero when called", func() {
r := NewRate(10, time.Second)
for i := 0; i < 100; i++ {
if i % 10 == 0 {
if i%10 == 0 {
r.Reset()
}
g.Assert(r.Try()).IsTrue()

View File

@@ -1,4 +1,4 @@
package server
package system
import (
"sync"
@@ -16,20 +16,20 @@ const (
InstallSink SinkName = "install"
)
// sinkPool represents a pool with sinks.
type sinkPool struct {
// SinkPool represents a pool with sinks.
type SinkPool struct {
mu sync.RWMutex
sinks []chan []byte
}
// newSinkPool returns a new empty sinkPool. A sink pool generally lives with a
// NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
// server instance for its full lifetime.
func newSinkPool() *sinkPool {
return &sinkPool{}
func NewSinkPool() *SinkPool {
return &SinkPool{}
}
// On adds a channel to the sink pool instance.
func (p *sinkPool) On(c chan []byte) {
func (p *SinkPool) On(c chan []byte) {
p.mu.Lock()
p.sinks = append(p.sinks, c)
p.mu.Unlock()
@@ -37,7 +37,7 @@ func (p *sinkPool) On(c chan []byte) {
// Off removes a given channel from the sink pool. If no matching sink is found
// this function is a no-op. If a matching channel is found, it will be removed.
func (p *sinkPool) Off(c chan []byte) {
func (p *SinkPool) Off(c chan []byte) {
p.mu.Lock()
defer p.mu.Unlock()
@@ -66,7 +66,7 @@ func (p *sinkPool) Off(c chan []byte) {
// Destroy destroys the pool by removing and closing all sinks and destroying
// all of the channels that are present.
func (p *sinkPool) Destroy() {
func (p *SinkPool) Destroy() {
p.mu.Lock()
defer p.mu.Unlock()
@@ -95,7 +95,7 @@ func (p *sinkPool) Destroy() {
// likely the best option anyways. This uses waitgroups to allow every channel
// to attempt its send concurrently, thus making the total blocking time of this
// function "O(1)" instead of "O(n)".
func (p *sinkPool) Push(data []byte) {
func (p *SinkPool) Push(data []byte) {
p.mu.RLock()
defer p.mu.RUnlock()
var wg sync.WaitGroup
@@ -119,24 +119,3 @@ func (p *sinkPool) Push(data []byte) {
}
wg.Wait()
}
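Push gives every sink its own goroutine, so the total blocking time is bounded by the slowest single send rather than the sum over all sinks. A sketch of that fan-out; the drop-after-timeout policy per send is an assumption for illustration, not necessarily the exact wings behavior:

package main

import (
	"fmt"
	"sync"
	"time"
)

// push fans data out to every sink concurrently: a WaitGroup bounds the
// total blocking time by the slowest single send, and a per-send timeout
// (assumed here) keeps one dead consumer from blocking the publisher forever.
func push(sinks []chan []byte, data []byte) {
	var wg sync.WaitGroup
	for _, c := range sinks {
		wg.Add(1)
		go func(c chan []byte) {
			defer wg.Done()
			select {
			case c <- data:
			case <-time.After(100 * time.Millisecond):
				// Consumer is not keeping up; drop instead of blocking forever.
			}
		}(c)
	}
	wg.Wait()
}

func main() {
	fast := make(chan []byte, 1)
	stuck := make(chan []byte) // nobody ever reads this one

	start := time.Now()
	push([]chan []byte{fast, stuck}, []byte("hello"))
	fmt.Printf("push returned after %v with %d delivered\n", time.Since(start), len(fast))
}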
// Sink returns the instantiated and named sink for a server. If the sink has
// not been configured yet this function will cause a panic condition.
func (s *Server) Sink(name SinkName) *sinkPool {
sink, ok := s.sinks[name]
if !ok {
s.Log().Fatalf("attempt to access nil sink: %s", name)
}
return sink
}
// DestroyAllSinks iterates over all of the sinks configured for the server and
// destroys their instances. Note that this will cause a panic if you attempt
// to call Server.Sink() again after. This function is only used when a server
// is being deleted from the system.
func (s *Server) DestroyAllSinks() {
s.Log().Info("destroying all registered sinks for server instance")
for _, sink := range s.sinks {
sink.Destroy()
}
}

View File

@@ -1,4 +1,4 @@
package server
package system
import (
"fmt"
@@ -23,7 +23,7 @@ func TestSink(t *testing.T) {
g.Describe("SinkPool#On", func() {
g.It("pushes additional channels to a sink", func() {
pool := &sinkPool{}
pool := &SinkPool{}
g.Assert(pool.sinks).IsZero()
@@ -36,9 +36,9 @@ func TestSink(t *testing.T) {
})
g.Describe("SinkPool#Off", func() {
var pool *sinkPool
var pool *SinkPool
g.BeforeEach(func() {
pool = &sinkPool{}
pool = &SinkPool{}
})
g.It("works when no sinks are registered", func() {
@@ -97,9 +97,9 @@ func TestSink(t *testing.T) {
})
g.Describe("SinkPool#Push", func() {
var pool *sinkPool
var pool *SinkPool
g.BeforeEach(func() {
pool = &sinkPool{}
pool = &SinkPool{}
})
g.It("works when no sinks are registered", func() {
@@ -190,9 +190,9 @@ func TestSink(t *testing.T) {
})
g.Describe("SinkPool#Destroy", func() {
var pool *sinkPool
var pool *SinkPool
g.BeforeEach(func() {
pool = &sinkPool{}
pool = &SinkPool{}
})
g.It("works if no sinks are registered", func() {

View File

@@ -3,12 +3,10 @@ package system
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"strconv"
"sync"
"time"
"emperror.dev/errors"
"github.com/goccy/go-json"
@@ -90,16 +88,16 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
} else {
buf.Write(line)
}
// If we encountered an error with something in ReadLine that was not an
// EOF just abort the entire process here.
if err != nil && err != io.EOF {
return err
}
// Finish this loop and begin outputting the line if there is no prefix
// (the line fit into the default buffer), or if we hit the end of the line.
if !isPrefix || err == io.EOF {
break
}
// If we encountered an error with something in ReadLine that was not an
// EOF just abort the entire process here.
if err != nil {
return err
}
}
// Send the full buffer length over to the event handler to be emitted in
@@ -122,22 +120,6 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
return nil
}
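The reordered check in ScanReader matters when the reader dies mid-line: the error must be inspected before the isPrefix/EOF break, or it is swallowed and the loop can spin on a dead reader. A small reproduction using a stand-in scanner with the corrected order:

package main

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
)

var errBroken = errors.New("underlying reader failed")

// brokenReader yields one partial chunk and then a non-EOF error, the
// situation the reordered check is about.
type brokenReader struct{ sent bool }

func (r *brokenReader) Read(p []byte) (int, error) {
	if !r.sent {
		r.sent = true
		return copy(p, "partial line without newline"), nil
	}
	return 0, errBroken
}

// scan is a minimal stand-in for system.ScanReader with the corrected order:
// check for a real error first, then decide whether the line is complete.
func scan(r io.Reader, callback func(line []byte)) error {
	br := bufio.NewReaderSize(r, 16)
	var buf bytes.Buffer
	for {
		line, isPrefix, err := br.ReadLine()
		buf.Write(line)
		if err != nil && err != io.EOF {
			return err // non-EOF errors abort instead of being swallowed
		}
		if !isPrefix || err == io.EOF {
			break
		}
	}
	callback(buf.Bytes())
	return nil
}

func main() {
	err := scan(&brokenReader{}, func(line []byte) { fmt.Println(string(line)) })
	fmt.Println(errors.Is(err, errBroken)) // true: the failure surfaces
}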
// Runs a given work function every "d" duration until the provided context is canceled.
func Every(ctx context.Context, d time.Duration, work func(t time.Time)) {
ticker := time.NewTicker(d)
go func() {
for {
select {
case <-ctx.Done():
ticker.Stop()
return
case t := <-ticker.C:
work(t)
}
}
}()
}
func FormatBytes(b int64) string {
if b < 1024 {
return fmt.Sprintf("%d B", b)