Compare commits

...

15 Commits

Author           SHA1        Date                        Message
Pterodactyl CI   a76d84ea96  2022-09-26 01:24:05 +00:00  bump version for release
Matthew Penner   02cbf2df5b  2022-09-25 19:16:29 -06:00  Update README.md
Matthew Penner   b6edf3acf9  2022-09-25 18:49:48 -06:00  environment(docker): set outgoing ip correctly (#135) (closes https://github.com/pterodactyl/panel/issues/3841)
PotatoMaaan      c686992e85  2022-09-25 18:47:09 -06:00  backups: add an option to change gzip compression level (#128)
camjac251        c736c24118  2022-09-25 13:34:28 -06:00  it's to its (#123)
Chance Callahan  9dfc651a91  2022-09-25 13:25:53 -06:00  rpm: update to 1.7.0 (#140)
Matthew Penner   ad26022c30  2022-09-21 11:50:55 -06:00  parser(yaml): fix issues (closes https://github.com/pterodactyl/panel/issues/4236, https://github.com/pterodactyl/wings/pull/139)
DaneEveritt      83861a6dec  2022-07-24 19:43:43 -04:00  Update CHANGELOG.md
DaneEveritt      231e24aa33  2022-07-24 17:16:45 -04:00  Support new metadata from panel for servers
DaneEveritt      e3ab241d7f  2022-07-24 17:12:47 -04:00  Track file upload activity
DaneEveritt      c18e844689  2022-07-24 16:58:03 -04:00  Support more rapid insertion; ignore issues with i/o
DaneEveritt      8cee18a92b  2022-07-24 16:27:25 -04:00  Save activity in a background routine to speed things along; cap query time at 3 seconds
DaneEveritt      f952efd9c7  2022-07-24 16:27:05 -04:00  Don't try to store nil for the metadata
DaneEveritt      21cf66b2b4  2022-07-24 16:26:52 -04:00  Use single connection in pool to avoid simultaneous write lock issues
DaneEveritt      251f91a08e  2022-07-24 15:59:17 -04:00  Fix crons to actually run correctly using the configuration values
26 changed files with 181 additions and 76 deletions

View File

@@ -1,5 +1,21 @@
 # Changelog
+
+## v1.7.1
+### Fixed
+* YAML parser has been updated to fix some strange issues
+
+### Added
+* Added `Force Outgoing IP` option for servers to ensure outgoing traffic uses the server's IP address
+* Adds an option to control the level of gzip compression for backups
+
+## v1.7.0
+### Fixed
+* Fixes multi-platform support for Wings' Docker image.
+
+### Added
+* Adds support for tracking of SFTP actions, power actions, server commands, and file uploads by utilizing a local SQLite database and processing events before sending them to the Panel.
+* Adds support for configuring the MTU on the `pterodactyl0` network.
+
 ## v1.6.4
 ### Fixed
 * Fixes a bug causing CPU limiting to not be properly applied to servers.

View File

@@ -58,7 +58,7 @@ func newDiagnosticsCommand() *cobra.Command {
     return command
 }
 
-// diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
+// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.
 // We collect:
 // - wings and docker versions
 // - relevant parts of daemon configuration

View File

@@ -81,7 +81,7 @@ func init() {
     rootCommand.Flags().Bool("pprof", false, "if the pprof profiler should be enabled. The profiler will bind to localhost:6060 by default")
     rootCommand.Flags().Int("pprof-block-rate", 0, "enables block profile support, may have performance impacts")
     rootCommand.Flags().Int("pprof-port", 6060, "If provided with --pprof, the port it will run on")
-    rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
+    rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
     rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
     rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
@@ -162,7 +162,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
     ticker := time.NewTicker(time.Minute)
     // Every minute, write the current server states to the disk to allow for a more
     // seamless hard-reboot process in which wings will re-sync server states based
-    // on it's last tracked state.
+    // on its last tracked state.
     go func() {
         for {
             select {

View File

@@ -167,10 +167,10 @@ type SystemConfiguration struct {
     // being sent to the Panel. By default this will send activity collected over the last minute. Keep
     // in mind that only a fixed number of activity log entries, defined by ActivitySendCount, will be sent
     // in each run.
-    ActivitySendInterval int64 `default:"60" yaml:"activity_send_interval"`
+    ActivitySendInterval int `default:"60" yaml:"activity_send_interval"`
 
     // ActivitySendCount is the number of activity events to send per batch.
-    ActivitySendCount int64 `default:"100" yaml:"activity_send_count"`
+    ActivitySendCount int `default:"100" yaml:"activity_send_count"`
 
     // If set to true, file permissions for a server will be checked when the process is
     // booted. This can cause boot delays if the server has a large amount of files. In most
@@ -219,6 +219,15 @@ type Backups struct {
     //
     // Defaults to 0 (unlimited)
     WriteLimit int `default:"0" yaml:"write_limit"`
+
+    // CompressionLevel determines how much backups created by wings should be compressed.
+    //
+    // "none" -> no compression will be applied
+    // "best_speed" -> uses gzip level 1 for fast speed
+    // "best_compression" -> uses gzip level 9 for minimal disk space useage
+    //
+    // Defaults to "best_speed" (level 1)
+    CompressionLevel string `default:"best_speed" yaml:"compression_level"`
 }
 
 type Transfers struct {
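For reference, a minimal config.yml sketch (not part of this diff) showing how the new option could be set. It assumes the Backups block sits under system: as the config.Get().System.Backups lookups elsewhere in this compare imply; the values are illustrative only:

system:
  backups:
    # 0 keeps the existing unlimited write limit default.
    write_limit: 0
    # One of "none", "best_speed" (gzip level 1, the default) or "best_compression" (gzip level 9).
    compression_level: best_compression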

View File

@@ -12,6 +12,11 @@ import (
 // Defines the allocations available for a given server. When using the Docker environment
 // driver these correspond to mappings for the container that allow external connections.
 type Allocations struct {
+    // ForceOutgoingIP causes a dedicated bridge network to be created for the
+    // server with a special option, causing Docker to SNAT outgoing traffic to
+    // the DefaultMapping's IP. This is important to servers which rely on external
+    // services that check the IP of the server (Source Engine servers, for example).
+    ForceOutgoingIP bool `json:"force_outgoing_ip"`
     // Defines the default allocation that should be used for this server. This is
     // what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
     // files or the startup arguments for a server.
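As a rough illustration only (not taken from this diff), the allocation block sent by the Panel for a server could carry the new flag like this; apart from force_outgoing_ip, the key names, address, and ports are assumptions made for the example (the address is from the 203.0.113.0/24 documentation range):

{
  "allocations": {
    "force_outgoing_ip": true,
    "default": { "ip": "203.0.113.10", "port": 27015 },
    "mappings": { "203.0.113.10": [27015, 27016] }
  }
}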

View File

@@ -41,12 +41,12 @@ func ConfigureDocker(ctx context.Context) error {
     nw := config.Get().Docker.Network
     resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
     if err != nil {
-        if client.IsErrNotFound(err) {
-            log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
-            if err := createDockerNetwork(ctx, cli); err != nil {
-                return err
-            }
-        } else {
+        if !client.IsErrNotFound(err) {
             return err
         }
+        log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
+        if err := createDockerNetwork(ctx, cli); err != nil {
+            return err
+        }
     }

View File

@@ -147,10 +147,12 @@ func (e *Environment) InSituUpdate() error {
 // currently available for it. If the container already exists it will be
 // returned.
 func (e *Environment) Create() error {
+    ctx := context.Background()
+
     // If the container already exists don't hit the user with an error, just return
     // the current information about it which is what we would do when creating the
     // container anyways.
-    if _, err := e.ContainerInspect(context.Background()); err == nil {
+    if _, err := e.ContainerInspect(ctx); err == nil {
         return nil
     } else if !client.IsErrNotFound(err) {
         return errors.Wrap(err, "environment/docker: failed to inspect container")
@@ -190,7 +192,34 @@ func (e *Environment) Create() error {
         },
     }
 
-    tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))
+    networkMode := container.NetworkMode(config.Get().Docker.Network.Mode)
+    if a.ForceOutgoingIP {
+        e.log().Debug("environment/docker: forcing outgoing IP address")
+        networkName := strings.ReplaceAll(e.Id, "-", "")
+        networkMode = container.NetworkMode(networkName)
+
+        if _, err := e.client.NetworkInspect(ctx, networkName, types.NetworkInspectOptions{}); err != nil {
+            if !client.IsErrNotFound(err) {
+                return err
+            }
+
+            if _, err := e.client.NetworkCreate(ctx, networkName, types.NetworkCreate{
+                Driver:     "bridge",
+                EnableIPv6: false,
+                Internal:   false,
+                Attachable: false,
+                Ingress:    false,
+                ConfigOnly: false,
+                Options: map[string]string{
+                    "encryption": "false",
+                    "com.docker.network.bridge.default_bridge": "false",
+                    "com.docker.network.host_ipv4":             a.DefaultMapping.Ip,
+                },
+            }); err != nil {
+                return err
+            }
+        }
+    }
 
     hostConf := &container.HostConfig{
         PortBindings: a.DockerBindings(),
@@ -202,7 +231,7 @@ func (e *Environment) Create() error {
         // Configure the /tmp folder mapping in containers. This is necessary for some
         // games that need to make use of it for downloads and other installation processes.
         Tmpfs: map[string]string{
-            "/tmp": "rw,exec,nosuid,size=" + tmpfsSize + "M",
+            "/tmp": "rw,exec,nosuid,size=" + strconv.Itoa(int(config.Get().Docker.TmpfsSize)) + "M",
         },
 
         // Define resource limits for the container based on the data passed through
@@ -231,10 +260,10 @@ func (e *Environment) Create() error {
             "setpcap", "mknod", "audit_write", "net_raw", "dac_override",
             "fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
         },
-        NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
+        NetworkMode: networkMode,
     }
 
-    if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
+    if _, err := e.client.ContainerCreate(ctx, conf, hostConf, nil, nil, e.Id); err != nil {
         return errors.Wrap(err, "environment/docker: failed to create container")
     }

go.mod
View File

@@ -49,6 +49,7 @@ require (
     github.com/go-co-op/gocron v1.15.0
     github.com/goccy/go-json v0.9.6
     github.com/klauspost/compress v1.15.1
+    gopkg.in/yaml.v3 v3.0.1
     gorm.io/gorm v1.23.8
 )
@@ -121,7 +122,6 @@ require (
     google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
     google.golang.org/grpc v1.45.0 // indirect
     google.golang.org/protobuf v1.28.0 // indirect
-    gopkg.in/yaml.v3 v3.0.1 // indirect
     modernc.org/libc v1.16.17 // indirect
     modernc.org/mathutil v1.4.1 // indirect
     modernc.org/memory v1.1.1 // indirect

View File

@@ -12,7 +12,7 @@ import (
 type activityCron struct {
     mu      *system.AtomicBool
     manager *server.Manager
-    max     int64
+    max     int
 }
 
 // Run executes the cronjob and ensures we fetch and send all of the stored activity to the
@@ -30,7 +30,7 @@ func (ac *activityCron) Run(ctx context.Context) error {
     var activity []models.Activity
     tx := database.Instance().WithContext(ctx).
         Where("event NOT LIKE ?", "server:sftp.%").
-        Limit(int(ac.max)).
+        Limit(ac.max).
         Find(&activity)
     if tx.Error != nil {

View File

@@ -3,7 +3,7 @@ package cron
 import (
     "context"
     "emperror.dev/errors"
-    "github.com/apex/log"
+    log2 "github.com/apex/log"
     "github.com/go-co-op/gocron"
     "github.com/pterodactyl/wings/config"
     "github.com/pterodactyl/wings/server"
@@ -40,7 +40,13 @@ func Scheduler(ctx context.Context, m *server.Manager) (*gocron.Scheduler, error
     }
 
     s := gocron.NewScheduler(l)
-    _, _ = s.Tag("activity").Every(config.Get().System.ActivitySendInterval).Seconds().Do(func() {
+    log := log2.WithField("subsystem", "cron")
+
+    interval := time.Duration(config.Get().System.ActivitySendInterval) * time.Second
+    log.WithField("interval", interval).Info("configuring system crons")
+
+    _, _ = s.Tag("activity").Every(interval).Do(func() {
+        log.WithField("cron", "activity").Debug("sending internal activity events to Panel")
         if err := activity.Run(ctx); err != nil {
             if errors.Is(err, ErrCronRunning) {
                 log.WithField("cron", "activity").Warn("activity process is already running, skipping...")
@@ -50,7 +56,8 @@ func Scheduler(ctx context.Context, m *server.Manager) (*gocron.Scheduler, error
         }
     })
 
-    _, _ = s.Tag("sftp").Every(config.Get().System.ActivitySendInterval).Seconds().Do(func() {
+    _, _ = s.Tag("sftp").Every(interval).Do(func() {
+        log.WithField("cron", "sftp").Debug("sending sftp events to Panel")
         if err := sftp.Run(ctx); err != nil {
             if errors.Is(err, ErrCronRunning) {
                 log.WithField("cron", "sftp").Warn("sftp events process already running, skipping...")
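A minimal, self-contained sketch of the gocron pattern the reworked scheduler uses, with a hard-coded 60 standing in for config.Get().System.ActivitySendInterval; it is an illustration under those assumptions, not code from this compare:

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// The interval is configured in whole seconds; converting it to a
	// time.Duration lets it be passed straight to Every(), so the extra
	// .Seconds() unit call from the old integer-based form is not needed.
	interval := time.Duration(60) * time.Second

	_, _ = s.Tag("activity").Every(interval).Do(func() {
		fmt.Println("flushing buffered activity to the Panel")
	})

	s.StartAsync()
	time.Sleep(2 * interval) // let the job fire a couple of times, then exit
}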

View File

@@ -7,14 +7,13 @@ import (
     "github.com/pterodactyl/wings/internal/models"
     "github.com/pterodactyl/wings/server"
     "github.com/pterodactyl/wings/system"
-    "gorm.io/gorm"
     "reflect"
 )
 
 type sftpCron struct {
     mu      *system.AtomicBool
     manager *server.Manager
-    max     int64
+    max     int
 }
 
 type mapKey struct {
@@ -52,7 +51,7 @@ func (sc *sftpCron) Run(ctx context.Context) error {
     events := &eventMap{
         m:   map[mapKey]*models.Activity{},
         ids: []int{},
-        max: int(sc.max),
+        max: sc.max,
     }
 
     for {
@@ -79,17 +78,13 @@ func (sc *sftpCron) Run(ctx context.Context) error {
     if len(events.m) == 0 {
         return nil
     }
-    err = database.Instance().Transaction(func(tx *gorm.DB) error {
-        tx.Where("id IN ?", events.ids).Delete(&models.Activity{})
-        if tx.Error != nil {
-            return tx.Error
-        }
-        return sc.manager.Client().SendActivityLogs(ctx, events.Elements())
-    })
-    return errors.WithStack(err)
+    if err := sc.manager.Client().SendActivityLogs(ctx, events.Elements()); err != nil {
+        return errors.Wrap(err, "failed to send sftp activity logs to Panel")
+    }
+    if tx := database.Instance().Where("id IN ?", events.ids).Delete(&models.Activity{}); tx.Error != nil {
+        return errors.WithStack(tx.Error)
+    }
+    return nil
 }
 
 // fetchRecords returns a group of activity events starting at the given offset. This is used
@@ -102,7 +97,7 @@ func (sc *sftpCron) fetchRecords(ctx context.Context, offset int) (activity []mo
         Where("event LIKE ?", "server:sftp.%").
         Order("event DESC").
         Offset(offset).
-        Limit(int(sc.max)).
+        Limit(sc.max).
         Find(&activity)
     if tx.Error != nil {
         err = errors.WithStack(tx.Error)

View File

@@ -7,7 +7,9 @@ import (
     "github.com/pterodactyl/wings/internal/models"
     "github.com/pterodactyl/wings/system"
     "gorm.io/gorm"
+    "gorm.io/gorm/logger"
     "path/filepath"
+    "time"
 )
 
 var o system.AtomicBool
@@ -20,11 +22,25 @@ func Initialize() error {
         panic("database: attempt to initialize more than once during application lifecycle")
     }
     p := filepath.Join(config.Get().System.RootDirectory, "wings.db")
-    instance, err := gorm.Open(sqlite.Open(p), &gorm.Config{})
+    instance, err := gorm.Open(sqlite.Open(p), &gorm.Config{
+        Logger: logger.Default.LogMode(logger.Silent),
+    })
     if err != nil {
         return errors.Wrap(err, "database: could not open database file")
     }
     db = instance
+    if sql, err := db.DB(); err != nil {
+        return errors.WithStack(err)
+    } else {
+        sql.SetMaxOpenConns(1)
+        sql.SetConnMaxLifetime(time.Hour)
+    }
+    if tx := db.Exec("PRAGMA synchronous = OFF"); tx.Error != nil {
+        return errors.WithStack(tx.Error)
+    }
+    if tx := db.Exec("PRAGMA journal_mode = MEMORY"); tx.Error != nil {
+        return errors.WithStack(tx.Error)
+    }
     if err := db.AutoMigrate(&models.Activity{}); err != nil {
         return errors.WithStack(err)
     }

View File

@@ -60,5 +60,8 @@ func (a *Activity) BeforeCreate(_ *gorm.DB) error {
         a.Timestamp = time.Now()
     }
     a.Timestamp = a.Timestamp.UTC()
+    if a.Metadata == nil {
+        a.Metadata = ActivityMeta{}
+    }
     return nil
 }

View File

@@ -15,7 +15,7 @@ import (
     "github.com/icza/dyno"
     "github.com/magiconair/properties"
     "gopkg.in/ini.v1"
-    "gopkg.in/yaml.v2"
+    "gopkg.in/yaml.v3"
 
     "github.com/pterodactyl/wings/config"
 )

View File

@@ -180,7 +180,7 @@ func postServerReinstall(c *gin.Context) {
     c.Status(http.StatusAccepted)
 }
 
-// Deletes a server from the wings daemon and dissociate it's objects.
+// Deletes a server from the wings daemon and dissociate its objects.
 func deleteServer(c *gin.Context) {
     s := middleware.ExtractServer(c)

View File

@@ -3,6 +3,7 @@ package router
 import (
     "bufio"
     "context"
+    "github.com/pterodactyl/wings/internal/models"
     "io"
     "mime/multipart"
     "net/http"
@@ -600,6 +601,11 @@ func postServerUploadFiles(c *gin.Context) {
         if err := handleFileUpload(p, s, header); err != nil {
             NewServerError(err, s).Abort(c)
             return
+        } else {
+            s.SaveActivity(s.NewRequestActivity(token.UserUuid, c.Request.RemoteAddr), server.ActivityFileUploaded, models.ActivityMeta{
+                "file":      header.Filename,
+                "directory": filepath.Clean(directory),
+            })
         }
     }
 }
@@ -617,6 +623,5 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
     if err := s.Filesystem().Writefile(p, file); err != nil {
         return err
     }
-
     return nil
 }

View File

@@ -8,6 +8,7 @@ type UploadPayload struct {
     jwt.Payload
 
     ServerUuid string `json:"server_uuid"`
+    UserUuid   string `json:"user_uuid"`
     UniqueId   string `json:"unique_id"`
 }

View File

@@ -370,7 +370,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
         }
 
         if err == nil {
-            _ = h.ra.Save(h.server, models.Event(server.ActivityPowerPrefix+action), nil)
+            h.server.SaveActivity(h.ra, models.Event(server.ActivityPowerPrefix+action), nil)
         }
 
         return err
@@ -429,11 +429,13 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
             }
         }
 
-        _ = h.ra.Save(h.server, server.ActivityConsoleCommand, models.ActivityMeta{
+        if err := h.server.Environment.SendCommand(strings.Join(m.Args, "")); err != nil {
+            return err
+        }
+        h.server.SaveActivity(h.ra, server.ActivityConsoleCommand, models.ActivityMeta{
             "command": strings.Join(m.Args, ""),
         })
-
-        return h.server.Environment.SendCommand(strings.Join(m.Args, ""))
+        return nil
     }

View File

@@ -1,5 +1,5 @@
 Name: ptero-wings
-Version: 1.5.3
+Version: 1.7.0
 Release: 1%{?dist}
 Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
 BuildArch: x86_64
@@ -91,6 +91,9 @@ rm -rf /var/log/pterodactyl
 wings --version
 
 %changelog
+* Wed Sep 14 2022 Chance Callahan <ccallaha@redhat.com> - 1.7.0-1
+- Updating specfile to match stable release.
+
 * Wed Oct 27 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.3-1
 - specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
 - Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.3

View File

@@ -1,9 +1,11 @@
 package server
 
 import (
+    "context"
     "emperror.dev/errors"
     "github.com/pterodactyl/wings/internal/database"
     "github.com/pterodactyl/wings/internal/models"
+    "time"
 )
 
 const ActivityPowerPrefix = "server:power."
@@ -15,6 +17,7 @@ const (
     ActivitySftpCreateDirectory = models.Event("server:sftp.create-directory")
     ActivitySftpRename          = models.Event("server:sftp.rename")
     ActivitySftpDelete          = models.Event("server:sftp.delete")
+    ActivityFileUploaded        = models.Event("server:file.uploaded")
 )
 
 // RequestActivity is a wrapper around a LoggedEvent that is able to track additional request
@@ -34,29 +37,6 @@ func (ra RequestActivity) Event(event models.Event, metadata models.ActivityMeta
     return a.SetUser(ra.user)
 }
 
-// Save creates a new event instance and saves it. If an error is encountered it is automatically
-// logged to the provided server's error logging output. The error is also returned to the caller
-// but can be ignored.
-func (ra RequestActivity) Save(s *Server, event models.Event, metadata models.ActivityMeta) error {
-    if tx := database.Instance().Create(ra.Event(event, metadata)); tx.Error != nil {
-        err := errors.WithStackIf(tx.Error)
-        s.Log().WithField("error", err).WithField("event", event).Error("activity: failed to save event")
-        return err
-    }
-    return nil
-}
-
-// IP returns the IP address associated with this entry.
-func (ra RequestActivity) IP() string {
-    return ra.ip
-}
-
-func (ra *RequestActivity) User() string {
-    return ra.user
-}
-
 // SetUser clones the RequestActivity struct and sets a new user value on the copy
 // before returning it.
 func (ra RequestActivity) SetUser(u string) RequestActivity {
@@ -68,3 +48,17 @@ func (ra RequestActivity) SetUser(u string) RequestActivity {
 func (s *Server) NewRequestActivity(user string, ip string) RequestActivity {
     return RequestActivity{server: s.ID(), user: user, ip: ip}
 }
+
+// SaveActivity saves an activity entry to the database in a background routine. If an error is
+// encountered it is logged but not returned to the caller.
+func (s *Server) SaveActivity(a RequestActivity, event models.Event, metadata models.ActivityMeta) {
+    ctx, cancel := context.WithTimeout(s.Context(), time.Second*3)
+    go func() {
+        defer cancel()
+        if tx := database.Instance().WithContext(ctx).Create(a.Event(event, metadata)); tx.Error != nil {
+            s.Log().WithField("error", errors.WithStack(tx.Error)).
+                WithField("event", event).
+                Error("activity: failed to save event")
+        }
+    }()
+}

View File

@@ -16,6 +16,11 @@ type EggConfiguration struct {
     FileDenylist []string `json:"file_denylist"`
 }
 
+type ConfigurationMeta struct {
+    Name        string `json:"name"`
+    Description string `json:"description"`
+}
+
 type Configuration struct {
     mu sync.RWMutex
@@ -24,6 +29,8 @@ type Configuration struct {
     // docker containers as well as in log output.
     Uuid string `json:"uuid"`
 
+    Meta ConfigurationMeta `json:"meta"`
+
     // Whether or not the server is in a suspended state. Suspended servers cannot
     // be started or modified except in certain scenarios by an admin user.
     Suspended bool `json:"suspended"`

View File

@@ -62,8 +62,21 @@ func (a *Archive) Create(dst string) error {
         writer = f
     }
 
+    // The default compression level is BestSpeed
+    var cl = pgzip.BestSpeed
+
+    // Choose which compression level to use based on the compression_level configuration option
+    switch config.Get().System.Backups.CompressionLevel {
+    case "none":
+        cl = pgzip.NoCompression
+    case "best_speed":
+        cl = pgzip.BestSpeed
+    case "best_compression":
+        cl = pgzip.BestCompression
+    }
+
     // Create a new gzip writer around the file.
-    gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
+    gw, _ := pgzip.NewWriterLevel(writer, cl)
     _ = gw.SetConcurrency(1<<20, 1)
     defer gw.Close()
@@ -148,7 +161,7 @@ func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirw
 // Adds a given file path to the final archive being created.
 func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
     // Lstat the file, this will give us the same information as Stat except that it will not
-    // follow a symlink to it's target automatically. This is important to avoid including
+    // follow a symlink to its target automatically. This is important to avoid including
     // files that exist outside the server root unintentionally in the backup.
     s, err := os.Lstat(p)
     if err != nil {

View File

@@ -71,7 +71,7 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
     // If space is -1 or 0 just return true, means they're allowed unlimited.
     //
     // Technically we could skip disk space calculation because we don't need to check if the
-    // server exceeds it's limit but because this method caches the disk usage it would be best
+    // server exceeds its limit but because this method caches the disk usage it would be best
     // to calculate the disk usage and always return true.
     if fs.MaxDisk() == 0 {
         return true

View File

@@ -44,7 +44,7 @@ func (eh *eventHandler) Log(e models.Event, fa FileAction) error {
     }
 
     if tx := database.Instance().Create(a.SetUser(eh.user)); tx.Error != nil {
-        return errors.Wrap(tx.Error, "sftp: failed to save event to database")
+        return errors.WithStack(tx.Error)
     }
     return nil
 }
@@ -53,6 +53,6 @@ func (eh *eventHandler) Log(e models.Event, fa FileAction) error {
 // if an error is encountered during the logging of the event.
 func (eh *eventHandler) MustLog(e models.Event, fa FileAction) {
     if err := eh.Log(e, fa); err != nil {
-        log.WithField("error", err).Fatal("sftp: failed to log event")
+        log.WithField("error", errors.WithStack(err)).WithField("event", e).Error("sftp: failed to log event")
     }
 }

View File

@@ -1,3 +1,3 @@
 package system
 
-var Version = "develop"
+var Version = "1.7.1"

View File

@@ -23,7 +23,7 @@ type SinkPool struct {
 }
 
 // NewSinkPool returns a new empty SinkPool. A sink pool generally lives with a
-// server instance for it's full lifetime.
+// server instance for its full lifetime.
 func NewSinkPool() *SinkPool {
     return &SinkPool{}
 }