Compare commits


1 commit

Author:  Dane Everitt
SHA1:    c125882d25
Message: Deploy wings@1.0.0-beta.1
Date:    2020-04-12 19:14:24 -07:00
31 changed files with 416 additions and 925 deletions


@@ -3,6 +3,7 @@ dist: xenial
language: go
go:
- 1.11.x
- 1.13.x
go_import_path: "github.com/pterodactyl/wings"
@@ -18,8 +19,6 @@ install:
- go get github.com/haya14busa/goverage
- go get github.com/schrej/godacov
- go mod download
script:
- make cross-build
- goverage -v -coverprofile=coverage.out ./...


@@ -1,35 +0,0 @@
package api
import (
"encoding/json"
"fmt"
"github.com/pkg/errors"
)
type BackupRequest struct {
Checksum string `json:"checksum"`
Size int64 `json:"size"`
Successful bool `json:"successful"`
}
// Notifies the panel that a specific backup has been completed and is now
// available for a user to view and download.
func (r *PanelRequest) SendBackupStatus(backup string, data BackupRequest) (*RequestError, error) {
b, err := json.Marshal(data)
if err != nil {
return nil, errors.WithStack(err)
}
resp, err := r.Post(fmt.Sprintf("/backups/%s", backup), b)
if err != nil {
return nil, errors.WithStack(err)
}
defer resp.Body.Close()
r.Response = resp
if r.HasError() {
return r.Error(), nil
}
return nil, nil
}


@@ -202,4 +202,30 @@ func (r *PanelRequest) SendTransferSuccess(uuid string) (*RequestError, error) {
}
return nil, nil
}
type BackupRequest struct {
Successful bool `json:"successful"`
Sha256Hash string `json:"sha256_hash"`
FileSize int64 `json:"file_size"`
}
func (r *PanelRequest) SendBackupStatus(uuid string, backup string, data BackupRequest) (*RequestError, error) {
b, err := json.Marshal(data)
if err != nil {
return nil, errors.WithStack(err)
}
resp, err := r.Post(fmt.Sprintf("/servers/%s/backup/%s", uuid, backup), b)
if err != nil {
return nil, errors.WithStack(err)
}
defer resp.Body.Close()
r.Response = resp
if r.HasError() {
return r.Error(), nil
}
return nil, nil
}

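For reference, a minimal sketch of how the relocated SendBackupStatus call is driven; it mirrors the notifyPanel code in server/backup.go further down in this diff. The helper name and the UUID, checksum, and size arguments are placeholders, not values taken from a real panel.

package example

import (
	"errors"

	"github.com/pterodactyl/wings/api"
)

// notifyBackupComplete is a hypothetical helper mirroring the notifyPanel
// logic in server/backup.go below. All arguments are placeholders.
func notifyBackupComplete(serverUuid, backupUuid, checksum string, size int64) error {
	r := api.NewRequester()

	rerr, err := r.SendBackupStatus(serverUuid, backupUuid, api.BackupRequest{
		Successful: true,
		Sha256Hash: checksum,
		FileSize:   size,
	})
	if err != nil {
		// Something failed inside the daemon itself (encoding, transport, etc.).
		return err
	}
	if rerr != nil {
		// The panel responded, but with an error; surface its message.
		return errors.New(rerr.String())
	}

	return nil
}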

@@ -4,7 +4,6 @@ import (
"encoding/json" "encoding/json"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pterodactyl/sftp-server" "github.com/pterodactyl/sftp-server"
"go.uber.org/zap"
) )
func (r *PanelRequest) ValidateSftpCredentials(request sftp_server.AuthenticationRequest) (*sftp_server.AuthenticationResponse, error) { func (r *PanelRequest) ValidateSftpCredentials(request sftp_server.AuthenticationRequest) (*sftp_server.AuthenticationResponse, error) {
@@ -22,16 +21,11 @@ func (r *PanelRequest) ValidateSftpCredentials(request sftp_server.Authenticatio
r.Response = resp r.Response = resp
if r.HasError() { if r.HasError() {
if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 { if r.HttpResponseCode() == 403 {
zap.S().Debugw("failed to validate server credentials for SFTP", zap.String("error", r.Error().String())) return nil, sftp_server.InvalidCredentialsError{}
return nil, new(sftp_server.InvalidCredentialsError)
} }
rerr := errors.New(r.Error().String()) return nil, errors.WithStack(errors.New(r.Error().String()))
zap.S().Warnw("error validating SFTP credentials", zap.Error(rerr))
return nil, rerr
} }
response := new(sftp_server.AuthenticationResponse) response := new(sftp_server.AuthenticationResponse)


@@ -22,7 +22,6 @@ var (
configureArgs struct {
PanelURL string
Token string
-ConfigPath string
Node string
Override bool
AllowInsecure bool
@@ -41,7 +40,6 @@ func init() {
configureCmd.PersistentFlags().StringVarP(&configureArgs.PanelURL, "panel-url", "p", "", "The base URL for this daemon's panel")
configureCmd.PersistentFlags().StringVarP(&configureArgs.Token, "token", "t", "", "The API key to use for fetching node information")
configureCmd.PersistentFlags().StringVarP(&configureArgs.Node, "node", "n", "", "The ID of the node which will be connected to this daemon")
-configureCmd.PersistentFlags().StringVarP(&configureArgs.ConfigPath, "config-path", "c", config.DefaultLocation, "The path where the configuration file should be made")
configureCmd.PersistentFlags().BoolVar(&configureArgs.Override, "override", false, "Set to true to override an existing configuration for this node")
configureCmd.PersistentFlags().BoolVar(&configureArgs.AllowInsecure, "allow-insecure", false, "Set to true to disable certificate checking")
}
@@ -53,7 +51,7 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
}
}
-if _, err := os.Stat(configureArgs.ConfigPath); err == nil && !configureArgs.Override {
+if _, err := os.Stat("config.yml"); err == nil && !configureArgs.Override {
survey.AskOne(&survey.Confirm{Message: "Override existing configuration file"}, &configureArgs.Override)
if !configureArgs.Override {
fmt.Println("Aborting process; a configuration file already exists for this node.")


@@ -5,8 +5,6 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"os" "os"
"path"
"strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pkg/profile" "github.com/pkg/profile"
@@ -21,7 +19,7 @@ import (
"go.uber.org/zap" "go.uber.org/zap"
) )
var configPath = config.DefaultLocation var configPath = "config.yml"
var debug = false var debug = false
var shouldRunProfiler = false var shouldRunProfiler = false
@@ -33,41 +31,20 @@ var root = &cobra.Command{
} }
func init() { func init() {
root.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file") root.PersistentFlags().StringVar(&configPath, "config", "config.yml", "set the location for the configuration file")
root.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode") root.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
root.PersistentFlags().BoolVar(&shouldRunProfiler, "profile", false, "pass in order to profile wings") root.PersistentFlags().BoolVar(&shouldRunProfiler, "profile", false, "pass in order to profile wings")
root.AddCommand(configureCmd) root.AddCommand(configureCmd)
} }
// Get the configuration path based on the arguments provided.
func readConfiguration() (*config.Configuration, error) {
var p = configPath
if !strings.HasPrefix(p, "/") {
d, err := os.Getwd()
if err != nil {
return nil, err
}
p = path.Clean(path.Join(d, configPath))
}
if s, err := os.Stat(p); err != nil {
return nil, errors.WithStack(err)
} else if s.IsDir() {
return nil, errors.New("cannot use directory as configuration file path")
}
return config.ReadConfiguration(p)
}
func rootCmdRun(*cobra.Command, []string) {
// Profile wings in production!!!!
if shouldRunProfiler {
defer profile.Start().Stop()
}
-c, err := readConfiguration()
+c, err := config.ReadConfiguration(configPath)
if err != nil {
panic(err)
}
@@ -81,7 +58,7 @@ func rootCmdRun(*cobra.Command, []string) {
panic(err)
}
-zap.S().Infof("using configuration from path: %s", c.GetPath())
+zap.S().Infof("using configuration from path: %s", configPath)
if c.Debug {
zap.S().Debugw("running in debug mode")
zap.S().Infow("certificate checking is disabled")
@@ -94,11 +71,6 @@ func rootCmdRun(*cobra.Command, []string) {
config.Set(c)
config.SetDebugViaFlag(debug)
if err := c.System.ConfigureDirectories(); err != nil {
zap.S().Panicw("failed to configure system directories for pterodactyl", zap.Error(err))
return
}
zap.S().Infof("checking for pterodactyl system user \"%s\"", c.System.Username) zap.S().Infof("checking for pterodactyl system user \"%s\"", c.System.Username)
if su, err := c.EnsurePterodactylUser(); err != nil { if su, err := c.EnsurePterodactylUser(); err != nil {
zap.S().Panicw("failed to create pterodactyl system user", zap.Error(err)) zap.S().Panicw("failed to create pterodactyl system user", zap.Error(err))
@@ -264,9 +236,4 @@ func printLogo() {
fmt.Println(` \___/\___/___/___/___/___ /______/`) fmt.Println(` \___/\___/___/___/___/___ /______/`)
fmt.Println(` /_______/ v` + system.Version) fmt.Println(` /_______/ v` + system.Version)
fmt.Println() fmt.Println()
fmt.Println(`Website: https://pterodactyl.io`)
fmt.Println(`Source: https://github.com/pterodactyl/wings`)
fmt.Println()
fmt.Println(`Copyright © 2018 - 2020 Dane Everitt & Contributors`)
fmt.Println()
}


@@ -1,9 +1,7 @@
package config
import (
-"errors"
"fmt"
-"github.com/cobaugh/osrelease"
"github.com/creasty/defaults"
"github.com/gbrlsnchs/jwt/v3"
"go.uber.org/zap"
@@ -19,14 +17,9 @@ import (
"sync"
)
-const DefaultLocation = "/var/lib/pterodactyl/config.yml"
type Configuration struct {
sync.RWMutex `json:"-" yaml:"-"`
-// The location from which this configuration instance was instantiated.
-path string
// Locker specific to writing the configuration to the disk, this happens
// in areas that might already be locked so we don't want to crash the process.
writeLock sync.Mutex
@@ -82,6 +75,48 @@ type Configuration struct {
PanelLocation string `json:"remote" yaml:"remote"`
}
// Defines basic system configuration settings.
type SystemConfiguration struct {
// Directory where the server data is stored at.
Data string `default:"/srv/daemon-data" yaml:"data"`
// Directory where server archives for transferring will be stored.
ArchiveDirectory string `default:"/srv/daemon-data/.archives" yaml:"archive_directory"`
// Directory where local backups will be stored on the machine.
BackupDirectory string `default:"/srv/daemon-data/.backups" yaml:"backup_directory"`
// The user that should own all of the server files, and be used for containers.
Username string `default:"pterodactyl" yaml:"username"`
// Definitions for the user that gets created to ensure that we can quickly access
// this information without constantly having to do a system lookup.
User struct {
Uid int
Gid int
}
// Determines if permissions for a server should be set automatically on
// daemon boot. This can take a long time on systems with many servers, or on
// systems with servers containing thousands of files.
//
// Setting this to true by default helps us avoid a lot of support requests
// from people that keep trying to move files around as a root user leading
// to server permission issues.
//
// In production and heavy use environments where boot speed is essential,
// this should be set to false as servers will self-correct permissions on
// boot anyways.
SetPermissionsOnBoot bool `default:"true" yaml:"set_permissions_on_boot"`
// Determines if Wings should detect a server that stops with a normal exit code of
// "0" as being crashed if the process stopped without any Wings interaction. E.g.
// the user did not press the stop button, but the process stopped cleanly.
DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"`
Sftp *SftpConfiguration `yaml:"sftp"`
}
// Defines the configuration of the internal SFTP server.
type SftpConfiguration struct {
// If set to false, the internal SFTP server will not be booted and you will need
@@ -98,6 +133,54 @@ type SftpConfiguration struct {
ReadOnly bool `default:"false" yaml:"read_only"`
}
type dockerNetworkInterfaces struct {
V4 struct {
Subnet string `default:"172.18.0.0/16"`
Gateway string `default:"172.18.0.1"`
}
V6 struct {
Subnet string `default:"fdba:17c8:6c94::/64"`
Gateway string `default:"fdba:17c8:6c94::1011"`
}
}
type DockerNetworkConfiguration struct {
// The interface that should be used to create the network. Must not conflict
// with any other interfaces in use by Docker or on the system.
Interface string `default:"172.18.0.1"`
// The name of the network to use. If this network already exists it will not
// be created. If it is not found, a new network will be created using the interface
// defined.
Name string `default:"pterodactyl_nw"`
ISPN bool `default:"false" yaml:"ispn"`
Driver string `default:"bridge"`
IsInternal bool `default:"false" yaml:"is_internal"`
EnableICC bool `default:"true" yaml:"enable_icc"`
Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
}
// Defines the docker configuration used by the daemon when interacting with
// containers and networks on the system.
type DockerConfiguration struct {
// Network configuration that should be used when creating a new network
// for containers run through the daemon.
Network DockerNetworkConfiguration `json:"network" yaml:"network"`
// If true, container images will be updated when a server starts if there
// is an update available. If false the daemon will not attempt updates and will
// defer to the host system to manage image updates.
UpdateImages bool `default:"true" json:"update_images" yaml:"update_images"`
// The location of the Docker socket.
Socket string `default:"/var/run/docker.sock"`
// Defines the location of the timezone file on the host system that should
// be mounted into the created containers so that they all use the same time.
TimezonePath string `default:"/etc/timezone" json:"timezone_path" yaml:"timezone_path"`
}
// Defines the configuration for the internal API that is exposed by the
// daemon webserver.
type ApiConfiguration struct {
@@ -134,9 +217,6 @@ func ReadConfiguration(path string) (*Configuration, error) {
return nil, err
}
-// Track the location where we created this configuration.
-c.path = path
// Replace environment variables within the configuration file with their
// values from the host system.
b = []byte(os.ExpandEnv(string(b)))
@@ -189,11 +269,6 @@ func GetJwtAlgorithm() *jwt.HMACSHA {
return _jwtAlgo
}
-// Returns the path for this configuration file.
-func (c *Configuration) GetPath() string {
-return c.path
-}
// Ensures that the Pterodactyl core user exists on the system. This user will be the
// owner of all data in the root data directory and is used as the user within containers.
//
@@ -219,7 +294,7 @@ func (c *Configuration) EnsurePterodactylUser() (*user.User, error) {
// Alpine Linux is the only OS we currently support that doesn't work with the useradd command, so
// in those cases we just modify the command a bit to work as expected.
-if strings.HasPrefix(sysName, "alpine") {
+if strings.HasPrefix(sysName, "Alpine") {
command = fmt.Sprintf("adduser -S -D -H -G %[1]s -s /bin/false %[1]s", c.System.Username)
// We have to create the group first on Alpine, so do that here before continuing on
@@ -320,10 +395,6 @@ func (c *Configuration) WriteToDisk() error {
ccopy.Debug = false
}
-if c.path == "" {
-return errors.New("cannot write configuration, no path defined in struct")
-}
b, err := yaml.Marshal(&ccopy)
if err != nil {
return err
@@ -333,7 +404,7 @@ func (c *Configuration) WriteToDisk() error {
c.writeLock.Lock()
defer c.writeLock.Unlock()
-if err := ioutil.WriteFile(c.GetPath(), b, 0644); err != nil {
+if err := ioutil.WriteFile("config.yml", b, 0644); err != nil {
return err
}
@@ -342,10 +413,20 @@ func (c *Configuration) WriteToDisk() error {
// Gets the system release name.
func getSystemName() (string, error) {
-// use osrelease to get release version and ID
+// alpine doesn't have lsb_release
-if release, err := osrelease.Read(); err != nil {
+_, err := os.Stat("/etc/alpine-release")
-return "", err
+if os.IsNotExist(err) {
+// if the alpine release file doesn't exist. run lsb_release
+cmd := exec.Command("lsb_release", "-is")
+b, err := cmd.Output()
+if err != nil {
+return "", err
+}
+return string(b), nil
} else {
-return release["ID"], nil
+// if the alpine release file does exist return string
+return "Alpine", err
}
}

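The SystemConfiguration and Docker structs that this change inlines into config.go rely on creasty/defaults struct tags for their default values. Below is a standalone sketch of how those `default:"…"` tags are resolved; the cut-down struct is illustrative only, and the daemon applies the same mechanism while loading config.yml rather than in isolation like this.

package main

import (
	"fmt"

	"github.com/creasty/defaults"
)

// A cut-down copy of the tags used above, purely to show how `default:"…"`
// values are filled in.
type systemDefaults struct {
	Data             string `default:"/srv/daemon-data" yaml:"data"`
	ArchiveDirectory string `default:"/srv/daemon-data/.archives" yaml:"archive_directory"`
	BackupDirectory  string `default:"/srv/daemon-data/.backups" yaml:"backup_directory"`
	Username         string `default:"pterodactyl" yaml:"username"`
}

func main() {
	s := systemDefaults{}

	// defaults.Set walks the struct tags and fills in any zero-valued fields.
	if err := defaults.Set(&s); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", s)
}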

@@ -1,52 +0,0 @@
package config
type dockerNetworkInterfaces struct {
V4 struct {
Subnet string `default:"172.18.0.0/16"`
Gateway string `default:"172.18.0.1"`
}
V6 struct {
Subnet string `default:"fdba:17c8:6c94::/64"`
Gateway string `default:"fdba:17c8:6c94::1011"`
}
}
type DockerNetworkConfiguration struct {
// The interface that should be used to create the network. Must not conflict
// with any other interfaces in use by Docker or on the system.
Interface string `default:"172.18.0.1"`
// The DNS settings for containers.
Dns []string `default:"[\"1.1.1.1\", \"1.0.0.1\"]"`
// The name of the network to use. If this network already exists it will not
// be created. If it is not found, a new network will be created using the interface
// defined.
Name string `default:"pterodactyl_nw"`
ISPN bool `default:"false" yaml:"ispn"`
Driver string `default:"bridge"`
IsInternal bool `default:"false" yaml:"is_internal"`
EnableICC bool `default:"true" yaml:"enable_icc"`
Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
}
// Defines the docker configuration used by the daemon when interacting with
// containers and networks on the system.
type DockerConfiguration struct {
// Network configuration that should be used when creating a new network
// for containers run through the daemon.
Network DockerNetworkConfiguration `json:"network" yaml:"network"`
// If true, container images will be updated when a server starts if there
// is an update available. If false the daemon will not attempt updates and will
// defer to the host system to manage image updates.
UpdateImages bool `default:"true" json:"update_images" yaml:"update_images"`
// The location of the Docker socket.
Socket string `default:"/var/run/docker.sock"`
// Defines the location of the timezone file on the host system that should
// be mounted into the created containers so that they all use the same time.
TimezonePath string `default:"/etc/timezone" json:"timezone_path" yaml:"timezone_path"`
}


@@ -1,96 +0,0 @@
package config
import (
"go.uber.org/zap"
"os"
"path"
)
// Defines basic system configuration settings.
type SystemConfiguration struct {
// The root directory where all of the pterodactyl data is stored at.
RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
// Directory where logs for server installations and other wings events are logged.
LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
// Directory where the server data is stored at.
Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
// Directory where server archives for transferring will be stored.
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
// Directory where local backups will be stored on the machine.
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
// The user that should own all of the server files, and be used for containers.
Username string `default:"pterodactyl" yaml:"username"`
// Definitions for the user that gets created to ensure that we can quickly access
// this information without constantly having to do a system lookup.
User struct {
Uid int
Gid int
}
// Determines if permissions for a server should be set automatically on
// daemon boot. This can take a long time on systems with many servers, or on
// systems with servers containing thousands of files.
//
// Setting this to true by default helps us avoid a lot of support requests
// from people that keep trying to move files around as a root user leading
// to server permission issues.
//
// In production and heavy use environments where boot speed is essential,
// this should be set to false as servers will self-correct permissions on
// boot anyways.
SetPermissionsOnBoot bool `default:"true" yaml:"set_permissions_on_boot"`
// Determines if Wings should detect a server that stops with a normal exit code of
// "0" as being crashed if the process stopped without any Wings interaction. E.g.
// the user did not press the stop button, but the process stopped cleanly.
DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"`
Sftp *SftpConfiguration `yaml:"sftp"`
}
// Ensures that all of the system directories exist on the system. These directories are
// created so that only the owner can read the data, and no other users.
func (sc *SystemConfiguration) ConfigureDirectories() error {
zap.S().Debugw("ensuring root data directory exists", zap.String("path", sc.RootDirectory))
if err := os.MkdirAll(sc.RootDirectory, 0700); err != nil {
return err
}
zap.S().Debugw("ensuring log directory exists", zap.String("path", sc.LogDirectory))
if err := os.MkdirAll(path.Join(sc.LogDirectory, "/install"), 0700); err != nil {
return err
}
zap.S().Debugw("ensuring server data directory exists", zap.String("path", sc.Data))
if err := os.MkdirAll(sc.Data, 0700); err != nil {
return err
}
zap.S().Debugw("ensuring archive data directory exists", zap.String("path", sc.ArchiveDirectory))
if err := os.MkdirAll(sc.ArchiveDirectory, 0700); err != nil {
return err
}
zap.S().Debugw("ensuring backup data directory exists", zap.String("path", sc.BackupDirectory))
if err := os.MkdirAll(sc.BackupDirectory, 0700); err != nil {
return err
}
return nil
}
// Returns the location of the JSON file that tracks server states.
func (sc *SystemConfiguration) GetStatesPath() string {
return path.Join(sc.RootDirectory, "states.json")
}
// Returns the location of the JSON file that tracks server states.
func (sc *SystemConfiguration) GetInstallLogPath() string {
return path.Join(sc.LogDirectory, "install/")
}

data/.gitignore (new file, vendored, 4 lines added)

@@ -0,0 +1,4 @@
servers/*.yml
!install_logs/.gitkeep
install_logs/*
states.json


go.mod (12 changed lines)

@@ -19,7 +19,6 @@ require (
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
github.com/beevik/etree v1.1.0
github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929
-github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
github.com/creasty/defaults v1.3.0
github.com/docker/distribution v2.7.1+incompatible // indirect
@@ -36,7 +35,6 @@ require (
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334
github.com/imdario/mergo v0.3.8
-github.com/klauspost/pgzip v1.2.3
github.com/magiconair/properties v1.8.1
github.com/mattn/go-shellwords v1.0.10 // indirect
github.com/mholt/archiver/v3 v3.3.0
@@ -47,24 +45,22 @@ require (
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible
-github.com/pkg/errors v0.9.1
+github.com/pkg/errors v0.8.1
github.com/pkg/profile v1.4.0
github.com/pkg/sftp v1.10.1 // indirect
github.com/pterodactyl/sftp-server v1.1.1
github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce
-github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94
github.com/smartystreets/goconvey v1.6.4 // indirect
github.com/spf13/cobra v0.0.7
github.com/stretchr/testify v1.5.1 // indirect
go.uber.org/atomic v1.5.1 // indirect
go.uber.org/multierr v1.4.0 // indirect
go.uber.org/zap v1.13.0
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 // indirect
+golang.org/x/crypto v0.0.0-20200403201458-baeed622b8d8 // indirect
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d // indirect
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
+golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b // indirect
-golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/ini.v1 v1.51.0
gopkg.in/yaml.v2 v2.2.8

go.sum (22 changed lines)

@@ -29,8 +29,6 @@ github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929 h1:MW/JDk68Rny52y
github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249 h1:R0IDH8daQ3lODvu8YtxnIqqth5qMGCJyADoUQvmLx4o=
-github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249/go.mod h1:EHKW9yNEYSBpTKzuu7Y9oOrft/UlzH57rMIB03oev6M=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -141,8 +139,6 @@ github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/klauspost/pgzip v1.2.3 h1:Ce2to9wvs/cuJ2b86/CKQoTYr9VHfpanYosZ0UBJqdw=
-github.com/klauspost/pgzip v1.2.3/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
@@ -203,8 +199,6 @@ github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
github.com/pkg/sftp v1.8.3 h1:9jSe2SxTM8/3bXZjtqnkgTBW+lA8db0knZJyns7gpBA=
@@ -229,8 +223,6 @@ github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce/go.mod h1:3j2
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 h1:G04eS0JkAIVZfaJLjla9dNxkJCPiKIGZlw9AfOhzOD0=
-github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -298,8 +290,8 @@ golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
+golang.org/x/crypto v0.0.0-20200403201458-baeed622b8d8 h1:fpnn/HnJONpIu6hkXi1u/7rR0NzilgWr4T0JmWkEitk=
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200403201458-baeed622b8d8/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -328,8 +320,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -340,8 +330,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ixkcwXThoiF6yf+R9scA=
-golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@@ -360,8 +350,8 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290 h1:NXNmtp0ToD36cui5IqWy95LC4Y6vT/4y3RnPxlQPinU=
+golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b h1:AFZdJUT7jJYXQEC29hYH/WZkoV7+KhwxQGmdZ19yYoY=
-golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools/gopls v0.1.3/go.mod h1:vrCQzOKxvuiZLjCKSmbbov04oeBQQOb4VQqwYK2PWIY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=


@@ -204,11 +204,6 @@ func (f *ConfigurationFile) parseXmlFile(path string) error {
// Ensure the XML is indented properly.
doc.Indent(2)
-// Truncate the file before attempting to write the changes.
-if err := os.Truncate(path, 0); err != nil {
-return err
-}
// Write the XML to the file.
_, err = doc.WriteTo(file)
@@ -266,11 +261,6 @@ func (f *ConfigurationFile) parseIniFile(path string) error {
}
}
-// Truncate the file before attempting to write the changes.
-if err := os.Truncate(path, 0); err != nil {
-return err
-}
if _, err := cfg.WriteTo(file); err != nil {
return err
}
@@ -397,7 +387,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
}
}
-w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}

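For background, the truncation calls removed above guarded against stale bytes being left behind when a rewritten file is shorter than its previous contents. A standalone sketch of that failure mode, using only the standard library (the file name and contents below are made up):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// Standalone illustration: rewriting a file in place without truncating it
// leaves the tail of the longer, older content behind on disk.
func main() {
	path := "example.properties"

	// First version of the file is fairly long.
	if err := ioutil.WriteFile(path, []byte("server-name=My Minecraft Server\n"), 0644); err != nil {
		panic(err)
	}

	// Rewrite it with shorter content, but without os.O_TRUNC (and without a
	// separate os.Truncate call).
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	f.Write([]byte("server-name=Short\n"))
	f.Close()

	// The leftover bytes from the first write are still present.
	b, _ := ioutil.ReadFile(path)
	fmt.Printf("%q\n", string(b))
}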

@@ -4,7 +4,6 @@ import (
"bufio" "bufio"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/router/tokens" "github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server/backup"
"net/http" "net/http"
"os" "os"
"strconv" "strconv"
@@ -26,20 +25,13 @@ func getDownloadBackup(c *gin.Context) {
return return
} }
b, st, err := backup.LocateLocal(token.BackupUuid) p, st, err := s.LocateBackup(token.BackupUuid)
if err != nil { if err != nil {
if os.IsNotExist(err) {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested backup was not found on this server.",
})
return
}
TrackedServerError(err, s).AbortWithServerError(c) TrackedServerError(err, s).AbortWithServerError(c)
return return
} }
f, err := os.Open(b.Path()) f, err := os.Open(p)
if err != nil { if err != nil {
TrackedServerError(err, s).AbortWithServerError(c) TrackedServerError(err, s).AbortWithServerError(c)
return return

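The hunk above ends before the response is actually written. One plausible way the opened archive could be streamed back through gin is sketched below; the route, paths, and headers are assumptions for illustration, not the daemon's actual handler.

package main

import (
	"io"
	"net/http"
	"os"
	"strconv"

	"github.com/gin-gonic/gin"
)

// streamBackup sketches one way the located archive could be sent to the
// client once its path and file info are known.
func streamBackup(c *gin.Context, p string, st os.FileInfo) {
	f, err := os.Open(p)
	if err != nil {
		c.AbortWithStatus(http.StatusInternalServerError)
		return
	}
	defer f.Close()

	c.Header("Content-Length", strconv.Itoa(int(st.Size())))
	c.Header("Content-Disposition", "attachment; filename="+st.Name())
	c.Header("Content-Type", "application/octet-stream")

	// Stream the archive straight to the response rather than buffering it.
	io.Copy(c.Writer, f)
}

func main() {
	r := gin.Default()
	r.GET("/download/backup", func(c *gin.Context) {
		// Token validation and backup lookup omitted; see the handler above.
		p := "/srv/daemon-data/.backups/example.tar.gz"
		st, err := os.Stat(p)
		if err != nil {
			c.AbortWithStatus(http.StatusNotFound)
			return
		}
		streamBackup(c, p, st)
	})
	r.Run(":8080")
}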

@@ -3,23 +3,26 @@ package router
import (
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/server"
-"github.com/pterodactyl/wings/server/backup"
"go.uber.org/zap"
"net/http"
+"os"
)
// Backs up a server.
func postServerBackup(c *gin.Context) {
s := GetServer(c.Param("server"))
-data := &backup.LocalBackup{}
+var data struct{
+Uuid string `json:"uuid"`
+IgnoredFiles []string `json:"ignored_files"`
+}
c.BindJSON(&data)
-go func(b *backup.LocalBackup, serv *server.Server) {
+go func(backup *server.Backup) {
-if err := serv.BackupLocal(b); err != nil {
+if err := backup.BackupAndNotify(); err != nil {
zap.S().Errorw("failed to generate backup for server", zap.Error(err))
}
-}(data, s)
+}(s.NewBackup(data.Uuid, data.IgnoredFiles))
c.Status(http.StatusAccepted)
}
@@ -28,13 +31,13 @@ func postServerBackup(c *gin.Context) {
func deleteServerBackup(c *gin.Context) {
s := GetServer(c.Param("server"))
-b, _, err := backup.LocateLocal(c.Param("backup"))
+p, _, err := s.LocateBackup(c.Param("backup"))
if err != nil {
TrackedServerError(err, s).AbortWithServerError(c)
return
}
-if err := b.Remove(); err != nil {
+if err := os.Remove(p); err != nil {
TrackedServerError(err, s).AbortWithServerError(c)
return
}


@@ -17,14 +17,16 @@ import (
"time" "time"
) )
var alg *jwt.HMACSHA
const ( const (
PermissionConnect = "websocket.connect" PermissionConnect = "websocket.*"
PermissionSendCommand = "control.console" PermissionSendCommand = "control.console"
PermissionSendPowerStart = "control.start" PermissionSendPowerStart = "control.start"
PermissionSendPowerStop = "control.stop" PermissionSendPowerStop = "control.stop"
PermissionSendPowerRestart = "control.restart" PermissionSendPowerRestart = "control.restart"
PermissionReceiveErrors = "admin.websocket.errors" PermissionReceiveErrors = "admin.errors"
PermissionReceiveInstall = "admin.websocket.install" PermissionReceiveInstall = "admin.install"
PermissionReceiveBackups = "backup.read" PermissionReceiveBackups = "backup.read"
) )

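These constants are typically consumed by checking whether the permission list carried in the panel-issued JWT contains a given string. The helper below is hypothetical and only illustrates that membership check; it is not the daemon's implementation.

package main

import "fmt"

// hasPermission is a hypothetical helper: the panel-issued token carries a
// list of granted permission strings, and the daemon checks membership
// before acting on a websocket event.
func hasPermission(granted []string, needed string) bool {
	for _, p := range granted {
		if p == needed {
			return true
		}
	}
	return false
}

func main() {
	granted := []string{"websocket.connect", "control.console", "control.start"}

	fmt.Println(hasPermission(granted, "control.console")) // true
	fmt.Println(hasPermission(granted, "backup.read"))     // false
}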

@@ -1,96 +1,210 @@
package server
import (
-"bufio"
+"crypto/sha256"
+"encoding/hex"
+"github.com/mholt/archiver/v3"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
-"github.com/pterodactyl/wings/server/backup"
+"github.com/pterodactyl/wings/config"
"go.uber.org/zap"
+"io"
"os"
"path"
+"strings"
+"sync"
)
-// Notifies the panel of a backup's state and returns an error if one is encountered
-// while performing this action.
-func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, successful bool) error {
+type Backup struct {
+Uuid string `json:"uuid"`
+IgnoredFiles []string `json:"ignored_files"`
+server *Server
+localDirectory string
+}
// Create a new Backup struct from data passed through in a request.
func (s *Server) NewBackup(uuid string, ignore []string) *Backup {
return &Backup{
Uuid: uuid,
IgnoredFiles: ignore,
server: s,
localDirectory: path.Join(config.Get().System.BackupDirectory, s.Uuid),
}
}
// Locates the backup for a server and returns the local path. This will obviously only
// work if the backup was created as a local backup.
func (s *Server) LocateBackup(uuid string) (string, os.FileInfo, error) {
p := path.Join(config.Get().System.BackupDirectory, s.Uuid, uuid+".tar.gz")
st, err := os.Stat(p)
if err != nil {
return "", nil, err
}
if st.IsDir() {
return "", nil, errors.New("invalid archive found; is directory")
}
return p, st, nil
}
// Ensures that the local backup destination for files exists.
func (b *Backup) ensureLocalBackupLocation() error {
if _, err := os.Stat(b.localDirectory); err != nil {
if !os.IsNotExist(err) {
return errors.WithStack(err)
}
return os.MkdirAll(b.localDirectory, 0700)
}
return nil
}
// Returns the path for this specific backup.
func (b *Backup) GetPath() string {
return path.Join(b.localDirectory, b.Uuid+".tar.gz")
}
func (b *Backup) GetChecksum() ([]byte, error) {
h := sha256.New()
f, err := os.Open(b.GetPath())
if err != nil {
return []byte{}, errors.WithStack(err)
}
defer f.Close()
if _, err := io.Copy(h, f); err != nil {
return []byte{}, errors.WithStack(err)
}
return h.Sum(nil), nil
}
// Generates a backup of the selected files and pushes it to the defined location
// for this instance.
func (b *Backup) Backup() (*api.BackupRequest, error) {
rootPath := b.server.Filesystem.Path()
if err := b.ensureLocalBackupLocation(); err != nil {
return nil, errors.WithStack(err)
}
zap.S().Debugw("starting archive of server files for backup", zap.String("server", b.server.Uuid), zap.String("backup", b.Uuid))
if err := archiver.Archive([]string{rootPath}, b.GetPath()); err != nil {
if strings.HasPrefix(err.Error(), "file already exists") {
zap.S().Debugw("backup already exists on system, removing and re-attempting", zap.String("backup", b.Uuid))
if rerr := os.Remove(b.GetPath()); rerr != nil {
return nil, errors.WithStack(rerr)
}
// Re-attempt this backup.
return b.Backup()
}
// If there was some error with the archive, just go ahead and ensure the backup
// is completely destroyed at this point. Ignore any errors from this function.
os.Remove(b.GetPath())
return nil, err
}
wg := sync.WaitGroup{}
wg.Add(2)
var checksum string
// Calculate the checksum for the file.
go func() {
defer wg.Done()
resp, err := b.GetChecksum()
if err != nil {
zap.S().Errorw("failed to calculate checksum for backup", zap.String("backup", b.Uuid), zap.Error(err))
}
checksum = hex.EncodeToString(resp)
}()
var s int64
go func() {
defer wg.Done()
st, err := os.Stat(b.GetPath())
if err != nil {
return
}
s = st.Size()
}()
wg.Wait()
return &api.BackupRequest{
Successful: true,
Sha256Hash: checksum,
FileSize: s,
}, nil
}
// Performs a server backup and then notifies the Panel of the completed status
// so that the backup shows up for the user correctly.
func (b *Backup) BackupAndNotify() error {
resp, err := b.Backup()
if err != nil {
b.notifyPanel(resp)
return errors.WithStack(err)
}
if err := b.notifyPanel(resp); err != nil {
// These errors indicate that the Panel will not know about the status of this
// backup, so let's just go ahead and delete it, and let the Panel handle the
// cleanup process for the backups.
//
// @todo perhaps in the future we can sync the backups from the servers on boot?
os.Remove(b.GetPath())
return err
}
// Emit an event over the socket so we can update the backup in realtime on
// the frontend for the server.
b.server.Events().PublishJson(BackupCompletedEvent+":"+b.Uuid, map[string]interface{}{
"uuid": b.Uuid,
"sha256_hash": resp.Sha256Hash,
"file_size": resp.FileSize,
})
return nil
}
func (b *Backup) notifyPanel(request *api.BackupRequest) error {
r := api.NewRequester()
-rerr, err := r.SendBackupStatus(uuid, ad.ToRequest(successful))
+rerr, err := r.SendBackupStatus(b.server.Uuid, b.Uuid, *request)
if rerr != nil || err != nil {
if err != nil {
zap.S().Errorw(
"failed to notify panel of backup status due to internal code error",
-zap.String("backup", s.Uuid),
+zap.String("server", b.server.Uuid),
+zap.String("backup", b.Uuid),
zap.Error(err),
)
return err
}
-zap.S().Warnw(rerr.String(), zap.String("backup", uuid))
+zap.S().Warnw(
+rerr.String(),
+zap.String("server", b.server.Uuid),
+zap.String("backup", b.Uuid),
+)
return errors.New(rerr.String())
}
return nil
}
// Performs a server backup and then emits the event over the server websocket. We
// let the actual backup system handle notifying the panel of the status, but that
// won't emit a websocket event.
func (s *Server) BackupLocal(b *backup.LocalBackup) error {
// If no ignored files are present in the request, check for a .pteroignore file in the root
// of the server files directory, and use that to generate the backup.
if len(b.IgnoredFiles) == 0 {
f, err := os.Open(path.Join(s.Filesystem.Path(), ".pteroignore"))
if err != nil {
if !os.IsNotExist(err) {
zap.S().Warnw("failed to open .pteroignore file in server directory", zap.String("server", s.Uuid), zap.Error(errors.WithStack(err)))
}
} else {
scanner := bufio.NewScanner(f)
for scanner.Scan() {
// Only include non-empty lines, for the sake of clarity...
if t := scanner.Text(); t != "" {
b.IgnoredFiles = append(b.IgnoredFiles, t)
}
}
if err := scanner.Err(); err != nil {
zap.S().Warnw("failed to scan .pteroignore file for lines", zap.String("server", s.Uuid), zap.Error(errors.WithStack(err)))
}
}
}
// Get the included files based on the root path and the ignored files provided.
inc, err := s.Filesystem.GetIncludedFiles(s.Filesystem.Path(), b.IgnoredFiles)
if err != nil {
return errors.WithStack(err)
}
if err := b.Backup(inc, s.Filesystem.Path()); err != nil {
if notifyError := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); notifyError != nil {
zap.S().Warnw("failed to notify panel of failed backup state", zap.String("backup", b.Uuid), zap.Error(err))
}
return errors.WithStack(err)
}
// Try to notify the panel about the status of this backup. If for some reason this request
// fails, delete the archive from the daemon and return that error up the chain to the caller.
ad := b.Details()
if notifyError := s.notifyPanelOfBackup(b.Identifier(), ad, true); notifyError != nil {
b.Remove()
return notifyError
}
// Emit an event over the socket so we can update the backup in realtime on
// the frontend for the server.
s.Events().PublishJson(BackupCompletedEvent+":"+b.Uuid, map[string]interface{}{
"uuid": b.Uuid,
"sha256_hash": ad.Checksum,
"file_size": ad.Size,
})
return nil
}

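A short usage sketch of the new Backup type, mirroring how router_server_backup.go (earlier in this diff) drives it; the UUID and ignore list are placeholder values.

package example

import (
	"github.com/pterodactyl/wings/server"
	"go.uber.org/zap"
)

// kickOffBackup mirrors the router flow: build a Backup from request data,
// then run the backup and the panel notification in the background.
// The UUID and ignored files here are placeholders.
func kickOffBackup(s *server.Server) {
	b := s.NewBackup("backup-uuid", []string{"*.log", "cache/*"})

	go func(backup *server.Backup) {
		if err := backup.BackupAndNotify(); err != nil {
			zap.S().Errorw("failed to generate backup for server", zap.Error(err))
		}
	}(b)
}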

@@ -1,111 +0,0 @@
package backup
import (
"archive/tar"
"context"
gzip "github.com/klauspost/pgzip"
"github.com/remeh/sizedwaitgroup"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"io"
"os"
"strings"
"sync"
)
type Archive struct {
sync.Mutex
TrimPrefix string
Files *IncludedFiles
}
// Creates an archive at dest with all of the files definied in the included files struct.
func (a *Archive) Create(dest string, ctx context.Context) error {
f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
gzw := gzip.NewWriter(f)
defer gzw.Close()
tw := tar.NewWriter(gzw)
defer tw.Close()
wg := sizedwaitgroup.New(10)
g, ctx := errgroup.WithContext(ctx)
// Iterate over all of the files to be included and put them into the archive. This is
// done as a concurrent goroutine to speed things along. If an error is encountered at
// any step, the entire process is aborted.
for p, s := range a.Files.All() {
if (*s).IsDir() {
continue
}
pa := p
st := s
g.Go(func() error {
wg.Add()
defer wg.Done()
select {
case <-ctx.Done():
return ctx.Err()
default:
return a.addToArchive(pa, st, tw)
}
})
}
// Block until the entire routine is completed.
if err := g.Wait(); err != nil {
f.Close()
// Attempt to remove the archive if there is an error, report that error to
// the logger if it fails.
if rerr := os.Remove(dest); rerr != nil && !os.IsNotExist(rerr) {
zap.S().Warnw("failed to delete corrupted backup archive", zap.String("location", dest))
}
return err
}
return nil
}
// Adds a single file to the existing tar archive writer.
func (a *Archive) addToArchive(p string, s *os.FileInfo, w *tar.Writer) error {
f, err := os.Open(p)
if err != nil {
return err
}
defer f.Close()
st := *s
header := &tar.Header{
// Trim the long server path from the name of the file so that the resulting
// archive is exactly how the user would see it in the panel file manager.
Name: strings.TrimPrefix(p, a.TrimPrefix),
Size: st.Size(),
Mode: int64(st.Mode()),
ModTime: st.ModTime(),
}
// These actions must occur sequentially, even if this function is called multiple
// in parallel. You'll get some nasty panic's otherwise.
a.Lock()
defer a.Unlock()
if err = w.WriteHeader(header); err != nil {
return err
}
if _, err := io.Copy(w, f); err != nil {
return err
}
return nil
}

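The removed Archive type above streamed each included file into a gzipped tar writer, using the parallel pgzip writer and a sized wait group. A stripped-down, sequential sketch of the same pattern with only the standard library is shown below; the paths are illustrative.

package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"os"
	"strings"
)

// addFile follows the removed addToArchive pattern: build a header from the
// file's info, trim the long prefix from its name, then stream the body in.
func addFile(tw *tar.Writer, p, trimPrefix string) error {
	f, err := os.Open(p)
	if err != nil {
		return err
	}
	defer f.Close()

	st, err := f.Stat()
	if err != nil {
		return err
	}

	header := &tar.Header{
		Name:    strings.TrimPrefix(p, trimPrefix),
		Size:    st.Size(),
		Mode:    int64(st.Mode()),
		ModTime: st.ModTime(),
	}
	if err := tw.WriteHeader(header); err != nil {
		return err
	}

	_, err = io.Copy(tw, f)
	return err
}

func main() {
	out, err := os.Create("example.tar.gz")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// gzip writer wraps the file; tar writer wraps the gzip stream.
	gzw := gzip.NewWriter(out)
	defer gzw.Close()

	tw := tar.NewWriter(gzw)
	defer tw.Close()

	if err := addFile(tw, "/etc/hostname", "/etc/"); err != nil {
		panic(err)
	}
}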

@@ -1,42 +0,0 @@
package backup
import (
"github.com/pterodactyl/wings/api"
)
type Backup interface {
// Returns the UUID of this backup as tracked by the panel instance.
Identifier() string
// Generates a backup in whatever the configured source for the specific
// implementation is.
Backup(*IncludedFiles, string) error
// Returns a SHA256 checksum for the generated backup.
Checksum() ([]byte, error)
// Returns the size of the generated backup.
Size() (int64, error)
// Returns the path to the backup on the machine. This is not always the final
// storage location of the backup, simply the location we're using to store
// it until it is moved to the final spot.
Path() string
// Returns details about the archive.
Details() *ArchiveDetails
}
type ArchiveDetails struct {
Checksum string `json:"checksum"`
Size int64 `json:"size"`
}
// Returns a request object.
func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
return api.BackupRequest{
Checksum: ad.Checksum,
Size: ad.Size,
Successful: successful,
}
}


@@ -1,154 +0,0 @@
package backup
import (
"context"
"crypto/sha256"
"encoding/hex"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap"
"io"
"os"
"path"
"sync"
)
type LocalBackup struct {
// The UUID of this backup object. This must line up with a backup from
// the panel instance.
Uuid string `json:"uuid"`
// An array of files to ignore when generating this backup. This should be
// compatible with a standard .gitignore structure.
IgnoredFiles []string `json:"ignored_files"`
}
var _ Backup = (*LocalBackup)(nil)
// Locates the backup for a server and returns the local path. This will obviously only
// work if the backup was created as a local backup.
func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
b := &LocalBackup{
Uuid: uuid,
IgnoredFiles: nil,
}
st, err := os.Stat(b.Path())
if err != nil {
return nil, nil, err
}
if st.IsDir() {
return nil, nil, errors.New("invalid archive found; is directory")
}
return b, st, nil
}
func (b *LocalBackup) Identifier() string {
return b.Uuid
}
// Returns the path for this specific backup.
func (b *LocalBackup) Path() string {
return path.Join(config.Get().System.BackupDirectory, b.Uuid+".tar.gz")
}
// Returns the SHA256 checksum of a backup.
func (b *LocalBackup) Checksum() ([]byte, error) {
h := sha256.New()
f, err := os.Open(b.Path())
if err != nil {
return []byte{}, errors.WithStack(err)
}
defer f.Close()
if _, err := io.Copy(h, f); err != nil {
return []byte{}, errors.WithStack(err)
}
return h.Sum(nil), nil
}
// Removes a backup from the system.
func (b *LocalBackup) Remove() error {
return os.Remove(b.Path())
}
// Generates a backup of the selected files and pushes it to the defined location
// for this instance.
func (b *LocalBackup) Backup(included *IncludedFiles, prefix string) error {
a := &Archive{
TrimPrefix: prefix,
Files: included,
}
err := a.Create(b.Path(), context.Background())
return err
}
// Returns the size of the generated backup.
func (b *LocalBackup) Size() (int64, error) {
st, err := os.Stat(b.Path())
if err != nil {
return 0, errors.WithStack(err)
}
return st.Size(), nil
}
// Returns details of the archive, using two goroutines to calculate the checksum and
// the size of the archive concurrently.
func (b *LocalBackup) Details() *ArchiveDetails {
wg := sync.WaitGroup{}
wg.Add(2)
var checksum string
// Calculate the checksum for the file.
go func() {
defer wg.Done()
resp, err := b.Checksum()
if err != nil {
zap.S().Errorw("failed to calculate checksum for backup", zap.String("backup", b.Uuid), zap.Error(err))
}
checksum = hex.EncodeToString(resp)
}()
var sz int64
go func() {
defer wg.Done()
st, err := os.Stat(b.Path())
if err != nil {
return
}
sz = st.Size()
}()
wg.Wait()
return &ArchiveDetails{
Checksum: checksum,
Size: sz,
}
}
// Ensures that the local backup destination for files exists.
func (b *LocalBackup) ensureLocalBackupLocation() error {
d := config.Get().System.BackupDirectory
if _, err := os.Stat(d); err != nil {
if !os.IsNotExist(err) {
return errors.WithStack(err)
}
return os.MkdirAll(d, 0700)
}
return nil
}
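Pulling the pieces above together, a hedged sketch of the full local backup flow might look like the following; the UUID and prefix are placeholders, and the included file set would normally come from the server's filesystem walker.
// Illustrative sketch only; lives alongside the types above in the backup package.
func runLocalBackup(included *IncludedFiles) (*ArchiveDetails, error) {
	b := &LocalBackup{Uuid: "00000000-0000-0000-0000-000000000000"}
	// Make sure the configured backup directory exists before writing to it.
	if err := b.ensureLocalBackupLocation(); err != nil {
		return nil, err
	}
	// Stream the included files into <backup dir>/<uuid>.tar.gz.
	if err := b.Backup(included, "/srv/daemon-data/00000000-0000-0000-0000-000000000000"); err != nil {
		return nil, err
	}
	// Checksum and size are computed concurrently and can then be reported to the panel.
	return b.Details(), nil
}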

View File

@@ -1,37 +0,0 @@
package backup
type S3Backup struct {
// The UUID of this backup object. This must line up with a backup from
// the panel instance.
Uuid string
// An array of files to ignore when generating this backup. This should be
// compatible with a standard .gitignore structure.
IgnoredFiles []string
}
var _ Backup = (*S3Backup)(nil)
func (s *S3Backup) Identifier() string {
return s.Uuid
}
func (s *S3Backup) Backup(included *IncludedFiles, prefix string) error {
panic("implement me")
}
func (s *S3Backup) Checksum() ([]byte, error) {
return []byte(""), nil
}
func (s *S3Backup) Size() (int64, error) {
return 0, nil
}
func (s *S3Backup) Path() string {
return ""
}
func (s *S3Backup) Details() *ArchiveDetails {
return &ArchiveDetails{}
}

View File

@@ -1,31 +0,0 @@
package backup
import (
"os"
"sync"
)
type IncludedFiles struct {
sync.RWMutex
files map[string]*os.FileInfo
}
// Pushes an additional file or folder onto the struct.
func (i *IncludedFiles) Push(info *os.FileInfo, p string) {
i.Lock()
defer i.Unlock()
if i.files == nil {
i.files = make(map[string]*os.FileInfo)
}
i.files[p] = info
}
// Returns all of the files that were marked as being included.
func (i *IncludedFiles) All() map[string]*os.FileInfo {
i.RLock()
defer i.RUnlock()
return i.files
}
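A small usage sketch (names illustrative): the set is typically populated by the filesystem walker and later read back with All() by the archiver.
// Illustrative sketch only; paths are placeholders.
func collectFiles(paths []string) (*IncludedFiles, error) {
	inc := new(IncludedFiles)
	for _, p := range paths {
		st, err := os.Stat(p)
		if err != nil {
			return nil, err
		}
		// Push is safe to call from multiple goroutines thanks to the embedded RWMutex.
		inc.Push(&st, p)
	}
	return inc, nil
}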

View File

@@ -17,7 +17,6 @@ import (
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"go.uber.org/zap" "go.uber.org/zap"
"io" "io"
"math"
"os" "os"
"strconv" "strconv"
"strings" "strings"
@@ -147,10 +146,6 @@ func (d *DockerEnvironment) OnBeforeStart() error {
return err return err
} }
if !d.Server.Filesystem.HasSpaceAvailable() {
return errors.New("cannot start server, not enough disk space available")
}
// Always destroy and re-create the server container to ensure that synced data from // Always destroy and re-create the server container to ensure that synced data from
// the Panel is used. // the Panel is used.
if err := d.Client.ContainerRemove(context.Background(), d.Server.Uuid, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil { if err := d.Client.ContainerRemove(context.Background(), d.Server.Uuid, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
@@ -619,7 +614,8 @@ func (d *DockerEnvironment) Create() error {
// from the Panel. // from the Panel.
Resources: d.getResourcesForServer(), Resources: d.getResourcesForServer(),
DNS: config.Get().Docker.Network.Dns, // @todo make this configurable again
DNS: []string{"1.1.1.1", "8.8.8.8"},
// Configure logging for the container to make it easier on the Daemon to grab // Configure logging for the container to make it easier on the Daemon to grab
// the server output. Ensure that we don't use too much space on the host machine // the server output. Ensure that we don't use too much space on the host machine
@@ -825,18 +821,9 @@ func (d *DockerEnvironment) exposedPorts() nat.PortSet {
// Formats the resources available to a server instance in such a way that Docker will // Formats the resources available to a server instance in such a way that Docker will
// generate a matching environment in the container. // generate a matching environment in the container.
func (d *DockerEnvironment) getResourcesForServer() container.Resources { func (d *DockerEnvironment) getResourcesForServer() container.Resources {
overhead := 1.05
// Set the hard limit for memory usage to be 5% more than the amount of memory assigned to
// the server. If the memory limit for the server is < 4G, use 10%, if less than 2G use
// 15%. This avoids unexpected crashes from processes like Java which run over the limit.
if d.Server.Build.MemoryLimit <= 2048 {
overhead = 1.15
} else if d.Server.Build.MemoryLimit <= 4096 {
overhead = 1.10;
}
return container.Resources{ return container.Resources{
Memory: int64(math.Round(float64(d.Server.Build.MemoryLimit) * 1000000.0 * overhead)), // @todo memory limit should be slightly higher than the reservation
Memory: d.Server.Build.MemoryLimit * 1000000,
MemoryReservation: d.Server.Build.MemoryLimit * 1000000, MemoryReservation: d.Server.Build.MemoryLimit * 1000000,
MemorySwap: d.Server.Build.ConvertedSwap(), MemorySwap: d.Server.Build.ConvertedSwap(),
CPUQuota: d.Server.Build.ConvertedCpuLimit(), CPUQuota: d.Server.Build.ConvertedCpuLimit(),
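For reference, the overhead rule removed in this hunk works out as sketched below: it pads the hard limit above the reservation so processes such as the JVM that briefly exceed their allocation are not immediately OOM-killed. This assumes the memory limit is expressed in MB and the math import.
// Illustrative reconstruction of the removed logic, not part of this diff's final code.
func hardMemoryLimit(memoryLimitMB int64) int64 {
	overhead := 1.05
	if memoryLimitMB <= 2048 {
		overhead = 1.15
	} else if memoryLimitMB <= 4096 {
		overhead = 1.10
	}
	// e.g. a 1024 MB server gets a hard limit of roughly 1178 MB while its reservation stays at 1024 MB.
	return int64(math.Round(float64(memoryLimitMB) * 1000000.0 * overhead))
}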

View File

@@ -3,14 +3,11 @@ package server
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/gabriel-vasile/mimetype" "github.com/gabriel-vasile/mimetype"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/server/backup"
ignore "github.com/sabhiram/go-gitignore"
"go.uber.org/zap" "go.uber.org/zap"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -70,7 +67,7 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
// Range over all of the path parts and form directory pathings from the end // Range over all of the path parts and form directory pathings from the end
// moving up until we have a valid resolution or we run out of paths to try. // moving up until we have a valid resolution or we run out of paths to try.
for k := range parts { for k := range parts {
try = strings.Join(parts[:(len(parts)-k)], "/") try = strings.Join(parts[:(len(parts) - k)], "/")
if !strings.HasPrefix(try, fs.Path()) { if !strings.HasPrefix(try, fs.Path()) {
break break
@@ -121,27 +118,23 @@ func (fs *Filesystem) HasSpaceAvailable() bool {
return true return true
} }
// If we have a match in the cache, use that value in the return. No need to perform an expensive var size int64
// disk operation, even if this is an empty value.
if x, exists := fs.Server.Cache.Get("disk_used"); exists { if x, exists := fs.Server.Cache.Get("disk_used"); exists {
fs.Server.Resources.Disk = x.(int64) size = x.(int64)
return (x.(int64) / 1000.0 / 1000.0) <= space
} }
// If there is no size its either because there is no data (in which case running this function // If there is no size its either because there is no data (in which case running this function
// will have effectively no impact), or there is nothing in the cache, in which case we need to // will have effectively no impact), or there is nothing in the cache, in which case we need to
// grab the size of their data directory. This is a taxing operation, so we want to store it in // grab the size of their data directory. This is a taxing operation, so we want to store it in
// the cache once we've gotten it. // the cache once we've gotten it.
size, err := fs.DirectorySize("/") if size == 0 {
if err != nil { if size, err := fs.DirectorySize("/"); err != nil {
zap.S().Warnw("failed to determine directory size", zap.String("server", fs.Server.Uuid), zap.Error(err)) zap.S().Warnw("failed to determine directory size", zap.String("server", fs.Server.Uuid), zap.Error(err))
} else {
fs.Server.Cache.Set("disk_used", size, time.Second * 60)
}
} }
// Always cache the size, even if there is an error. We want to always return that value
// so that we don't cause an endless loop of determining the disk size if there is a temporary
// error encountered.
fs.Server.Cache.Set("disk_used", size, time.Second*60)
// Determine if their folder size, in bytes, is smaller than the amount of space they've // Determine if their folder size, in bytes, is smaller than the amount of space they've
// been allocated. // been allocated.
fs.Server.Resources.Disk = size fs.Server.Resources.Disk = size
@@ -153,20 +146,42 @@ func (fs *Filesystem) HasSpaceAvailable() bool {
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation // through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
// on locations with tons of files, so it is recommended that you cache the output. // on locations with tons of files, so it is recommended that you cache the output.
func (fs *Filesystem) DirectorySize(dir string) (int64, error) { func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
w := fs.NewWalker()
ctx := context.Background()
var size int64 var size int64
err := w.Walk(dir, ctx, func(f os.FileInfo, _ string) bool { var wg sync.WaitGroup
// Only increment the size when we're dealing with a file specifically, otherwise
// just continue digging deeper until there are no more directories to iterate over. cleaned, err := fs.SafePath(dir)
if !f.IsDir() { if err != nil {
return 0, err
}
files, err := ioutil.ReadDir(cleaned)
if err != nil {
return 0, err
}
// Iterate over all of the files and directories. If it is a file, immediately add its size
// to the total size being returned. If we're dealing with a directory, call this function
// on a separate thread until we have gotten the size of everything nested within the given
// directory.
for _, f := range files {
if f.IsDir() {
wg.Add(1)
go func(p string) {
defer wg.Done()
s, _ := fs.DirectorySize(p)
atomic.AddInt64(&size, s)
}(filepath.Join(cleaned, f.Name()))
} else {
atomic.AddInt64(&size, f.Size()) atomic.AddInt64(&size, f.Size())
} }
return true }
})
return size, err wg.Wait()
return size, nil
} }
// Reads a file on the system and returns it as a byte representation in a file // Reads a file on the system and returns it as a byte representation in a file
@@ -563,43 +578,3 @@ func (fs *Filesystem) EnsureDataDirectory() error {
return nil return nil
} }
// Given a directory, iterate through all of the files and folders within it and determine
// if they should be included in the output based on an array of ignored matches. This uses
// standard .gitignore formatting to make that determination.
//
// If no ignored files are passed through you'll get the entire directory listing.
func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.IncludedFiles, error) {
cleaned, err := fs.SafePath(dir)
if err != nil {
return nil, err
}
w := fs.NewWalker()
ctx := context.Background()
i, err := ignore.CompileIgnoreLines(ignored...)
if err != nil {
return nil, err
}
// Walk through all of the files and directories on a server. This callback only returns
// files found, and will keep walking deeper and deeper into directories.
inc := new(backup.IncludedFiles)
if err := w.Walk(cleaned, ctx, func(f os.FileInfo, p string) bool {
// Avoid unnecessary parsing if there are no ignored files; nothing will match anyway,
// so there is no reason to call the function.
if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(p, fs.Path() + "/")) {
inc.Push(&f, p)
}
// We can't just abort if the path is technically ignored. It is possible there is a nested
// file or folder that should not be excluded, so in this case we need to just keep going
// until we get to a final state.
return true
}); err != nil {
return nil, err
}
return inc, nil
}
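A minimal sketch of the calling side (illustrative only, assuming the server's *Filesystem): walk the whole data directory, apply the ignore patterns, then log what would be bundled into the backup.
// Illustrative sketch only; zap is already imported in this file.
func logBackupContents(fs *Filesystem, ignored []string) error {
	inc, err := fs.GetIncludedFiles("/", ignored)
	if err != nil {
		return err
	}
	for p, info := range inc.All() {
		zap.S().Debugw("including file in backup", zap.String("path", p), zap.Int64("size", (*info).Size()))
	}
	return nil
}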

View File

@@ -1,70 +0,0 @@
package server
import (
"context"
"golang.org/x/sync/errgroup"
"io/ioutil"
"os"
"path/filepath"
)
type FileWalker struct {
*Filesystem
}
// Returns a new walker instance.
func (fs *Filesystem) NewWalker() *FileWalker {
return &FileWalker{fs}
}
// Iterate over all of the files and directories within a given directory. When a file is
// found the callback will be called with the file information. If a directory is encountered
// it will be recursively passed back through to this function.
func (fw *FileWalker) Walk(dir string, ctx context.Context, callback func (os.FileInfo, string) bool) error {
cleaned, err := fw.SafePath(dir)
if err != nil {
return err
}
// Get all of the files from this directory.
files, err := ioutil.ReadDir(cleaned)
if err != nil {
return err
}
// Create an error group that we can use to run processes in parallel while retaining
// the ability to cancel the entire process immediately should any of it fail.
g, ctx := errgroup.WithContext(ctx)
for _, f := range files {
if f.IsDir() {
fi := f
p := filepath.Join(cleaned, f.Name())
// Recursively call this function to continue digging through the directory tree within
// a separate goroutine. If the context is canceled, abort this process.
g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
// If the callback returns true, go ahead and keep walking deeper. This allows
// us to programmatically continue deeper into directories, or stop digging
// if that pathway knows it needs nothing else.
if callback(fi, p) {
return fw.Walk(p, ctx, callback)
}
return nil
}
})
} else {
// If this isn't a directory, go ahead and pass the file information into the
// callback. We don't care about the response since we won't be stepping into
// anything from here.
callback(f, filepath.Join(cleaned, f.Name()))
}
}
// Block until all of the routines finish and have returned a value.
return g.Wait()
}
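A short usage sketch (illustrative names, assuming the server package's *Filesystem): count regular files under the server root, using an atomic counter because directory callbacks run in separate goroutines.
// Illustrative sketch only; assumes context, os and sync/atomic imports.
func countFiles(fs *Filesystem) (int64, error) {
	var count int64
	w := fs.NewWalker()
	err := w.Walk("/", context.Background(), func(f os.FileInfo, p string) bool {
		if !f.IsDir() {
			atomic.AddInt64(&count, 1)
		}
		// Returning true tells the walker to keep descending into directories.
		return true
	})
	return count, err
}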

View File

@@ -10,7 +10,6 @@ import (
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pterodactyl/wings/api" "github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap" "go.uber.org/zap"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -131,7 +130,7 @@ func (ip *InstallationProcess) Run() error {
// Writes the installation script to a temporary file on the host machine so that it // Writes the installation script to a temporary file on the host machine so that it
// can be properly mounted into the installation container and then executed. // can be properly mounted into the installation container and then executed.
func (ip *InstallationProcess) writeScriptToDisk() (string, error) { func (ip *InstallationProcess) writeScriptToDisk() (string, error) {
d, err := ioutil.TempDir("", "pterodactyl/") d, err := ioutil.TempDir("", "pterodactyl")
if err != nil { if err != nil {
return "", errors.WithStack(err) return "", errors.WithStack(err)
} }
@@ -232,11 +231,6 @@ func (ip *InstallationProcess) BeforeExecute() (string, error) {
return fileName, nil return fileName, nil
} }
// Returns the log path for the installation process.
func (ip *InstallationProcess) GetLogPath() string {
return filepath.Join(config.Get().System.GetInstallLogPath(), ip.Server.Uuid+".log")
}
// Cleans up after the execution of the installation process. This grabs the logs from the // Cleans up after the execution of the installation process. This grabs the logs from the
// process to store in the server configuration directory, and then destroys the associated // process to store in the server configuration directory, and then destroys the associated
// installation container. // installation container.
@@ -254,7 +248,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
return errors.WithStack(err) return errors.WithStack(err)
} }
f, err := os.OpenFile(ip.GetLogPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) f, err := os.OpenFile(filepath.Join("data/install_logs/", ip.Server.Uuid+".log"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil { if err != nil {
return errors.WithStack(err) return errors.WithStack(err)
} }

View File

@@ -230,6 +230,7 @@ func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
s.Resources = ResourceUsage{} s.Resources = ResourceUsage{}
// Forces the configuration to be synced with the panel. // Forces the configuration to be synced with the panel.
zap.S().Debugw("syncing config with panel", zap.String("server", s.Uuid))
if err := s.SyncWithConfiguration(data); err != nil { if err := s.SyncWithConfiguration(data); err != nil {
return nil, err return nil, err
} }

View File

@@ -4,7 +4,6 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"go.uber.org/zap" "go.uber.org/zap"
"io" "io"
"io/ioutil" "io/ioutil"
@@ -12,6 +11,8 @@ import (
"sync" "sync"
) )
const stateFileLocation = "data/.states.json"
var stateMutex sync.Mutex var stateMutex sync.Mutex
// Returns the state of the servers. // Returns the state of the servers.
@@ -21,7 +22,7 @@ func getServerStates() (map[string]string, error) {
defer stateMutex.Unlock() defer stateMutex.Unlock()
// Open the states file. // Open the states file.
f, err := os.OpenFile(config.Get().System.GetStatesPath(), os.O_RDONLY|os.O_CREATE, 0644) f, err := os.OpenFile(stateFileLocation, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil { if err != nil {
return nil, errors.WithStack(err) return nil, errors.WithStack(err)
} }
@@ -54,7 +55,7 @@ func saveServerStates() error {
defer stateMutex.Unlock() defer stateMutex.Unlock()
// Write the data to the file // Write the data to the file
if err := ioutil.WriteFile(config.Get().System.GetStatesPath(), data, 0644); err != nil { if err := ioutil.WriteFile(stateFileLocation, data, 0644); err != nil {
return errors.WithStack(err) return errors.WithStack(err)
} }
@@ -140,4 +141,4 @@ func (s *Server) GetState() string {
// not the response from Docker. // not the response from Docker.
func (s *Server) IsRunning() bool { func (s *Server) IsRunning() bool {
return s.GetState() == ProcessRunningState || s.GetState() == ProcessStartingState return s.GetState() == ProcessRunningState || s.GetState() == ProcessStartingState
} }
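As a hedged sketch of the persistence pattern this hunk reverts to, writing the state map back to the flat file looks roughly like this; it assumes the states map is UUID to state string and the json/ioutil imports shown above.
// Illustrative sketch only; mirrors the locking and file handling in the hunk above.
func writeStates(states map[string]string) error {
	stateMutex.Lock()
	defer stateMutex.Unlock()
	data, err := json.Marshal(states)
	if err != nil {
		return errors.WithStack(err)
	}
	return errors.WithStack(ioutil.WriteFile(stateFileLocation, data, 0644))
}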

View File

@@ -52,6 +52,11 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
} }
} else { } else {
s.Suspended = v s.Suspended = v
if s.Suspended {
zap.S().Debugw("server has been suspended", zap.String("server", s.Uuid))
} else {
zap.S().Debugw("server has been unsuspended", zap.String("server", s.Uuid))
}
} }
// Environment and Mappings should be treated as a full update at all times, never a // Environment and Mappings should be treated as a full update at all times, never a

View File

@@ -2,5 +2,5 @@ package system
const ( const (
// The current version of this software. // The current version of this software.
Version = "1.0.0-beta.2" Version = "1.0.0-beta.1"
) )