Merge branch 'dane/api-cleanup' into develop

Dane Everitt committed 2021-02-23 21:25:10 -08:00
35 changed files with 616 additions and 1016 deletions


@@ -8,16 +8,16 @@ import (
     "emperror.dev/errors"
     "github.com/apex/log"
     "github.com/docker/docker/client"
-    "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/environment"
+    "github.com/pterodactyl/wings/remote"
     "github.com/pterodactyl/wings/server/backup"
 )
 
 // Notifies the panel of a backup's state and returns an error if one is encountered
 // while performing this action.
 func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, successful bool) error {
-    if err := api.New().SendBackupStatus(uuid, ad.ToRequest(successful)); err != nil {
-        if !api.IsRequestError(err) {
+    if err := s.client.SetBackupStatus(s.Context(), uuid, ad.ToRequest(successful)); err != nil {
+        if !remote.IsRequestError(err) {
             s.Log().WithFields(log.Fields{
                 "backup": uuid,
                 "error":  err,
@@ -131,7 +131,7 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
     // Send an API call to the Panel as soon as this function is done running so that
     // the Panel is informed of the restoration status of this backup.
     defer func() {
-        if rerr := api.New().SendRestorationStatus(b.Identifier(), err == nil); rerr != nil {
+        if rerr := s.client.SendRestorationStatus(s.Context(), b.Identifier(), err == nil); rerr != nil {
             s.Log().WithField("error", rerr).WithField("backup", b.Identifier()).Error("failed to notify Panel of backup restoration status")
         }
     }()
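
Every file in this merge repeats the same move: call sites stop building a throwaway client with api.New() and instead go through a remote.Client that was injected into the owning struct, paired with that struct's long-lived context. A minimal sketch of the pattern, assuming simplified stand-in types (only the SetBackupStatus call shape is taken from the hunk above):

package main

import (
	"context"
	"fmt"
)

// BackupRequest is a stand-in for the request payload; only the
// SetBackupStatus signature mirrors the hunk above.
type BackupRequest struct {
	Successful bool
}

// Client models the slice of remote.Client this file needs.
type Client interface {
	SetBackupStatus(ctx context.Context, uuid string, data BackupRequest) error
}

// Server owns the client and a long-lived context, instead of each
// method constructing a client from global config via api.New().
type Server struct {
	ctx    context.Context
	client Client
}

func (s *Server) notifyPanelOfBackup(uuid string, req BackupRequest) error {
	return s.client.SetBackupStatus(s.ctx, uuid, req)
}

// A no-op client is enough to exercise the method in a test.
type noopClient struct{}

func (noopClient) SetBackupStatus(context.Context, string, BackupRequest) error { return nil }

func main() {
	s := &Server{ctx: context.Background(), client: noopClient{}}
	fmt.Println(s.notifyPanelOfBackup("abc-123", BackupRequest{Successful: true}))
}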


@@ -9,8 +9,8 @@ import (
     "sync"
 
     "github.com/apex/log"
-    "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
+    "github.com/pterodactyl/wings/remote"
 )
 
 type AdapterType string
@@ -31,8 +31,8 @@ type ArchiveDetails struct {
 }
 
 // ToRequest returns a request object.
-func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
-    return api.BackupRequest{
+func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
+    return remote.BackupRequest{
         Checksum:     ad.Checksum,
         ChecksumType: ad.ChecksumType,
         Size:         ad.Size,
@@ -49,12 +49,15 @@ type Backup struct {
     // compatible with a standard .gitignore structure.
     Ignore string `json:"ignore"`
 
+    client     remote.Client
     adapter    AdapterType
     logContext map[string]interface{}
 }
 
 // noinspection GoNameStartsWithPackageName
 type BackupInterface interface {
+    // SetClient sets the API request client on the backup interface.
+    SetClient(c remote.Client)
     // Identifier returns the UUID of this backup as tracked by the panel
     // instance.
     Identifier() string
@@ -84,6 +87,10 @@ type BackupInterface interface {
     Restore(reader io.Reader, callback RestoreCallback) error
 }
 
+func (b *Backup) SetClient(c remote.Client) {
+    b.client = c
+}
+
 func (b *Backup) Identifier() string {
     return b.Uuid
 }
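
SetClient is exposed on BackupInterface itself, which suggests backups are not always produced by a constructor that has the client in hand, for example a struct decoded from a JSON payload whose unexported client field json.Unmarshal cannot populate. A hedged sketch of that situation; the decode flow here is illustrative, not lifted from wings:

package main

import (
	"encoding/json"
	"fmt"
)

// Client stands in for remote.Client.
type Client interface{}

type Backup struct {
	Uuid   string `json:"uuid"`
	Ignore string `json:"ignore"`

	client Client // unexported: json.Unmarshal cannot set it
}

// SetClient mirrors the method this commit adds to BackupInterface.
func (b *Backup) SetClient(c Client) {
	b.client = c
}

func main() {
	// A backup reconstructed from JSON has no client attached...
	var b Backup
	_ = json.Unmarshal([]byte(`{"uuid":"abc-123","ignore":""}`), &b)

	// ...so whoever owns a client injects it before using the backup.
	b.SetClient(struct{}{})
	fmt.Println(b.Uuid, "has client:", b.client != nil)
}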


@@ -6,6 +6,7 @@ import (
     "os"
 
     "github.com/mholt/archiver/v3"
+    "github.com/pterodactyl/wings/remote"
     "github.com/pterodactyl/wings/system"
 )
@@ -15,9 +16,10 @@ type LocalBackup struct {
 var _ BackupInterface = (*LocalBackup)(nil)
 
-func NewLocal(uuid string, ignore string) *LocalBackup {
+func NewLocal(client remote.Client, uuid string, ignore string) *LocalBackup {
     return &LocalBackup{
         Backup{
+            client:  client,
             Uuid:    uuid,
             Ignore:  ignore,
             adapter: LocalBackupAdapter,
@@ -27,14 +29,8 @@ func NewLocal(uuid string, ignore string) *LocalBackup {
 // LocateLocal finds the backup for a server and returns the local path. This
 // will obviously only work if the backup was created as a local backup.
-func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
-    b := &LocalBackup{
-        Backup{
-            Uuid:   uuid,
-            Ignore: "",
-        },
-    }
+func LocateLocal(client remote.Client, uuid string) (*LocalBackup, os.FileInfo, error) {
+    b := NewLocal(client, uuid, "")
 
     st, err := os.Stat(b.Path())
     if err != nil {
         return nil, nil, err


@@ -3,6 +3,7 @@ package backup
 import (
     "archive/tar"
     "compress/gzip"
+    "context"
     "fmt"
     "io"
     "net/http"
@@ -10,8 +11,8 @@ import (
     "strconv"
 
     "github.com/juju/ratelimit"
-    "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
+    "github.com/pterodactyl/wings/remote"
 )
 
 type S3Backup struct {
@@ -20,9 +21,10 @@ type S3Backup struct {
 var _ BackupInterface = (*S3Backup)(nil)
 
-func NewS3(uuid string, ignore string) *S3Backup {
+func NewS3(client remote.Client, uuid string, ignore string) *S3Backup {
     return &S3Backup{
         Backup{
+            client:  client,
             Uuid:    uuid,
             Ignore:  ignore,
             adapter: S3BackupAdapter,
@@ -91,7 +93,7 @@ func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) error {
     s.log().WithField("size", size).Debug("got size of backup")
 
     s.log().Debug("attempting to get S3 upload urls from Panel...")
-    urls, err := api.New().GetBackupRemoteUploadURLs(s.Backup.Uuid, size)
+    urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
     if err != nil {
         return err
     }
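
Both adapters now take the client as the leading constructor argument, and LocateLocal delegates to NewLocal instead of duplicating the struct literal. Note the S3 path passes context.Background(), presumably because a Backup, unlike a Server, carries no context of its own. A sketch of the new calling convention (the constant values and stub client are illustrative):

package main

import "fmt"

// Client stands in for remote.Client.
type Client interface{}

type AdapterType string

// Illustrative values; the real constants live in the backup package.
const (
	LocalBackupAdapter AdapterType = "wings"
	S3BackupAdapter    AdapterType = "s3"
)

type Backup struct {
	client  Client
	Uuid    string
	Ignore  string
	adapter AdapterType
}

type LocalBackup struct{ Backup }
type S3Backup struct{ Backup }

// Client-first constructors, mirroring NewLocal and NewS3 above.
func NewLocal(c Client, uuid string, ignore string) *LocalBackup {
	return &LocalBackup{Backup{client: c, Uuid: uuid, Ignore: ignore, adapter: LocalBackupAdapter}}
}

func NewS3(c Client, uuid string, ignore string) *S3Backup {
	return &S3Backup{Backup{client: c, Uuid: uuid, Ignore: ignore, adapter: S3BackupAdapter}}
}

func main() {
	c := struct{}{}
	l := NewLocal(c, "abc-123", "*.log")
	s := NewS3(c, "def-456", "")
	fmt.Println(l.adapter, s.adapter)
}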


@@ -17,9 +17,9 @@ import (
     "github.com/docker/docker/api/types/container"
     "github.com/docker/docker/api/types/mount"
     "github.com/docker/docker/client"
-    "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
     "github.com/pterodactyl/wings/environment"
+    "github.com/pterodactyl/wings/remote"
     "github.com/pterodactyl/wings/system"
 )
@@ -88,9 +88,9 @@ func (s *Server) Reinstall() error {
 // Internal installation function used to simplify reporting back to the Panel.
 func (s *Server) internalInstall() error {
-    script, err := api.New().GetInstallationScript(s.Id())
+    script, err := s.client.GetInstallationScript(s.Context(), s.Id())
     if err != nil {
-        if !api.IsRequestError(err) {
+        if !remote.IsRequestError(err) {
             return err
         }
@@ -113,7 +113,7 @@ func (s *Server) internalInstall() error {
 type InstallationProcess struct {
     Server *Server
-    Script *api.InstallationScript
+    Script *remote.InstallationScript
 
     client  *client.Client
     context context.Context
@@ -121,7 +121,7 @@ type InstallationProcess struct {
 // Generates a new installation process struct that will be used to create containers,
 // and otherwise perform installation commands for a server.
-func NewInstallationProcess(s *Server, script *api.InstallationScript) (*InstallationProcess, error) {
+func NewInstallationProcess(s *Server, script *remote.InstallationScript) (*InstallationProcess, error) {
     proc := &InstallationProcess{
         Script: script,
         Server: s,
@@ -532,9 +532,9 @@ func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) erro
 // value of "true" means everything was successful, "false" means something went
 // wrong and the server must be deleted and re-created.
 func (s *Server) SyncInstallState(successful bool) error {
-    err := api.New().SendInstallationStatus(s.Id(), successful)
+    err := s.client.SetInstallationStatus(s.Context(), s.Id(), successful)
     if err != nil {
-        if !api.IsRequestError(err) {
+        if !remote.IsRequestError(err) {
             return err
         }
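
The install lifecycle now talks to the Panel entirely through the injected client: fetch the script, run it, then report the outcome through SetInstallationStatus (renamed from SendInstallationStatus). A condensed sketch, where the run callback and fake client are hypothetical and only the two client method shapes come from the hunks above:

package main

import (
	"context"
	"fmt"
)

// InstallationScript stands in for remote.InstallationScript.
type InstallationScript struct {
	Script string
}

// Client models the two calls made in this file.
type Client interface {
	GetInstallationScript(ctx context.Context, uuid string) (InstallationScript, error)
	SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
}

type Server struct {
	id     string
	client Client
}

// install sketches the flow: fetch the script, execute it, then sync the
// final state back to the Panel.
func (s *Server) install(ctx context.Context, run func(InstallationScript) error) error {
	script, err := s.client.GetInstallationScript(ctx, s.id)
	if err != nil {
		return err
	}
	return s.client.SetInstallationStatus(ctx, s.id, run(script) == nil)
}

type fakeClient struct{}

func (fakeClient) GetInstallationScript(context.Context, string) (InstallationScript, error) {
	return InstallationScript{Script: "echo installing"}, nil
}

func (fakeClient) SetInstallationStatus(_ context.Context, uuid string, ok bool) error {
	fmt.Println("panel notified:", uuid, ok)
	return nil
}

func main() {
	s := &Server{id: "abc-123", client: fakeClient{}}
	_ = s.install(context.Background(), func(InstallationScript) error { return nil })
}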


@@ -7,10 +7,10 @@ import (
     "sync"
 
     "github.com/apex/log"
-    "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
     "github.com/pterodactyl/wings/environment"
     "github.com/pterodactyl/wings/events"
+    "github.com/pterodactyl/wings/remote"
 )
 
 var dockerEvents = []string{
@@ -186,7 +186,7 @@ func (s *Server) onConsoleOutput(data string) {
     if s.IsRunning() {
         stop := processConfiguration.Stop
 
-        if stop.Type == api.ProcessStopCommand && data == stop.Value {
+        if stop.Type == remote.ProcessStopCommand && data == stop.Value {
             s.Environment.SetState(environment.ProcessOfflineState)
         }
     }
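
Only the home of the ProcessStopCommand constant changes here; the detection logic is untouched: while the server is running, a console line that exactly matches the configured stop command flips the environment offline. A minimal sketch of that check (the constant's value and the helper's shape are illustrative):

package main

import "fmt"

// ProcessStopCommand's value here is illustrative; the real constant moved
// from the api package to remote in this commit.
const ProcessStopCommand = "command"

type ProcessStop struct {
	Type  string
	Value string
}

// onConsoleOutput flips the server offline once the configured stop
// command shows up verbatim as a console line.
func onConsoleOutput(stop ProcessStop, line string, setOffline func()) {
	if stop.Type == ProcessStopCommand && line == stop.Value {
		setOffline()
	}
}

func main() {
	stop := ProcessStop{Type: ProcessStopCommand, Value: "stop"}
	onConsoleOutput(stop, "stop", func() { fmt.Println("marked offline") })
}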


@@ -7,6 +7,7 @@ import (
     "io"
     "io/ioutil"
     "os"
+    "path/filepath"
     "runtime"
     "sync"
     "time"
@@ -14,13 +15,16 @@ import (
     "emperror.dev/errors"
     "github.com/apex/log"
     "github.com/gammazero/workerpool"
-    "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
+    "github.com/pterodactyl/wings/environment"
+    "github.com/pterodactyl/wings/environment/docker"
     "github.com/pterodactyl/wings/remote"
+    "github.com/pterodactyl/wings/server/filesystem"
 )
 
 type Manager struct {
     mu      sync.RWMutex
+    client  remote.Client
     servers []*Server
 }
@@ -28,8 +32,8 @@ type Manager struct {
 // the servers that are currently present on the filesystem and set them into
 // the manager.
 func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
-    m := NewEmptyManager()
-    if err := m.init(ctx, client); err != nil {
+    m := NewEmptyManager(client)
+    if err := m.init(ctx); err != nil {
         return nil, err
     }
     return m, nil
@@ -38,58 +42,14 @@ func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
 // NewEmptyManager returns a new empty manager collection without actually
 // loading any of the servers from the disk. This allows the caller to set their
 // own servers into the collection as needed.
-func NewEmptyManager() *Manager {
-    return &Manager{}
+func NewEmptyManager(client remote.Client) *Manager {
+    return &Manager{client: client}
 }
 
-// initializeFromRemoteSource iterates over a given directory and loads all of
-// the servers listed before returning them to the calling function.
-func (m *Manager) init(ctx context.Context, client remote.Client) error {
-    log.Info("fetching list of servers from API")
-    servers, err := client.GetServers(ctx, config.Get().RemoteQuery.BootServersPerPage)
-    if err != nil {
-        if !remote.IsRequestError(err) {
-            return errors.WithStackIf(err)
-        }
-        return errors.New(err.Error())
-    }
-
-    start := time.Now()
-    log.WithField("total_configs", len(servers)).Info("processing servers returned by the API")
-
-    pool := workerpool.New(runtime.NumCPU())
-    log.Debugf("using %d workerpools to instantiate server instances", runtime.NumCPU())
-    for _, data := range servers {
-        data := data
-        pool.Submit(func() {
-            // Parse the json.RawMessage into an expected struct value. We do this here so that a single broken
-            // server does not cause the entire boot process to hang, and allows us to show more useful error
-            // messaging in the output.
-            d := api.ServerConfigurationResponse{
-                Settings: data.Settings,
-            }
-            log.WithField("server", data.Uuid).Info("creating new server object from API response")
-            if err := json.Unmarshal(data.ProcessConfiguration, &d.ProcessConfiguration); err != nil {
-                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
-                return
-            }
-            s, err := FromConfiguration(d)
-            if err != nil {
-                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
-                return
-            }
-            m.Add(s)
-        })
-    }
-
-    // Wait until we've processed all of the configuration files in the directory
-    // before continuing.
-    pool.StopWait()
-
-    diff := time.Now().Sub(start)
-    log.WithField("duration", fmt.Sprintf("%s", diff)).Info("finished processing server configurations")
-    return nil
+// Client returns the HTTP client interface that allows interaction with the
+// Panel API.
+func (m *Manager) Client() remote.Client {
+    return m.client
 }
@@ -201,4 +161,104 @@ func (m *Manager) ReadStates() (map[string]string, error) {
         }
     }
     return out, nil
 }
+
+// InitServer initializes a server using a data byte array. This will be
+// marshaled into the given struct using a YAML marshaler. This will also
+// configure the given environment for a server.
+func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server, error) {
+    s, err := New(m.client)
+    if err != nil {
+        return nil, errors.WithMessage(err, "loader: failed to instantiate empty server struct")
+    }
+    if err := s.UpdateDataStructure(data.Settings); err != nil {
+        return nil, err
+    }
+
+    s.Archiver = Archiver{Server: s}
+    s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace(), s.Config().Egg.FileDenylist)
+
+    // Right now we only support a Docker based environment, so I'm going to hard code
+    // this logic in. When we're ready to support other environment we'll need to make
+    // some modifications here obviously.
+    settings := environment.Settings{
+        Mounts:      s.Mounts(),
+        Allocations: s.cfg.Allocations,
+        Limits:      s.cfg.Build,
+    }
+    envCfg := environment.NewConfiguration(settings, s.GetEnvironmentVariables())
+    meta := docker.Metadata{
+        Image: s.Config().Container.Image,
+    }
+    if env, err := docker.New(s.Id(), &meta, envCfg); err != nil {
+        return nil, err
+    } else {
+        s.Environment = env
+        s.StartEventListeners()
+        s.Throttler().StartTimer(s.Context())
+    }
+
+    // Forces the configuration to be synced with the panel.
+    if err := s.SyncWithConfiguration(data); err != nil {
+        return nil, err
+    }
+
+    // If the server's data directory exists, force disk usage calculation.
+    if _, err := os.Stat(s.Filesystem().Path()); err == nil {
+        s.Filesystem().HasSpaceAvailable(true)
+    }
+
+    return s, nil
+}
+
+// initializeFromRemoteSource iterates over a given directory and loads all of
+// the servers listed before returning them to the calling function.
+func (m *Manager) init(ctx context.Context) error {
+    log.Info("fetching list of servers from API")
+    servers, err := m.client.GetServers(ctx, config.Get().RemoteQuery.BootServersPerPage)
+    if err != nil {
+        if !remote.IsRequestError(err) {
+            return errors.WithStackIf(err)
+        }
+        return errors.New(err.Error())
+    }
+
+    start := time.Now()
+    log.WithField("total_configs", len(servers)).Info("processing servers returned by the API")
+
+    pool := workerpool.New(runtime.NumCPU())
+    log.Debugf("using %d workerpools to instantiate server instances", runtime.NumCPU())
+    for _, data := range servers {
+        data := data
+        pool.Submit(func() {
+            // Parse the json.RawMessage into an expected struct value. We do this here so that a single broken
+            // server does not cause the entire boot process to hang, and allows us to show more useful error
+            // messaging in the output.
+            d := remote.ServerConfigurationResponse{
+                Settings: data.Settings,
+            }
+            log.WithField("server", data.Uuid).Info("creating new server object from API response")
+            if err := json.Unmarshal(data.ProcessConfiguration, &d.ProcessConfiguration); err != nil {
+                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
+                return
+            }
+            s, err := m.InitServer(d)
+            if err != nil {
+                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
+                return
+            }
+            m.Add(s)
+        })
+    }
+
+    // Wait until we've processed all of the configuration files in the directory
+    // before continuing.
+    pool.StopWait()
+
+    diff := time.Now().Sub(start)
+    log.WithField("duration", fmt.Sprintf("%s", diff)).Info("finished processing server configurations")
+    return nil
+}
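
Because the manager now owns the client, InitServer can hand it to every Server it builds, and the boot loop and any later ad-hoc creation share a single construction path. A skeletal sketch of that ownership, with all types reduced to stand-ins:

package main

import (
	"fmt"
	"sync"
)

// Client stands in for remote.Client.
type Client interface{}

type Server struct {
	uuid   string
	client Client
}

type Manager struct {
	mu      sync.RWMutex
	client  Client
	servers []*Server
}

// NewEmptyManager requires the client up front; nothing built through the
// manager needs a global to reach the Panel.
func NewEmptyManager(c Client) *Manager {
	return &Manager{client: c}
}

// InitServer is the single construction path used by both the boot loop
// and later callers.
func (m *Manager) InitServer(uuid string) (*Server, error) {
	return &Server{uuid: uuid, client: m.client}, nil
}

func (m *Manager) Add(s *Server) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.servers = append(m.servers, s)
}

func main() {
	m := NewEmptyManager(struct{}{})
	for _, uuid := range []string{"abc-123", "def-456"} {
		if s, err := m.InitServer(uuid); err == nil {
			m.Add(s)
		}
	}
	fmt.Println(len(m.servers), "servers registered")
}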


@@ -4,18 +4,17 @@ import (
     "context"
     "fmt"
     "os"
-    "path/filepath"
     "strings"
     "sync"
 
     "emperror.dev/errors"
     "github.com/apex/log"
     "github.com/creasty/defaults"
-    "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
     "github.com/pterodactyl/wings/environment"
-    "github.com/pterodactyl/wings/environment/docker"
     "github.com/pterodactyl/wings/events"
+    "github.com/pterodactyl/wings/remote"
     "github.com/pterodactyl/wings/server/filesystem"
     "github.com/pterodactyl/wings/system"
     "golang.org/x/sync/semaphore"
@@ -36,7 +35,8 @@ type Server struct {
     // Maintains the configuration for the server. This is the data that gets returned by the Panel
     // such as build settings and container images.
-    cfg Configuration
+    cfg    Configuration
+    client remote.Client
 
     // The crash handler for this server instance.
     crasher CrashHandler
@@ -53,7 +53,7 @@ type Server struct {
     // Defines the process configuration for the server instance. This is dynamically
     // fetched from the Pterodactyl Server instance each time the server process is
     // started, and then cached here.
-    procConfig *api.ProcessConfiguration
+    procConfig *remote.ProcessConfiguration
 
     // Tracks the installation process for this server and prevents a server from running
     // two installer processes at the same time. This also allows us to cancel a running
@@ -72,11 +72,12 @@ type Server struct {
 // Returns a new server instance with a context and all of the default values set on
 // the instance.
-func New() (*Server, error) {
+func New(client remote.Client) (*Server, error) {
     ctx, cancel := context.WithCancel(context.Background())
     s := Server{
         ctx:          ctx,
         ctxCancel:    &cancel,
+        client:       client,
         installing:   system.NewAtomicBool(false),
         transferring: system.NewAtomicBool(false),
     }
@@ -148,13 +149,13 @@ func (s *Server) Log() *log.Entry {
 // This also means mass actions can be performed against servers on the Panel and they
 // will automatically sync with Wings when the server is started.
 func (s *Server) Sync() error {
-    cfg, err := api.New().GetServerConfiguration(s.Id())
+    cfg, err := s.client.GetServerConfiguration(s.Context(), s.Id())
     if err != nil {
-        if !api.IsRequestError(err) {
+        if !remote.IsRequestError(err) {
             return err
         }
 
-        if err.(*api.RequestError).Status == "404" {
+        if err.(*remote.RequestError).Status == "404" {
             return &serverDoesNotExist{}
         }
@@ -164,7 +165,7 @@ func (s *Server) Sync() error {
     return s.SyncWithConfiguration(cfg)
 }
 
-func (s *Server) SyncWithConfiguration(cfg api.ServerConfigurationResponse) error {
+func (s *Server) SyncWithConfiguration(cfg remote.ServerConfigurationResponse) error {
     // Update the data structure and persist it to the disk.
     if err := s.UpdateDataStructure(cfg.Settings); err != nil {
         return err
@@ -218,7 +219,7 @@ func (s *Server) IsSuspended() bool {
     return s.Config().Suspended
 }
 
-func (s *Server) ProcessConfiguration() *api.ProcessConfiguration {
+func (s *Server) ProcessConfiguration() *remote.ProcessConfiguration {
     s.RLock()
     defer s.RUnlock()
@@ -295,61 +296,11 @@ func (s *Server) OnStateChange() {
     }
 }
 
-// Determines if the server state is running or not. This is different than the
-// environment state, it is simply the tracked state from this daemon instance, and
-// not the response from Docker.
+// IsRunning determines if the server state is running or not. This is different
+// than the environment state, it is simply the tracked state from this daemon
+// instance, and not the response from Docker.
 func (s *Server) IsRunning() bool {
     st := s.Environment.State()
 
     return st == environment.ProcessRunningState || st == environment.ProcessStartingState
 }
-
-// FromConfiguration initializes a server using a data byte array. This will be
-// marshaled into the given struct using a YAML marshaler. This will also
-// configure the given environment for a server.
-func FromConfiguration(data api.ServerConfigurationResponse) (*Server, error) {
-    s, err := New()
-    if err != nil {
-        return nil, errors.WithMessage(err, "loader: failed to instantiate empty server struct")
-    }
-    if err := s.UpdateDataStructure(data.Settings); err != nil {
-        return nil, err
-    }
-
-    s.Archiver = Archiver{Server: s}
-    s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace(), s.Config().Egg.FileDenylist)
-
-    // Right now we only support a Docker based environment, so I'm going to hard code
-    // this logic in. When we're ready to support other environment we'll need to make
-    // some modifications here obviously.
-    settings := environment.Settings{
-        Mounts:      s.Mounts(),
-        Allocations: s.cfg.Allocations,
-        Limits:      s.cfg.Build,
-    }
-    envCfg := environment.NewConfiguration(settings, s.GetEnvironmentVariables())
-    meta := docker.Metadata{
-        Image: s.Config().Container.Image,
-    }
-    if env, err := docker.New(s.Id(), &meta, envCfg); err != nil {
-        return nil, err
-    } else {
-        s.Environment = env
-        s.StartEventListeners()
-        s.Throttler().StartTimer(s.Context())
-    }
-
-    // Forces the configuration to be synced with the panel.
-    if err := s.SyncWithConfiguration(data); err != nil {
-        return nil, err
-    }
-
-    // If the server's data directory exists, force disk usage calculation.
-    if _, err := os.Stat(s.Filesystem().Path()); err == nil {
-        s.Filesystem().HasSpaceAvailable(true)
-    }
-
-    return s, nil
-}
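
Sync's error handling keeps its old shape against the new types: anything that is not a request error bubbles up untouched, while a Panel 404 becomes the daemon's own serverDoesNotExist sentinel. A condensed sketch of that branch, with RequestError reduced to the one field the diff inspects:

package main

import (
	"errors"
	"fmt"
)

// RequestError carries the Panel response status as a string, matching the
// err.(*remote.RequestError).Status == "404" check above.
type RequestError struct {
	Status string
}

func (e *RequestError) Error() string {
	return "panel request failed: " + e.Status
}

var ErrServerDoesNotExist = errors.New("server does not exist on the panel")

// classify mirrors the branch in Sync: pass through anything that is not a
// request error, and turn a 404 into the local sentinel.
func classify(err error) error {
	re, ok := err.(*RequestError)
	if !ok {
		return err
	}
	if re.Status == "404" {
		return ErrServerDoesNotExist
	}
	return err
}

func main() {
	fmt.Println(classify(&RequestError{Status: "404"}))
	fmt.Println(classify(errors.New("connection refused")))
}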