Compare commits

..

34 Commits

Author SHA1 Message Date
Dane Everitt
35ba6d7524 Update CHANGELOG.md 2021-04-24 16:52:19 -07:00
Dane Everitt
fb0e769306 fix error when out of disk space; closes pterodactyl/panel#3273 2021-04-18 14:48:42 -07:00
Dane Everitt
0676a82a21 Add better error handling for filesystem 2021-04-17 13:29:18 -07:00
Dane Everitt
a0ae5fd131 Merge branch 'develop' of github.com:pterodactyl/wings into develop 2021-04-17 13:13:40 -07:00
Dane Everitt
4b244e96fb Fix .rar file decompression; closes pterodactyl/panel#3267 2021-04-17 13:13:37 -07:00
Dane Everitt
488884fdee Merge pull request #92 from parkervcp/fix_docker_build
Fixes ghcr build
2021-04-13 08:18:30 -07:00
Michael Parker
cfa338108f Fixes ghcr build
Removes version pins so packages install properly.
2021-04-12 19:38:16 -04:00
Dane Everitt
16b0ca3a8e Use io#LimitReader to avoid panic when reading files with active writes; closes pterodactyl/panel#3131 2021-04-04 10:42:03 -07:00
Dane Everitt
f57c24002e More API response fixing 2021-04-04 10:20:27 -07:00
Dane Everitt
8dfd494eaf Better explain what is happening in this file 2021-04-03 14:16:00 -07:00
Dane Everitt
2e0496c1f9 Add note about handling of UTF-8 sequences in properties files. 2021-04-03 14:02:37 -07:00
Dane Everitt
f85ee1aa73 cleanup 2021-04-03 13:20:07 -07:00
Dane Everitt
d4b63bef39 Fix details fetching for a single server instance 2021-04-03 13:15:11 -07:00
Dane Everitt
4c3b497652 Better error handling and reporting for image pull errors 2021-04-03 12:52:32 -07:00
Dane Everitt
ff62d16085 Merge branch 'develop' of github.com:pterodactyl/wings into develop 2021-04-03 11:18:44 -07:00
Dane Everitt
202ca922ad Update README.md 2021-04-03 11:18:33 -07:00
Dane Everitt
76b7967fef Merge pull request #88 from Antony1060/develop
Added app name
2021-04-03 11:13:29 -07:00
Dane Everitt
1b1eaa3171 Avoid expensive copies of the config for every line output 2021-04-03 11:11:36 -07:00
Dane Everitt
87f0b11078 Merge pull request #90 from Antony1060/fix
Fixed /api/servers
2021-04-03 11:08:43 -07:00
Dane Everitt
b448310a33 Correctly return servers installed on wings and their resource usage 2021-04-03 11:08:26 -07:00
Dane Everitt
f1b85ef0ab Merge pull request #91 from nysos3/develop
Fix reading User.Gid from WINGS_GID over WINGS_UID
2021-04-03 09:03:10 -07:00
Cody Carrell
bec6a6112d Fix reading User.Gid from WINGS_GID over WINGS_UID 2021-04-02 22:45:56 -04:00
antony1060
b691b8f06f Fixed /api/servers 2021-04-02 21:32:30 +02:00
Dane Everitt
31127620e5 License date updates 2021-03-26 09:33:24 -07:00
Dane Everitt
5e7316e09a Update CHANGELOG.md 2021-03-26 09:13:38 -07:00
Antony
52fcf1e37f Added defaults
Co-authored-by: Jakob <dev@schrej.net>
2021-03-24 11:24:54 +01:00
antony1060
0c17e240f4 Added app name 2021-03-24 10:26:03 +01:00
Matthew Penner
471886dd34 internally mark if a server is restoring to restrict actions 2021-03-12 16:19:35 -07:00
Dane Everitt
b63a491b5e Update CHANGELOG.md 2021-03-07 17:37:03 -08:00
Dane Everitt
6902422229 Update CHANGELOG.md 2021-03-07 17:35:55 -08:00
Dane Everitt
5f5b2bc84e Quick little code cleanup and adding some commentary 2021-03-07 17:31:45 -08:00
Dane Everitt
81a411a42c Merge branch 'develop' of github.com:pterodactyl/wings into develop 2021-03-07 17:24:47 -08:00
Dane Everitt
37c6b85489 Don't throw back errors on termination if the container doesn't exist; closes pterodactyl/panel#3149 2021-03-07 17:24:45 -08:00
Matthew Penner
0e3778ac47 transfers: use backup archiver 2021-03-07 11:04:15 -07:00
36 changed files with 524 additions and 526 deletions

View File

@@ -1,5 +1,32 @@
 # Changelog
+## v1.4.0
+### Fixed
+* **[Breaking]** Fixes `/api/servers` and `/api/servers/:server` not properly returning all of the relevant server information and resource usage.
+* Fixes Wings improperly reading `WINGS_UID` and not `WINGS_GID` when running in containerized environments.
+* Fixes a panic encountered when returning the contents of a file that is actively being written to by another process.
+* Corrected the handling of files that are being decompressed to properly support `.rar` files.
+* Fixes the error message returned when a server has run out of disk space to properly indicate such, rather than indicating that the file is a directory.
+### Changed
+* Improved the error handling and output when an error is encountered while pulling an image for a server.
+* Improved robustness of code handling value replacement in configuration files to not potentially panic if a non-string value is encountered as the replacement type.
+* Improves error handling throughout the server filesystem.
+### Added
+* Adds the ability to set the internal name of the application in response output from the console using the `app_name` key in the `config.yml` file.
+## v1.3.2
+### Fixed
+* Correctly sets the internal state of the server as restoring when a restore is being performed to avoid any accidental booting.
+## v1.3.1
+### Fixed
+* Fixes an error being returned to the client when attempting to restart a server when the container no longer exists on the machine.
+### Changed
+* Updated server transfer logic to use newer file archiving tools to avoid frequent errors when transferring symlinked files.
 ## v1.3.0
 ### Fixed
 * Fixes improper error handling when attempting to create a new Docker network.

View File

@@ -2,7 +2,7 @@
 FROM golang:1.15-alpine3.12 AS builder
 ARG VERSION
-RUN apk add --update --no-cache git=2.26.2-r0 make=4.3-r0 upx=3.96-r0
+RUN apk add --update --no-cache git make upx
 WORKDIR /app/
 COPY go.mod go.sum /app/
 RUN go mod download

View File

@@ -1,6 +1,6 @@
 MIT License
-Copyright (c) 2019 Dane Everitt <dane@daneeveritt.com>
+Copyright (c) 2018 - 2021 Dane Everitt <dane@daneeveritt.com> and Contributors
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

View File

@@ -19,14 +19,19 @@ I would like to extend my sincere thanks to the following sponsors for helping f
 | Company | About |
 | ------- | ----- |
 | [**WISP**](https://wisp.gg) | Extra features. |
+| [**MixmlHosting**](https://mixmlhosting.com) | MixmlHosting provides high quality Virtual Private Servers along with game servers, all at a affordable price. |
+| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
 | [**Bloom.host**](https://bloom.host) | Bloom.host offers dedicated core VPS and Minecraft hosting with Ryzen 9 processors. With owned-hardware, we offer truly unbeatable prices on high-performance hosting. |
 | [**MineStrator**](https://minestrator.com/) | Looking for a French highend hosting company for you minecraft server? More than 14,000 members on our discord, trust us. |
 | [**DedicatedMC**](https://dedicatedmc.io/) | DedicatedMC provides Raw Power hosting at affordable pricing, making sure to never compromise on your performance and giving you the best performance money can buy. |
 | [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
 | [**XCORE**](https://xcore-server.de/) | XCORE offers High-End Servers for hosting and gaming since 2012. Fast, excellent and well-known for eSports Gaming. |
 | [**RoyaleHosting**](https://royalehosting.net/) | Build your dreams and deploy them with RoyaleHostings reliable servers and network. Easy to use, provisioned in a couple of minutes. |
-| [**Spill Hosting**](https://spillhosting.no/) | Spill Hosting is a Norwegian hosting service, which aims to cheap services on quality servers. Premium i9-9900K processors will run your game like a dream. |
+| [**Spill Hosting**](https://spillhosting.no/) | Spill Hosting is a Norwegian hosting service, which aims for inexpensive services on quality servers. Premium i9-9900K processors will run your game like a dream. |
 | [**DeinServerHost**](https://deinserverhost.de/) | DeinServerHost offers Dedicated, vps and Gameservers for many popular Games like Minecraft and Rust in Germany since 2013. |
+| [**HostBend**](https://hostbend.com/) | HostBend offers a variety of solutions for developers, students, and others who have a tight budget but don't want to compromise quality and support. |
+| [**Capitol Hosting Solutions**](https://capitolsolutions.cloud/) | CHS is *the* budget friendly hosting company for Australian and American gamers, offering a variety of plans from Web Hosting to Game Servers; Custom Solutions too! |
+| [**ByteAnia**](https://byteania.com/?utm_source=pterodactyl) | ByteAnia offers the best performing and most affordable **Ryzen 5000 Series hosting** on the market for *unbeatable prices*! |
 ## Documentation
 * [Panel Documentation](https://pterodactyl.io/panel/1.0/getting_started.html)

View File

@@ -58,7 +58,7 @@ var versionCommand = &cobra.Command{
Use: "version", Use: "version",
Short: "Prints the current executable version and exits.", Short: "Prints the current executable version and exits.",
Run: func(cmd *cobra.Command, _ []string) { Run: func(cmd *cobra.Command, _ []string) {
fmt.Printf("wings v%s\nCopyright © 2018 - 2021 Dane Everitt & Contributors\n", system.Version) fmt.Printf("wings v%s\nCopyright © 2018 - %d Dane Everitt & Contributors\n", system.Version, time.Now().Year())
}, },
} }
@@ -400,7 +400,7 @@ __ [blue][bold]Pterodactyl[reset] _____/___/_______ _______ ______
 \___/\___/___/___/___/___ /______/
 /_______/ [bold]%s[reset]
-Copyright © 2018 - 2021 Dane Everitt & Contributors
+Copyright © 2018 - %d Dane Everitt & Contributors
 Website: https://pterodactyl.io
 Source: https://github.com/pterodactyl/wings
@@ -408,7 +408,7 @@ License: https://github.com/pterodactyl/wings/blob/develop/LICENSE
 This software is made available under the terms of the MIT license.
 The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.%s`), system.Version, "\n\n")
+in all copies or substantial portions of the Software.%s`), system.Version, time.Now().Year(), "\n\n")
 }
 func exitWithConfigurationNotice() {
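The change in this file swaps the hard-coded 2021 for a `%d` verb fed by `time.Now().Year()`, so the copyright notice never goes stale. A minimal standalone sketch of the same pattern (the version string here is a placeholder, not the real `system.Version`):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	version := "1.4.0" // placeholder for system.Version
	// The year is resolved at runtime instead of being baked into the source.
	fmt.Printf("wings v%s\nCopyright © 2018 - %d Dane Everitt & Contributors\n", version, time.Now().Year())
}
```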

View File

@@ -247,6 +247,8 @@ type Configuration struct {
 // if the debug flag is passed through the command line arguments.
 Debug bool
+AppName string `default:"Pterodactyl" json:"app_name" yaml:"app_name"`
 // A unique identifier for this node in the Panel.
 Uuid string
@@ -395,7 +397,7 @@ func EnsurePterodactylUser() error {
 if sysName == "busybox" {
 _config.System.Username = system.FirstNotEmpty(os.Getenv("WINGS_USERNAME"), "pterodactyl")
 _config.System.User.Uid = system.MustInt(system.FirstNotEmpty(os.Getenv("WINGS_UID"), "988"))
-_config.System.User.Gid = system.MustInt(system.FirstNotEmpty(os.Getenv("WINGS_UID"), "988"))
+_config.System.User.Gid = system.MustInt(system.FirstNotEmpty(os.Getenv("WINGS_GID"), "988"))
 return nil
 }
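For context on the `WINGS_GID` fix above: before the change both the UID and the GID were read from `WINGS_UID`. A small sketch of the intended behaviour, using a local stand-in helper rather than the real `system.FirstNotEmpty`/`system.MustInt`:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstNotEmpty mirrors the behaviour assumed of system.FirstNotEmpty.
func firstNotEmpty(values ...string) string {
	for _, v := range values {
		if v != "" {
			return v
		}
	}
	return ""
}

func main() {
	// The GID must come from WINGS_GID; falling back to 988 matches the diff above.
	uid, _ := strconv.Atoi(firstNotEmpty(os.Getenv("WINGS_UID"), "988"))
	gid, _ := strconv.Atoi(firstNotEmpty(os.Getenv("WINGS_GID"), "988"))
	fmt.Println("uid:", uid, "gid:", gid)
}
```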

View File

@@ -3,9 +3,14 @@ package docker
 import (
 "bufio"
 "context"
-"emperror.dev/errors"
 "encoding/json"
 "fmt"
+"io"
+"strconv"
+"strings"
+"time"
+"emperror.dev/errors"
 "github.com/apex/log"
 "github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/container"
@@ -15,16 +20,9 @@ import (
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/system" "github.com/pterodactyl/wings/system"
"io"
"strconv"
"strings"
"time"
) )
type imagePullStatus struct { var ErrNotAttached = errors.Sentinel("not attached to instance")
Status string `json:"status"`
Progress string `json:"progress"`
}
// A custom console writer that allows us to keep a function blocked until the // A custom console writer that allows us to keep a function blocked until the
// given stream is properly closed. This does nothing special, only exists to // given stream is properly closed. This does nothing special, only exists to
@@ -38,14 +36,14 @@ func (nw noopWriter) Write(b []byte) (int, error) {
 return len(b), nil
 }
-// Attaches to the docker container itself and ensures that we can pipe data in and out
-// of the process stream. This should not be used for reading console data as you *will*
-// miss important output at the beginning because of the time delay with attaching to the
-// output.
+// Attach attaches to the docker container itself and ensures that we can pipe
+// data in and out of the process stream. This should not be used for reading
+// console data as you *will* miss important output at the beginning because of
+// the time delay with attaching to the output.
 //
-// Calling this function will poll resources for the container in the background until the
-// provided context is canceled by the caller. Failure to cancel said context will cause
-// background memory leaks as the goroutine will not exit.
+// Calling this function will poll resources for the container in the background
+// until the provided context is canceled by the caller. Failure to cancel said
+// context will cause background memory leaks as the goroutine will not exit.
 func (e *Environment) Attach() error {
 if e.IsAttached() {
 return nil
@@ -108,27 +106,15 @@ func (e *Environment) Attach() error {
 return nil
 }
-func (e *Environment) resources() container.Resources {
-l := e.Configuration.Limits()
-return container.Resources{
-Memory: l.BoundedMemoryLimit(),
-MemoryReservation: l.MemoryLimit * 1_000_000,
-MemorySwap: l.ConvertedSwap(),
-CPUQuota: l.ConvertedCpuLimit(),
-CPUPeriod: 100_000,
-CPUShares: 1024,
-BlkioWeight: l.IoWeight,
-OomKillDisable: &l.OOMDisabled,
-CpusetCpus: l.Threads,
-}
-}
-// Performs an in-place update of the Docker container's resource limits without actually
-// making any changes to the operational state of the container. This allows memory, cpu,
-// and IO limitations to be adjusted on the fly for individual instances.
+// InSituUpdate performs an in-place update of the Docker container's resource
+// limits without actually making any changes to the operational state of the
+// container. This allows memory, cpu, and IO limitations to be adjusted on the
+// fly for individual instances.
 func (e *Environment) InSituUpdate() error {
-if _, err := e.client.ContainerInspect(context.Background(), e.Id); err != nil {
+ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+defer cancel()
+if _, err := e.client.ContainerInspect(ctx, e.Id); err != nil {
 // If the container doesn't exist for some reason there really isn't anything
 // we can do to fix that in this process (it doesn't make sense at least). In those
 // cases just return without doing anything since we still want to save the configuration
@@ -138,25 +124,24 @@ func (e *Environment) InSituUpdate() error {
 if client.IsErrNotFound(err) {
 return nil
 }
-return err
+return errors.Wrap(err, "environment/docker: could not inspect container")
 }
-u := container.UpdateConfig{
+// CPU pinning cannot be removed once it is applied to a container. The same is true
+// for removing memory limits, a container must be re-created.
+//
+// @see https://github.com/moby/moby/issues/41946
+if _, err := e.client.ContainerUpdate(ctx, e.Id, container.UpdateConfig{
 Resources: e.resources(),
-}
-ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-defer cancel()
-if _, err := e.client.ContainerUpdate(ctx, e.Id, u); err != nil {
-return err
-}
+}); err != nil {
+return errors.Wrap(err, "environment/docker: could not update container")
+}
 return nil
 }
-// Creates a new container for the server using all of the data that is currently
-// available for it. If the container already exists it will be returnee.
+// Create creates a new container for the server using all of the data that is
+// currently available for it. If the container already exists it will be
+// returned.
 func (e *Environment) Create() error {
 // If the container already exists don't hit the user with an error, just return
 // the current information about it which is what we would do when creating the
@@ -164,12 +149,12 @@ func (e *Environment) Create() error {
 if _, err := e.client.ContainerInspect(context.Background(), e.Id); err == nil {
 return nil
 } else if !client.IsErrNotFound(err) {
-return err
+return errors.Wrap(err, "environment/docker: failed to inspect container")
 }
 // Try to pull the requested image before creating the container.
 if err := e.ensureImageExists(e.meta.Image); err != nil {
-return err
+return errors.WithStackIf(err)
 }
 a := e.Configuration.Allocations()
@@ -245,29 +230,14 @@ func (e *Environment) Create() error {
 }
 if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
-return err
+return errors.Wrap(err, "environment/docker: failed to create container")
 }
 return nil
 }
-func (e *Environment) convertMounts() []mount.Mount {
-var out []mount.Mount
-for _, m := range e.Configuration.Mounts() {
-out = append(out, mount.Mount{
-Type: mount.TypeBind,
-Source: m.Source,
-Target: m.Target,
-ReadOnly: m.ReadOnly,
-})
-}
-return out
-}
-// Remove the Docker container from the machine. If the container is currently running
-// it will be forcibly stopped by Docker.
+// Destroy will remove the Docker container from the server. If the container
+// is currently running it will be forcibly stopped by Docker.
 func (e *Environment) Destroy() error {
 // We set it to stopping than offline to prevent crash detection from being triggered.
 e.SetState(environment.ProcessStoppingState)
@@ -291,9 +261,55 @@ func (e *Environment) Destroy() error {
 return err
 }
-// Attaches to the log for the container. This avoids us missing crucial output that
-// happens in the split seconds before the code moves from 'Starting' to 'Attaching'
-// on the process.
+// SendCommand sends the specified command to the stdin of the running container
+// instance. There is no confirmation that this data is sent successfully, only
+// that it gets pushed into the stdin.
+func (e *Environment) SendCommand(c string) error {
+if !e.IsAttached() {
+return errors.Wrap(ErrNotAttached, "environment/docker: cannot send command to container")
+}
+e.mu.RLock()
+defer e.mu.RUnlock()
+// If the command being processed is the same as the process stop command then we
+// want to mark the server as entering the stopping state otherwise the process will
+// stop and Wings will think it has crashed and attempt to restart it.
+if e.meta.Stop.Type == "command" && c == e.meta.Stop.Value {
+e.SetState(environment.ProcessStoppingState)
+}
+_, err := e.stream.Conn.Write([]byte(c + "\n"))
+return errors.Wrap(err, "environment/docker: could not write to container stream")
+}
+// Readlog reads the log file for the server. This does not care if the server
+// is running or not, it will simply try to read the last X bytes of the file
+// and return them.
+func (e *Environment) Readlog(lines int) ([]string, error) {
+r, err := e.client.ContainerLogs(context.Background(), e.Id, types.ContainerLogsOptions{
+ShowStdout: true,
+ShowStderr: true,
+Tail: strconv.Itoa(lines),
+})
+if err != nil {
+return nil, errors.WithStack(err)
+}
+defer r.Close()
+var out []string
+scanner := bufio.NewScanner(r)
+for scanner.Scan() {
+out = append(out, scanner.Text())
+}
+return out, nil
+}
+// Attaches to the log for the container. This avoids us missing crucial output
+// that happens in the split seconds before the code moves from 'Starting' to
+// 'Attaching' on the process.
 func (e *Environment) followOutput() error {
 if exists, err := e.Exists(); !exists {
 if err != nil {
@@ -346,14 +362,19 @@ func (e *Environment) scanOutput(reader io.ReadCloser) {
 go e.followOutput()
 }
-// Pulls the image from Docker. If there is an error while pulling the image from the source
-// but the image already exists locally, we will report that error to the logger but continue
-// with the process.
+type imagePullStatus struct {
+Status string `json:"status"`
+Progress string `json:"progress"`
+}
+// Pulls the image from Docker. If there is an error while pulling the image
+// from the source but the image already exists locally, we will report that
+// error to the logger but continue with the process.
 //
-// The reasoning behind this is that Quay has had some serious outages as of late, and we don't
-// need to block all of the servers from booting just because of that. I'd imagine in a lot of
-// cases an outage shouldn't affect users too badly. It'll at least keep existing servers working
-// correctly if anything.
+// The reasoning behind this is that Quay has had some serious outages as of
+// late, and we don't need to block all of the servers from booting just because
+// of that. I'd imagine in a lot of cases an outage shouldn't affect users too
+// badly. It'll at least keep existing servers working correctly if anything.
 func (e *Environment) ensureImageExists(image string) error {
 e.Events().Publish(environment.DockerImagePullStarted, "")
 defer e.Events().Publish(environment.DockerImagePullCompleted, "")
@@ -399,7 +420,7 @@ func (e *Environment) ensureImageExists(image string) error {
 if ierr != nil {
 // Well damn, something has gone really wrong here, just go ahead and abort there
 // isn't much anything we can do to try and self-recover from this.
-return ierr
+return errors.Wrap(ierr, "environment/docker: failed to list images")
 }
 for _, img := range images {
@@ -420,7 +441,7 @@ func (e *Environment) ensureImageExists(image string) error {
 }
 }
-return err
+return errors.Wrapf(err, "environment/docker: failed to pull \"%s\" image for server", image)
 }
 defer out.Close()
@@ -447,3 +468,34 @@ func (e *Environment) ensureImageExists(image string) error {
 return nil
 }
+func (e *Environment) convertMounts() []mount.Mount {
+var out []mount.Mount
+for _, m := range e.Configuration.Mounts() {
+out = append(out, mount.Mount{
+Type: mount.TypeBind,
+Source: m.Source,
+Target: m.Target,
+ReadOnly: m.ReadOnly,
+})
+}
+return out
+}
+func (e *Environment) resources() container.Resources {
+l := e.Configuration.Limits()
+return container.Resources{
+Memory: l.BoundedMemoryLimit(),
+MemoryReservation: l.MemoryLimit * 1_000_000,
+MemorySwap: l.ConvertedSwap(),
+CPUQuota: l.ConvertedCpuLimit(),
+CPUPeriod: 100_000,
+CPUShares: 1024,
+BlkioWeight: l.IoWeight,
+OomKillDisable: &l.OOMDisabled,
+CpusetCpus: l.Threads,
+}
+}
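The InSituUpdate hunk earlier in this file now bounds both the inspect and the update calls with a ten-second context. A standalone sketch of that pattern against the Docker SDK — the container ID and memory figure below are made up purely for illustration:

```go
package main

import (
	"context"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// updateLimits bounds the inspect/update round-trip so a wedged daemon cannot
// block the caller forever, and treats a missing container as a no-op.
func updateLimits(cli *client.Client, id string, res container.Resources) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := cli.ContainerInspect(ctx, id); err != nil {
		if client.IsErrNotFound(err) {
			return nil // nothing to update; the container will be rebuilt on boot
		}
		return err
	}
	_, err := cli.ContainerUpdate(ctx, id, container.UpdateConfig{Resources: res})
	return err
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	// "example" is a made-up container ID purely for illustration.
	_ = updateLimits(cli, "example", container.Resources{Memory: 512 * 1024 * 1024})
}
```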

View File

@@ -2,6 +2,11 @@ package docker
 import (
 "context"
+"os"
+"strings"
+"syscall"
+"time"
 "emperror.dev/errors"
 "github.com/apex/log"
 "github.com/docker/docker/api/types"
@@ -9,11 +14,6 @@ import (
"github.com/docker/docker/client" "github.com/docker/docker/client"
"github.com/pterodactyl/wings/environment" "github.com/pterodactyl/wings/environment"
"github.com/pterodactyl/wings/remote" "github.com/pterodactyl/wings/remote"
"os"
"strings"
"syscall"
"time"
) )
// Run before the container starts and get the process configuration from the Panel. // Run before the container starts and get the process configuration from the Panel.
@@ -27,7 +27,7 @@ func (e *Environment) OnBeforeStart() error {
 // Always destroy and re-create the server container to ensure that synced data from the Panel is used.
 if err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
 if !client.IsErrNotFound(err) {
-return errors.WithMessage(err, "failed to remove server docker container during pre-boot")
+return errors.WrapIf(err, "environment/docker: failed to remove container during pre-boot")
 }
 }
@@ -71,7 +71,7 @@ func (e *Environment) Start() error {
 //
 // @see https://github.com/pterodactyl/panel/issues/2000
 if !client.IsErrNotFound(err) {
-return err
+return errors.WrapIf(err, "environment/docker: failed to inspect container")
 }
 } else {
 // If the server is running update our internal state and continue on with the attach.
@@ -86,7 +86,7 @@ func (e *Environment) Start() error {
 // to truncate them.
 if _, err := os.Stat(c.LogPath); err == nil {
 if err := os.Truncate(c.LogPath, 0); err != nil {
-return err
+return errors.Wrap(err, "environment/docker: failed to truncate instance logs")
 }
 }
 }
@@ -101,14 +101,14 @@ func (e *Environment) Start() error {
 // exists on the system, and rebuild the container if that is required for server booting to
 // occur.
 if err := e.OnBeforeStart(); err != nil {
-return err
+return errors.WithStackIf(err)
 }
 ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 defer cancel()
 if err := e.client.ContainerStart(ctx, e.Id, types.ContainerStartOptions{}); err != nil {
-return err
+return errors.WrapIf(err, "environment/docker: failed to start container")
 }
 // No errors, good to continue through.
@@ -174,7 +174,7 @@ func (e *Environment) Stop() error {
 e.SetState(environment.ProcessOfflineState)
 return nil
 }
-return err
+return errors.Wrap(err, "environment/docker: cannot stop container")
 }
 return nil
@@ -208,7 +208,9 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
 return ctxErr
 }
 case err := <-errChan:
-if err != nil {
+// If the error stems from the container not existing there is no point in wasting
+// CPU time to then try and terminate it.
+if err != nil && !client.IsErrNotFound(err) {
 if terminate {
 l := log.WithField("container_id", e.Id)
 if errors.Is(err, context.DeadlineExceeded) {
@@ -219,8 +221,7 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
 return e.Terminate(os.Kill)
 }
-return err
+return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
 }
 case <-ok:
 }
@@ -232,7 +233,12 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
 func (e *Environment) Terminate(signal os.Signal) error {
 c, err := e.client.ContainerInspect(context.Background(), e.Id)
 if err != nil {
-return err
+// Treat missing containers as an okay error state, means it is obviously
+// already terminated at this point.
+if client.IsErrNotFound(err) {
+return nil
+}
+return errors.WithStack(err)
 }
 if !c.State.Running {
@@ -249,13 +255,10 @@ func (e *Environment) Terminate(signal os.Signal) error {
 // We set it to stopping than offline to prevent crash detection from being triggered.
 e.SetState(environment.ProcessStoppingState)
 sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
 if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil && !client.IsErrNotFound(err) {
-return err
+return errors.WithStack(err)
 }
 e.SetState(environment.ProcessOfflineState)
 return nil
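The WaitForStop hunk above stops escalating "no such container" errors into a terminate call. A reduced, self-contained sketch of that shape against the Docker SDK version in use here; the function name and timeout handling are illustrative, not the wings API:

```go
package sketch

import (
	"context"
	"time"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// waitForStop waits for the container to leave the running state. An error that
// simply means the container no longer exists is treated as "already stopped"
// instead of being passed back for a pointless terminate attempt.
func waitForStop(cli *client.Client, id string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	ok, errChan := cli.ContainerWait(ctx, id, container.WaitConditionNotRunning)
	select {
	case err := <-errChan:
		if err != nil && !client.IsErrNotFound(err) {
			return err // genuine failure; the caller may decide to terminate
		}
	case <-ok:
	}
	return nil
}
```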

View File

@@ -1,100 +0,0 @@
package docker
import (
"bufio"
"bytes"
"context"
"emperror.dev/errors"
"encoding/json"
"github.com/docker/docker/api/types"
"github.com/pterodactyl/wings/environment"
"strconv"
)
type dockerLogLine struct {
Log string `json:"log"`
}
var ErrNotAttached = errors.New("not attached to instance")
func (e *Environment) setStream(s *types.HijackedResponse) {
e.mu.Lock()
defer e.mu.Unlock()
e.stream = s
}
// Sends the specified command to the stdin of the running container instance. There is no
// confirmation that this data is sent successfully, only that it gets pushed into the stdin.
func (e *Environment) SendCommand(c string) error {
if !e.IsAttached() {
return ErrNotAttached
}
e.mu.RLock()
defer e.mu.RUnlock()
// If the command being processed is the same as the process stop command then we want to mark
// the server as entering the stopping state otherwise the process will stop and Wings will think
// it has crashed and attempt to restart it.
if e.meta.Stop.Type == "command" && c == e.meta.Stop.Value {
e.SetState(environment.ProcessStoppingState)
}
_, err := e.stream.Conn.Write([]byte(c + "\n"))
return err
}
// Reads the log file for the server. This does not care if the server is running or not, it will
// simply try to read the last X bytes of the file and return them.
func (e *Environment) Readlog(lines int) ([]string, error) {
r, err := e.client.ContainerLogs(context.Background(), e.Id, types.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Tail: strconv.Itoa(lines),
})
if err != nil {
return nil, err
}
defer r.Close()
var out []string
scanner := bufio.NewScanner(r)
for scanner.Scan() {
out = append(out, scanner.Text())
}
return out, nil
}
// Docker stores the logs for server output in a JSON format. This function will iterate over the JSON
// that was read from the log file and parse it into a more human readable format.
func (e *Environment) parseLogToStrings(b []byte) ([]string, error) {
hasError := false
var out []string
scanner := bufio.NewScanner(bytes.NewReader(b))
for scanner.Scan() {
var l dockerLogLine
// Unmarshal the contents and allow up to a single error before bailing out of the process. We
// do this because if you're arbitrarily reading a length of the file you'll likely end up
// with the first line in the output being improperly formatted JSON. In those cases we want to
// just skip over it. However if we see another error we're going to bail out because that is an
// abnormal situation.
if err := json.Unmarshal([]byte(scanner.Text()), &l); err != nil {
if hasError {
return nil, err
}
hasError = true
continue
}
out = append(out, l.Log)
}
return out, nil
}
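The deleted stream.go above carried the tolerant log parsing: Docker stores console output as JSON lines, and reading from an arbitrary byte offset usually cuts the first line in half, so exactly one malformed entry is forgiven. A standalone sketch of that idea (function and type names are illustrative):

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
)

type logLine struct {
	Log string `json:"log"`
}

// parseLogLines tolerates a single malformed entry, which is expected when the
// read starts mid-line inside Docker's JSON log file; a second failure is abnormal.
func parseLogLines(b []byte) ([]string, error) {
	var out []string
	sawError := false
	scanner := bufio.NewScanner(bytes.NewReader(b))
	for scanner.Scan() {
		var l logLine
		if err := json.Unmarshal(scanner.Bytes(), &l); err != nil {
			if sawError {
				return nil, err
			}
			sawError = true
			continue
		}
		out = append(out, l.Log)
	}
	return out, scanner.Err()
}

func main() {
	sample := []byte("{\"log\":\"Hello\\n\"}\n{\"log\":\"World\\n\"}\n")
	lines, _ := parseLogLines(sample)
	fmt.Println(lines)
}
```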

View File

@@ -30,6 +30,45 @@ const (
 Xml = "xml"
 )
+type ReplaceValue struct {
+value []byte
+valueType jsonparser.ValueType
+}
+// Value returns the underlying value of the replacement. Be aware that this
+// can include escaped UTF-8 sequences that will need to be handled by the caller
+// in order to avoid accidentally injecting invalid sequences into the running
+// process.
+//
+// For example the expected value may be "§Foo" but you'll be working directly
+// with "\u00a7FOo" for this value. This will cause user pain if not solved since
+// that is clearly not the value they were expecting to be using.
+func (cv *ReplaceValue) Value() []byte {
+return cv.value
+}
+// Type returns the underlying data type for the Value field.
+func (cv *ReplaceValue) Type() jsonparser.ValueType {
+return cv.valueType
+}
+// String returns the value as a string representation. This will automatically
+// handle casting the UTF-8 sequence into the expected value, switching something
+// like "\u00a7Foo" into "§Foo".
+func (cv *ReplaceValue) String() string {
+if cv.Type() != jsonparser.String {
+if cv.Type() == jsonparser.Null {
+return "<nil>"
+}
+return "<invalid>"
+}
+str, err := jsonparser.ParseString(cv.value)
+if err != nil {
+panic(errors.Wrap(err, "parser: could not parse value"))
+}
+return str
+}
 type ConfigurationParser string
 func (cp ConfigurationParser) String() string {
@@ -77,15 +116,16 @@ func (f *ConfigurationFile) UnmarshalJSON(data []byte) error {
 return nil
 }
-// Defines a single find/replace instance for a given server configuration file.
+// ConfigurationFileReplacement defines a single find/replace instance for a
+// given server configuration file.
 type ConfigurationFileReplacement struct {
 Match string `json:"match"`
 IfValue string `json:"if_value"`
 ReplaceWith ReplaceValue `json:"replace_with"`
 }
-// Handles unmarshaling the JSON representation into a struct that provides more useful
-// data to this functionality.
+// UnmarshalJSON handles unmarshaling the JSON representation into a struct that
+// provides more useful data to this functionality.
 func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
 m, err := jsonparser.GetString(data, "match")
 if err != nil {
@@ -410,48 +450,66 @@ func (f *ConfigurationFile) parseTextFile(path string) error {
 return nil
 }
-// Parses a properties file and updates the values within it to match those that
-// are passed. Writes the file once completed.
+// parsePropertiesFile parses a properties file and updates the values within it
+// to match those that are passed. Once completed the new file is written to the
+// disk. This will cause comments not present at the head of the file to be
+// removed unfortunately.
+//
+// Any UTF-8 value will be written back to the disk as their escaped value rather
+// than the raw value There is no winning with this logic. This fixes a bug where
+// users with hand rolled UTF-8 escape sequences would have all sorts of pain in
+// their configurations because we were writing the UTF-8 literal characters which
+// their games could not actually handle.
+//
+// However, by adding this fix to only store the escaped UTF-8 sequence we
+// unwittingly introduced a "regression" that causes _other_ games to have issues
+// because they can only handle the unescaped representations. I cannot think of
+// a simple approach to this problem that doesn't just lead to more complicated
+// cases and problems.
+//
+// So, if your game cannot handle parsing UTF-8 sequences that are escaped into
+// the string, well, sucks. There are fewer of those games than there are games
+// that have issues parsing the raw UTF-8 sequence into a string? Also how does
+// one really know what the user intended at this point? We'd need to know if
+// the value was escaped or not to begin with before setting it, which I suppose
+// can work but jesus that is going to be some annoyingly complicated logic?
+//
+// @see https://github.com/pterodactyl/panel/issues/2308 (original)
+// @see https://github.com/pterodactyl/panel/issues/3009 ("bug" introduced as result)
 func (f *ConfigurationFile) parsePropertiesFile(path string) error {
-// Open the file.
-f2, err := os.Open(path)
-if err != nil {
-return err
-}
 var s strings.Builder
-// Get any header comments from the file.
-scanner := bufio.NewScanner(f2)
-for scanner.Scan() {
-text := scanner.Text()
-if len(text) > 0 && text[0] != '#' {
-break
-}
-s.WriteString(text)
-s.WriteString("\n")
-}
-// Close the file.
-_ = f2.Close()
-// Handle any scanner errors.
-if err := scanner.Err(); err != nil {
-return err
-}
-// Decode the properties file.
+// Open the file and attempt to load any comments that currenty exist at the start
+// of the file. This is kind of a hack, but should work for a majority of users for
+// the time being.
+if fd, err := os.Open(path); err != nil {
+return errors.Wrap(err, "parser: could not open file for reading")
+} else {
+scanner := bufio.NewScanner(fd)
+// Scan until we hit a line that is not a comment that actually has content
+// on it. Keep appending the comments until that time.
+for scanner.Scan() {
+text := scanner.Text()
+if len(text) > 0 && text[0] != '#' {
+break
+}
+s.WriteString(text + "\n")
+}
+_ = fd.Close()
+if err := scanner.Err(); err != nil {
+return errors.WithStackIf(err)
+}
+}
 p, err := properties.LoadFile(path, properties.UTF8)
 if err != nil {
-return err
+return errors.Wrap(err, "parser: could not load properties file for configuration update")
 }
 // Replace any values that need to be replaced.
 for _, replace := range f.Replace {
 data, err := f.LookupConfigurationValue(replace)
 if err != nil {
-return err
+return errors.Wrap(err, "parser: failed to lookup configuration value")
 }
 v, ok := p.Get(replace.Match)
@@ -463,7 +521,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
 }
 if _, _, err := p.Set(replace.Match, data); err != nil {
-return err
+return errors.Wrap(err, "parser: failed to set replacement value")
 }
 }
@@ -473,11 +531,11 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
 if !ok {
 continue
 }
-s.WriteString(key)
-s.WriteByte('=')
-s.WriteString(strings.Trim(strconv.QuoteToASCII(value), `"`))
-s.WriteString("\n")
+// This escape is intentional!
+//
+// See the docblock for this function for more details, do not change this
+// or you'll cause a flood of new issue reports no one wants to deal with.
+s.WriteString(key + "=" + strings.Trim(strconv.QuoteToASCII(value), "\"") + "\n")
 }
 // Open the file for writing.
@@ -489,7 +547,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
 // Write the data to the file.
 if _, err := w.Write([]byte(s.String())); err != nil {
-return err
+return errors.Wrap(err, "parser: failed to write properties file to disk")
 }
 return nil
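To make the escaping trade-off described in the docblock above concrete: `strconv.QuoteToASCII` turns a literal section sign into its `\u` escape before the line is written back. A tiny sketch of that write path with a made-up key/value pair:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	key, value := "motd", "§6Hello"
	// QuoteToASCII yields "\"\\u00a76Hello\""; trimming the surrounding quotes
	// leaves the escaped form that ends up in the properties file.
	escaped := strings.Trim(strconv.QuoteToASCII(value), `"`)
	fmt.Println(key + "=" + escaped) // motd=\u00a76Hello
}
```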

View File

@@ -1,24 +0,0 @@
package parser
import (
"github.com/buger/jsonparser"
)
type ReplaceValue struct {
value []byte
valueType jsonparser.ValueType `json:"-"`
}
func (cv *ReplaceValue) Value() []byte {
return cv.value
}
func (cv *ReplaceValue) String() string {
str, _ := jsonparser.ParseString(cv.value)
return str
}
func (cv *ReplaceValue) Type() jsonparser.ValueType {
return cv.valueType
}

View File

@@ -134,7 +134,7 @@ func (e *RequestError) getAsFilesystemError() (int, string) {
 return http.StatusBadRequest, "Cannot perform that action: file is a directory."
 }
 if filesystem.IsErrorCode(e.err, filesystem.ErrCodeDiskSpace) || strings.Contains(e.err.Error(), "filesystem: not enough disk space") {
-return http.StatusBadRequest, "Cannot perform that action: file is a directory."
+return http.StatusBadRequest, "Cannot perform that action: not enough disk space available."
 }
 if strings.HasSuffix(e.err.Error(), "file name too long") {
 return http.StatusBadRequest, "Cannot perform that action: file name is too long."
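The one-line fix above only corrects the message for the disk-space branch; the surrounding structure is a plain chain of error-classification checks mapped to HTTP responses. A simplified sketch of that shape, using a hypothetical sentinel error rather than the wings filesystem error codes:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"strings"
)

var errDiskSpace = errors.New("filesystem: not enough disk space") // hypothetical sentinel

// asFilesystemError maps a low-level filesystem error onto a status code and a
// user-facing message, mirroring the branch structure shown in the diff above.
func asFilesystemError(err error) (int, string) {
	if errors.Is(err, errDiskSpace) || strings.Contains(err.Error(), "not enough disk space") {
		return http.StatusBadRequest, "Cannot perform that action: not enough disk space available."
	}
	if strings.HasSuffix(err.Error(), "file name too long") {
		return http.StatusBadRequest, "Cannot perform that action: file name is too long."
	}
	return http.StatusInternalServerError, "An unexpected error was encountered while processing this request."
}

func main() {
	code, msg := asFilesystemError(errDiskSpace)
	fmt.Println(code, msg)
}
```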

View File

@@ -16,19 +16,9 @@ import (
"github.com/pterodactyl/wings/server" "github.com/pterodactyl/wings/server"
) )
type serverProcData struct {
server.ResourceUsage
Suspended bool `json:"suspended"`
}
// Returns a single server from the collection of servers. // Returns a single server from the collection of servers.
func getServer(c *gin.Context) { func getServer(c *gin.Context) {
s := ExtractServer(c) c.JSON(http.StatusOK, ExtractServer(c).ToAPIResponse())
c.JSON(http.StatusOK, serverProcData{
ResourceUsage: s.Proc(),
Suspended: s.IsSuspended(),
})
} }
// Returns the logs for a given server instance. // Returns the logs for a given server instance.
@@ -204,12 +194,6 @@ func deleteServer(c *gin.Context) {
 s.Events().Destroy()
 s.Websockets().CancelAll()
-// Delete the server's archive if it exists. We intentionally don't return
-// here, if the archive fails to delete, the server can still be removed.
-if err := s.Archiver.DeleteIfExists(); err != nil {
-s.Log().WithField("error", err).Warn("failed to delete server archive during deletion process")
-}
 // Remove any pending remote file downloads for the server.
 for _, dl := range downloader.ByServer(s.Id()) {
 dl.Cancel()
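The removed serverProcData type above relied on a common Gin/JSON pattern: embed one struct and add a flag so both marshal into a single flat object. A minimal, self-contained illustration of that embedding behaviour — the field names below are stand-ins, not the actual wings resource types or the new ToAPIResponse shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type ResourceUsage struct {
	MemoryBytes uint64  `json:"memory_bytes"`
	CPUAbsolute float64 `json:"cpu_absolute"`
}

type serverProcData struct {
	ResourceUsage      // embedded: its fields are flattened into the output
	Suspended     bool `json:"suspended"`
}

func main() {
	b, _ := json.Marshal(serverProcData{ResourceUsage{MemoryBytes: 1024, CPUAbsolute: 1.5}, false})
	fmt.Println(string(b)) // {"memory_bytes":1024,"cpu_absolute":1.5,"suspended":false}
}
```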

View File

@@ -63,7 +63,7 @@ func postServerBackup(c *gin.Context) {
 // This endpoint will block until the backup is fully restored allowing for a
 // spinner to be displayed in the Panel UI effectively.
 //
-// TODO: stop the server if it is running; internally mark it as suspended
+// TODO: stop the server if it is running
 func postServerRestoreBackup(c *gin.Context) {
 s := middleware.ExtractServer(c)
 client := middleware.ExtractApiClient(c)
@@ -84,9 +84,19 @@ func postServerRestoreBackup(c *gin.Context) {
 return
 }
+s.SetRestoring(true)
+hasError := true
+defer func() {
+if !hasError {
+return
+}
+s.SetRestoring(false)
+}()
 logger.Info("processing server backup restore request")
 if data.TruncateDirectory {
-logger.Info(`recieved "truncate_directory" flag in request: deleting server files`)
+logger.Info("received \"truncate_directory\" flag in request: deleting server files")
 if err := s.Filesystem().TruncateRootDirectory(); err != nil {
 middleware.CaptureAndAbort(c, err)
 return
@@ -109,7 +119,9 @@ func postServerRestoreBackup(c *gin.Context) {
 s.Events().Publish(server.DaemonMessageEvent, "Completed server restoration from local backup.")
 s.Events().Publish(server.BackupRestoreCompletedEvent, "")
 logger.Info("completed server restoration from local backup")
+s.SetRestoring(false)
 }(s, b, logger)
+hasError = false
 c.Status(http.StatusAccepted)
 return
 }
@@ -136,7 +148,7 @@ func postServerRestoreBackup(c *gin.Context) {
 }
 // Don't allow content types that we know are going to give us problems.
 if res.Header.Get("Content-Type") == "" || !strings.Contains("application/x-gzip application/gzip", res.Header.Get("Content-Type")) {
-res.Body.Close()
+_ = res.Body.Close()
 c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
 "error": "The provided backup link is not a supported content type. \"" + res.Header.Get("Content-Type") + "\" is not application/x-gzip.",
 })
@@ -151,8 +163,10 @@ func postServerRestoreBackup(c *gin.Context) {
 s.Events().Publish(server.DaemonMessageEvent, "Completed server restoration from S3 backup.")
 s.Events().Publish(server.BackupRestoreCompletedEvent, "")
 logger.Info("completed server restoration from S3 backup")
+s.SetRestoring(false)
 }(s, c.Param("backup"), logger)
+hasError = false
 c.Status(http.StatusAccepted)
 }
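The hasError/defer pairing added in this file is a small guard pattern: set the restoring flag, and only clear it again if the handler bails out before handing off to the background goroutine, which clears it itself when the restore finishes. A generic sketch of the same idea with stand-in types:

```go
package main

import "fmt"

type restorable struct{ restoring bool }

func (s *restorable) SetRestoring(v bool) { s.restoring = v }

// startRestore marks the resource as restoring and only resets the flag if we
// fail before the background work is successfully handed off.
func startRestore(s *restorable, begin func() error) error {
	s.SetRestoring(true)
	hasError := true
	defer func() {
		if hasError {
			s.SetRestoring(false)
		}
	}()
	if err := begin(); err != nil {
		return err // deferred cleanup resets the flag
	}
	hasError = false // hand-off succeeded; the worker clears the flag later
	return nil
}

func main() {
	s := &restorable{}
	_ = startRestore(s, func() error { return nil })
	fmt.Println("restoring:", s.restoring)
}
```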

View File

@@ -3,6 +3,7 @@ package router
 import (
 "bufio"
 "context"
+"io"
 "mime/multipart"
 "net/http"
 "net/url"
@@ -43,8 +44,16 @@ func getServerFileContents(c *gin.Context) {
c.Header("Content-Type", "application/octet-stream") c.Header("Content-Type", "application/octet-stream")
} }
defer c.Writer.Flush() defer c.Writer.Flush()
_, err = bufio.NewReader(f).WriteTo(c.Writer) // If you don't do a limited reader here you will trigger a panic on write when
if err != nil { // a different server process writes content to the file after you've already
// determined the file size. This could lead to some weird content output but
// it would technically be accurate based on the content at the time of the request.
//
// "http: wrote more than the declared Content-Length"
//
// @see https://github.com/pterodactyl/panel/issues/3131
r := io.LimitReader(f, st.Size())
if _, err = bufio.NewReader(r).WriteTo(c.Writer); err != nil {
// Pretty sure this will unleash chaos on the response, but its a risk we can // Pretty sure this will unleash chaos on the response, but its a risk we can
// take since a panic will at least be recovered and this should be incredibly // take since a panic will at least be recovered and this should be incredibly
// rare? // rare?
@@ -374,8 +383,6 @@ func postServerCompressFiles(c *gin.Context) {
 // of unpacking an archive that exists on the server into the provided RootPath
 // for the server.
 func postServerDecompressFiles(c *gin.Context) {
-s := middleware.ExtractServer(c)
-lg := middleware.ExtractLogger(c)
 var data struct {
 RootPath string `json:"root"`
 File string `json:"file"`
@@ -384,7 +391,8 @@ func postServerDecompressFiles(c *gin.Context) {
 return
 }
-lg = lg.WithFields(log.Fields{"root_path": data.RootPath, "file": data.File})
+s := middleware.ExtractServer(c)
+lg := middleware.ExtractLogger(c).WithFields(log.Fields{"root_path": data.RootPath, "file": data.File})
 lg.Debug("checking if space is available for file decompression")
 err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File)
 if err != nil {
@@ -403,7 +411,7 @@ func postServerDecompressFiles(c *gin.Context) {
 // much we specifically can do. They'll need to stop the running server process in order to overwrite
 // a file like this.
 if strings.Contains(err.Error(), "text file busy") {
-lg.WithField("error", err).Warn("failed to decompress file: text file busy")
+lg.WithField("error", errors.WithStackIf(err)).Warn("failed to decompress file: text file busy")
 c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
 "error": "One or more files this archive is attempting to overwrite are currently in use by another process. Please try again.",
 })
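The LimitReader change above caps the response body at the size reported by the earlier stat, so a process appending to the file mid-response cannot push the handler past its declared Content-Length. A reduced sketch with plain net/http types; the paths and handler wiring are illustrative only:

```go
package main

import (
	"bufio"
	"io"
	"net/http"
	"os"
	"strconv"
)

func serveFile(w http.ResponseWriter, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		return err
	}
	w.Header().Set("Content-Length", strconv.FormatInt(st.Size(), 10))
	// Cap the copy at the stat-time size; if another process appends to the file
	// while we stream it, we stop at the declared length instead of panicking
	// with "http: wrote more than the declared Content-Length".
	_, err = bufio.NewReader(io.LimitReader(f, st.Size())).WriteTo(w)
	return err
}

func main() {
	http.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) {
		_ = serveFile(w, "/tmp/example.log") // hypothetical path
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```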

View File

@@ -10,6 +10,7 @@ import (
"github.com/pterodactyl/wings/config" "github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/installer" "github.com/pterodactyl/wings/installer"
"github.com/pterodactyl/wings/router/middleware" "github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system" "github.com/pterodactyl/wings/system"
) )
@@ -28,7 +29,12 @@ func getSystemInformation(c *gin.Context) {
 // Returns all of the servers that are registered and configured correctly on
 // this wings instance.
 func getAllServers(c *gin.Context) {
-c.JSON(http.StatusOK, middleware.ExtractManager(c).All())
+servers := middleware.ExtractManager(c).All()
+out := make([]server.APIResponse, len(servers), len(servers))
+for i, v := range servers {
+out[i] = v.ToAPIResponse()
+}
+c.JSON(http.StatusOK, out)
 }
 // Creates a new server on the wings daemon and begins the installation process

View File

@@ -29,6 +29,7 @@ import (
"github.com/pterodactyl/wings/router/middleware" "github.com/pterodactyl/wings/router/middleware"
"github.com/pterodactyl/wings/router/tokens" "github.com/pterodactyl/wings/router/tokens"
"github.com/pterodactyl/wings/server" "github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/server/filesystem"
"github.com/pterodactyl/wings/system" "github.com/pterodactyl/wings/system"
) )
@@ -51,6 +52,10 @@ type serverTransferRequest struct {
 Server json.RawMessage `json:"server"`
 }
+func getArchivePath(sID string) string {
+return filepath.Join(config.Get().System.ArchiveDirectory, sID+".tar.gz")
+}
 // Returns the archive for a server so that it can be transferred to a new node.
 func getServerArchive(c *gin.Context) {
 auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2)
@@ -77,36 +82,51 @@ func getServerArchive(c *gin.Context) {
 return
 }
-st, err := s.Archiver.Stat()
+archivePath := getArchivePath(s.Id())
+// Stat the archive file.
+st, err := os.Lstat(archivePath)
 if err != nil {
 if !errors.Is(err, os.ErrNotExist) {
-WithError(c, err)
+_ = WithError(c, err)
 return
 }
 c.AbortWithStatus(http.StatusNotFound)
 return
 }
-checksum, err := s.Archiver.Checksum()
+// Compute sha1 checksum.
+h := sha256.New()
+f, err := os.Open(archivePath)
 if err != nil {
-NewServerError(err, s).SetMessage("failed to calculate checksum").Abort(c)
 return
 }
-file, err := os.Open(s.Archiver.Path())
+if _, err := io.Copy(h, bufio.NewReader(f)); err != nil {
+_ = f.Close()
+_ = WithError(c, err)
+return
+}
+if err := f.Close(); err != nil {
+_ = WithError(c, err)
+return
+}
+checksum := hex.EncodeToString(h.Sum(nil))
+// Stream the file to the client.
+f, err = os.Open(archivePath)
 if err != nil {
-WithError(c, err)
+_ = WithError(c, err)
 return
 }
-defer file.Close()
+defer f.Close()
 c.Header("X-Checksum", checksum)
-c.Header("X-Mime-Type", st.Mimetype)
+c.Header("X-Mime-Type", "application/tar+gzip")
 c.Header("Content-Length", strconv.Itoa(int(st.Size())))
-c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(s.Archiver.Name()))
+c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(s.Id()+".tar.gz"))
 c.Header("Content-Type", "application/octet-stream")
-bufio.NewReader(file).WriteTo(c.Writer)
+_, _ = bufio.NewReader(f).WriteTo(c.Writer)
 }
 func postServerArchive(c *gin.Context) {
@@ -164,8 +184,13 @@ func postServerArchive(c *gin.Context) {
			return
		}

+		// Create an archive of the entire server's data directory.
+		a := &filesystem.Archive{
+			BasePath: s.Filesystem().Path(),
+		}
+
		// Attempt to get an archive of the server.
-		if err := s.Archiver.Archive(); err != nil {
+		if err := a.Create(getArchivePath(s.Id())); err != nil {
			sendTransferLog("An error occurred while archiving the server: " + err.Error())
			l.WithField("error", err).Error("failed to get transfer archive for server")
			return
@@ -227,7 +252,7 @@ func (str serverTransferRequest) downloadArchive() (*http.Response, error) {
// Returns the path to the local archive on the system.
func (str serverTransferRequest) path() string {
-	return filepath.Join(config.Get().System.ArchiveDirectory, str.ServerID+".tar.gz")
+	return getArchivePath(str.ServerID)
}

// Creates the archive location on this machine by first checking that the required file
@@ -260,17 +285,16 @@ func (str serverTransferRequest) removeArchivePath() {
// expected value from the transfer request. The string value returned is the computed
// checksum on the system.
func (str serverTransferRequest) verifyChecksum(matches string) (bool, string, error) {
-	file, err := os.Open(str.path())
+	f, err := os.Open(str.path())
	if err != nil {
		return false, "", err
	}
-	defer file.Close()
-	hash := sha256.New()
-	buf := make([]byte, 1024*4)
-	if _, err := io.CopyBuffer(hash, file, buf); err != nil {
+	defer f.Close()
+	h := sha256.New()
+	if _, err := io.Copy(h, bufio.NewReader(f)); err != nil {
		return false, "", err
	}
-	checksum := hex.EncodeToString(hash.Sum(nil))
+	checksum := hex.EncodeToString(h.Sum(nil))
	return checksum == matches, checksum, nil
}
@@ -362,7 +386,7 @@ func postTransfer(c *gin.Context) {
		return
	}
	defer res.Body.Close()
-	if res.StatusCode != 200 {
+	if res.StatusCode != http.StatusOK {
		data.log().WithField("error", err).WithField("status", res.StatusCode).Error("unexpected error response from transfer endpoint")
		return
	}
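The same streaming-checksum pattern now appears in both getServerArchive and verifyChecksum. For reference, a minimal standalone sketch of that pattern; the helper name and the example path are illustrative, not part of wings:

package main

import (
	"bufio"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// sha256File streams a file through a SHA-256 hash without loading it into
// memory, mirroring the pattern used in the transfer handlers above.
func sha256File(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := sha256.New()
	// bufio keeps reads large and sequential; io.Copy handles EOF for us.
	if _, err := io.Copy(h, bufio.NewReader(f)); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := sha256File("/tmp/example.tar.gz")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(sum)
}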

View File

@@ -1,120 +0,0 @@
package server
import (
"crypto/sha256"
"encoding/hex"
"io"
"io/ioutil"
"os"
"path/filepath"
"emperror.dev/errors"
"github.com/mholt/archiver/v3"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/server/filesystem"
)
// Archiver represents a Server Archiver.
type Archiver struct {
Server *Server
}
// Path returns the path to the server's archive.
func (a *Archiver) Path() string {
return filepath.Join(config.Get().System.ArchiveDirectory, a.Name())
}
// Name returns the name of the server's archive.
func (a *Archiver) Name() string {
return a.Server.Id() + ".tar.gz"
}
// Exists returns a boolean based off if the archive exists.
func (a *Archiver) Exists() bool {
if _, err := os.Stat(a.Path()); os.IsNotExist(err) {
return false
}
return true
}
// Stat stats the archive file.
func (a *Archiver) Stat() (*filesystem.Stat, error) {
s, err := os.Stat(a.Path())
if err != nil {
return nil, err
}
return &filesystem.Stat{
FileInfo: s,
Mimetype: "application/tar+gzip",
}, nil
}
// Archive creates an archive of the server and deletes the previous one.
func (a *Archiver) Archive() error {
path := a.Server.Filesystem().Path()
// Get the list of root files and directories to archive.
var files []string
fileInfo, err := ioutil.ReadDir(path)
if err != nil {
return err
}
for _, file := range fileInfo {
f := filepath.Join(path, file.Name())
// If the file is a symlink we cannot safely assume that the result of a filepath.Join() will be
// a safe destination. We need to check if the file is a symlink, and if so pass off to the SafePath
// function to resolve it to the final destination.
//
// ioutil.ReadDir() calls Lstat, so this will work correctly. If it did not call Lstat, but rather
// just did a normal Stat call, this would fail since that would be looking at the symlink destination
// and not the actual file in this listing.
if file.Mode()&os.ModeSymlink != 0 {
f, err = a.Server.Filesystem().SafePath(filepath.Join(path, file.Name()))
if err != nil {
return err
}
}
files = append(files, f)
}
if err := a.DeleteIfExists(); err != nil {
return err
}
return archiver.NewTarGz().Archive(files, a.Path())
}
// DeleteIfExists deletes the archive if it exists.
func (a *Archiver) DeleteIfExists() error {
if _, err := a.Stat(); err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return err
}
return errors.WithMessage(os.Remove(a.Path()), "archiver: failed to delete archive from system")
}
// Checksum computes a SHA256 checksum of the server's archive.
func (a *Archiver) Checksum() (string, error) {
file, err := os.Open(a.Path())
if err != nil {
return "", err
}
defer file.Close()
hash := sha256.New()
buf := make([]byte, 1024*4)
if _, err := io.CopyBuffer(hash, file, buf); err != nil {
return "", err
}
return hex.EncodeToString(hash.Sum(nil)), nil
}
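With server/archiver.go removed, transfer archives come from the same filesystem.Archive type the backup code uses (see the postServerArchive hunk above). A rough sketch of the replacement call, assuming only the BasePath field and the Create(dst string) method shown in this diff; the wrapper function and example paths are invented for illustration:

package main

import (
	"log"
	"path/filepath"

	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/server/filesystem"
)

// archiveServer writes <archive_directory>/<server id>.tar.gz, the same layout
// the transfer endpoints above expect. Sketch only; error handling simplified.
func archiveServer(dataPath, serverID string) error {
	a := &filesystem.Archive{
		BasePath: dataPath, // the server's data directory
	}
	dst := filepath.Join(config.Get().System.ArchiveDirectory, serverID+".tar.gz")
	return a.Create(dst)
}

func main() {
	if err := archiveServer("/var/lib/pterodactyl/volumes/example", "example"); err != nil {
		log.Fatal(err)
	}
}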

View File

@@ -5,9 +5,10 @@ import (
	"io"
	"os"
+	"github.com/pterodactyl/wings/server/filesystem"
	"github.com/mholt/archiver/v3"
	"github.com/pterodactyl/wings/remote"
-	"github.com/pterodactyl/wings/system"
)

type LocalBackup struct {
@@ -19,7 +20,7 @@ var _ BackupInterface = (*LocalBackup)(nil)
func NewLocal(client remote.Client, uuid string, ignore string) *LocalBackup {
	return &LocalBackup{
		Backup{
			client:  client,
			Uuid:    uuid,
			Ignore:  ignore,
			adapter: LocalBackupAdapter,
@@ -56,7 +57,7 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
// Generate generates a backup of the selected files and pushes it to the
// defined location for this instance.
func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
-	a := &Archive{
+	a := &filesystem.Archive{
		BasePath: basePath,
		Ignore:   ignore,
	}
@@ -77,10 +78,6 @@ func (b *LocalBackup) Restore(_ io.Reader, callback RestoreCallback) error {
		if f.IsDir() {
			return nil
		}
-		name, err := system.ExtractArchiveSourceName(f, "/")
-		if err != nil {
-			return err
-		}
-		return callback(name, f)
+		return callback(f.Name(), f)
	})
}

View File

@@ -5,6 +5,7 @@ import (
	"compress/gzip"
	"context"
	"fmt"
+	"github.com/pterodactyl/wings/server/filesystem"
	"io"
	"net/http"
	"os"
@@ -47,7 +48,7 @@ func (s *S3Backup) WithLogContext(c map[string]interface{}) {
func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
	defer s.Remove()

-	a := &Archive{
+	a := &filesystem.Archive{
		BasePath: basePath,
		Ignore:   ignore,
	}

View File

@@ -8,7 +8,7 @@ import (
type EggConfiguration struct {
	// The internal UUID of the Egg on the Panel.
-	ID string
+	ID string `json:"id"`

	// Maintains a list of files that are blacklisted for opening/editing/downloading
	// or basically any type of access on the server by any user. This is NOT the same
@@ -43,7 +43,6 @@ type Configuration struct {
	Build                 environment.Limits `json:"build"`
	CrashDetectionEnabled bool `default:"true" json:"enabled" yaml:"enabled"`
	Mounts                []Mount `json:"mounts"`
-	Resources             ResourceUsage `json:"resources"`
	Egg                   EggConfiguration `json:"egg,omitempty"`

	Container struct {

View File

@@ -13,6 +13,11 @@ import (
	"github.com/pterodactyl/wings/system"
)

+// appName is a local cache variable to avoid having to make expensive copies of
+// the configuration every time we need to send output along to the websocket for
+// a server.
+var appName string
+
var ErrTooMuchConsoleData = errors.New("console is outputting too much data")

type ConsoleThrottler struct {
@@ -122,11 +127,14 @@ func (s *Server) Throttler() *ConsoleThrottler {
	return s.throttler
}

-// Sends output to the server console formatted to appear correctly as being sent
-// from Wings.
+// PublishConsoleOutputFromDaemon sends output to the server console formatted
+// to appear correctly as being sent from Wings.
func (s *Server) PublishConsoleOutputFromDaemon(data string) {
+	if appName == "" {
+		appName = config.Get().AppName
+	}
	s.Events().Publish(
		ConsoleOutputEvent,
-		colorstring.Color(fmt.Sprintf("[yellow][bold][Pterodactyl Daemon]:[default] %s", data)),
+		colorstring.Color(fmt.Sprintf("[yellow][bold][%s Daemon]:[default] %s", appName, data)),
	)
}
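Because the prefix depends only on the cached app name, the formatting is easy to preview in isolation. A standalone sketch of the same colorstring call; the app name here is a hard-coded placeholder for what wings reads from config.Get().AppName:

package main

import (
	"fmt"

	"github.com/mitchellh/colorstring"
)

func main() {
	appName := "Pterodactyl" // placeholder for config.Get().AppName
	data := "Server marked as starting..."

	// Same formatting used by PublishConsoleOutputFromDaemon: a bold yellow
	// "[<AppName> Daemon]:" prefix followed by the plain message text.
	line := colorstring.Color(fmt.Sprintf("[yellow][bold][%s Daemon]:[default] %s", appName, data))
	fmt.Println(line)
}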

View File

@@ -9,6 +9,7 @@ var (
	ErrSuspended            = errors.New("server is currently in a suspended state")
	ErrServerIsInstalling   = errors.New("server is currently installing")
	ErrServerIsTransferring = errors.New("server is currently being transferred")
+	ErrServerIsRestoring    = errors.New("server is currently being restored")
)

type crashTooFrequent struct {

View File

@@ -1,4 +1,4 @@
-package backup
+package filesystem

import (
	"archive/tar"

View File

@@ -9,9 +9,8 @@ import (
	"sync/atomic"
	"time"

+	"emperror.dev/errors"
	"github.com/mholt/archiver/v3"
-	"github.com/pterodactyl/wings/server/backup"
-	"github.com/pterodactyl/wings/system"
)

// CompressFiles compresses all of the files matching the given paths in the
@@ -39,7 +38,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
		return nil, err
	}

-	a := &backup.Archive{BasePath: cleanedRootDir, Files: cleaned}
+	a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
	d := path.Join(
		cleanedRootDir,
		fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
@@ -87,13 +86,13 @@ func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) er
	// Walk over the archive and figure out just how large the final output would be from unarchiving it.
	err = archiver.Walk(source, func(f archiver.File) error {
		if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
-			return &Error{code: ErrCodeDiskSpace}
+			return newFilesystemError(ErrCodeDiskSpace, nil)
		}
		return nil
	})
	if err != nil {
-		if strings.HasPrefix(err.Error(), "format ") {
-			return &Error{code: ErrCodeUnknownArchive}
+		if IsUnknownArchiveFormatError(err) {
+			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}
@@ -112,7 +111,7 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
	}
	// Ensure that the source archive actually exists on the system.
	if _, err := os.Stat(source); err != nil {
-		return err
+		return errors.WithStack(err)
	}

	// Walk all of the files in the archiver file and write them to the disk. If any
@@ -122,26 +121,21 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error {
		if f.IsDir() {
			return nil
		}
-		name, err := system.ExtractArchiveSourceName(f, dir)
-		if err != nil {
-			return WrapError(err, filepath.Join(dir, f.Name()))
-		}
-		p := filepath.Join(dir, name)
+		p := filepath.Join(dir, f.Name())
		// If it is ignored, just don't do anything with the file and skip over it.
		if err := fs.IsIgnored(p); err != nil {
			return nil
		}
		if err := fs.Writefile(p, f); err != nil {
-			return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
+			return wrapError(err, source)
		}
		return nil
	})
	if err != nil {
-		if strings.HasPrefix(err.Error(), "format ") {
-			return &Error{code: ErrCodeUnknownArchive}
+		if IsUnknownArchiveFormatError(err) {
+			return newFilesystemError(ErrCodeUnknownArchive, err)
		}
		return err
	}
	return nil
}
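Callers can now branch on error codes instead of matching strings. A hedged usage sketch, assuming an initialized *filesystem.Filesystem and the exported IsErrorCode helper referenced in the errors.go hunk further below; the wrapper function itself is invented for illustration:

package example

import (
	"fmt"

	"github.com/pterodactyl/wings/server/filesystem"
)

// extractUpload reacts to the typed errors that SpaceAvailableForDecompression
// and DecompressFile now return, rather than parsing error strings.
func extractUpload(fs *filesystem.Filesystem, dir, file string) error {
	if err := fs.SpaceAvailableForDecompression(dir, file); err != nil {
		if filesystem.IsErrorCode(err, filesystem.ErrCodeDiskSpace) {
			return fmt.Errorf("not enough disk space to unpack %s", file)
		}
		return err
	}
	if err := fs.DecompressFile(dir, file); err != nil {
		if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) {
			return fmt.Errorf("%s is not an archive format wings understands", file)
		}
		return err
	}
	return nil
}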

View File

@@ -1,13 +1,14 @@
package filesystem

import (
-	"emperror.dev/errors"
-	"github.com/apex/log"
-	"github.com/karrick/godirwalk"
	"sync"
	"sync/atomic"
	"syscall"
	"time"
+
+	"emperror.dev/errors"
+	"github.com/apex/log"
+	"github.com/karrick/godirwalk"
)

type SpaceCheckingOpts struct {
@@ -48,7 +49,7 @@ func (fs *Filesystem) SetDiskLimit(i int64) {
// no space, rather than a boolean value.
func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
	if !fs.HasSpaceAvailable(allowStaleValue) {
-		return &Error{code: ErrCodeDiskSpace}
+		return newFilesystemError(ErrCodeDiskSpace, nil)
	}
	return nil
}
@@ -200,16 +201,13 @@ func (fs *Filesystem) HasSpaceFor(size int64) error {
	if fs.MaxDisk() == 0 {
		return nil
	}
	s, err := fs.DiskUsage(true)
	if err != nil {
		return err
	}
	if (s + size) > fs.MaxDisk() {
-		return &Error{code: ErrCodeDiskSpace}
+		return newFilesystemError(ErrCodeDiskSpace, nil)
	}
	return nil
}
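HasSpaceFor keeps the same contract as HasSpaceErr: nil when the write fits (or when no limit is set), a typed ErrCodeDiskSpace error otherwise. A small usage sketch, assuming an initialized *filesystem.Filesystem; the wrapper function is invented for illustration:

package example

import (
	"bytes"

	"github.com/pterodactyl/wings/server/filesystem"
)

// writeIfRoom checks the disk usage figure before attempting a write so the
// caller fails fast with ErrCodeDiskSpace instead of writing a partial file.
func writeIfRoom(fs *filesystem.Filesystem, path string, data []byte) error {
	if err := fs.HasSpaceFor(int64(len(data))); err != nil {
		// err unwraps to *filesystem.Error with code ErrCodeDiskSpace.
		return err
	}
	return fs.Writefile(path, bytes.NewReader(data))
}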

View File

@@ -4,6 +4,7 @@ import (
	"fmt"
	"os"
	"path/filepath"
+	"strings"

	"emperror.dev/errors"
	"github.com/apex/log"
@@ -34,6 +35,14 @@ type Error struct {
	path string
}

+// newFilesystemError returns a new error instance with a stack trace associated.
+func newFilesystemError(code ErrorCode, err error) error {
+	if err != nil {
+		return errors.WithStackDepth(&Error{code: code, err: err}, 1)
+	}
+	return errors.WithStackDepth(&Error{code: code}, 1)
+}
+
// Code returns the ErrorCode for this specific error instance.
func (e *Error) Code() ErrorCode {
	return e.code
@@ -63,13 +72,13 @@ func (e *Error) Error() string {
	case ErrCodeUnknownError:
		fallthrough
	default:
-		return fmt.Sprintf("filesystem: an error occurred: %s", e.Cause())
+		return fmt.Sprintf("filesystem: an error occurred: %s", e.Unwrap())
	}
}

-// Cause returns the underlying cause of this filesystem error. In some cases
+// Unwrap returns the underlying cause of this filesystem error. In some cases
// there may not be a cause present, in which case nil will be returned.
-func (e *Error) Cause() error {
+func (e *Error) Unwrap() error {
	return e.err
}
@@ -113,20 +122,26 @@ func IsErrorCode(err error, code ErrorCode) bool {
	return false
}

-// NewBadPathResolution returns a new BadPathResolution error.
-func NewBadPathResolution(path string, resolved string) *Error {
-	return &Error{code: ErrCodePathResolution, path: path, resolved: resolved}
+// IsUnknownArchiveFormatError checks if the error is due to the archive being
+// in an unexpected file format.
+func IsUnknownArchiveFormatError(err error) bool {
+	if err != nil && strings.HasPrefix(err.Error(), "format ") {
+		return true
+	}
+	return false
}

-// WrapError wraps the provided error as a Filesystem error and attaches the
+// NewBadPathResolution returns a new BadPathResolution error.
+func NewBadPathResolution(path string, resolved string) error {
+	return errors.WithStackDepth(&Error{code: ErrCodePathResolution, path: path, resolved: resolved}, 1)
+}
+
+// wrapError wraps the provided error as a Filesystem error and attaches the
// provided resolved source to it. If the error is already a Filesystem error
// no action is taken.
-func WrapError(err error, resolved string) *Error {
-	if err == nil {
-		return nil
+func wrapError(err error, resolved string) error {
+	if err == nil || IsFilesystemError(err) {
+		return err
	}
-	if IsFilesystemError(err) {
-		return err.(*Error)
-	}
-	return &Error{code: ErrCodeUnknownError, err: err, resolved: resolved}
+	return errors.WithStackDepth(&Error{code: ErrCodeUnknownError, err: err, resolved: resolved}, 1)
}
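Renaming Cause to Unwrap is what lets the standard errors package (and the emperror wrappers used above) see through *Error, since errors.Is and errors.As walk the Unwrap chain. A stdlib-only illustration of that mechanism, independent of wings:

package main

import (
	"errors"
	"fmt"
	"os"
)

// wrapped mimics the filesystem.Error shape: it stores an underlying error and
// exposes it via Unwrap so the errors package can traverse it.
type wrapped struct{ err error }

func (w *wrapped) Error() string { return "filesystem: an error occurred: " + w.err.Error() }
func (w *wrapped) Unwrap() error { return w.err }

func main() {
	err := &wrapped{err: os.ErrNotExist}

	// Because Unwrap is implemented, errors.Is can match the underlying cause
	// even though err itself is a different type.
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true

	var target *wrapped
	fmt.Println(errors.As(err, &target)) // true, and target now points at err
}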

View File

@@ -1,13 +1,45 @@
package filesystem

import (
-	. "github.com/franela/goblin"
+	"io"
	"testing"
+	"emperror.dev/errors"
+	. "github.com/franela/goblin"
)

+type stackTracer interface {
+	StackTrace() errors.StackTrace
+}
+
func TestFilesystem_PathResolutionError(t *testing.T) {
	g := Goblin(t)

+	g.Describe("NewFilesystemError", func() {
+		g.It("includes a stack trace for the error", func() {
+			err := newFilesystemError(ErrCodeUnknownError, nil)
+			_, ok := err.(stackTracer)
+			g.Assert(ok).IsTrue()
+		})
+
+		g.It("properly wraps the underlying error cause", func() {
+			underlying := io.EOF
+			err := newFilesystemError(ErrCodeUnknownError, underlying)
+			_, ok := err.(stackTracer)
+			g.Assert(ok).IsTrue()
+			_, ok = err.(*Error)
+			g.Assert(ok).IsFalse()
+			fserr, ok := errors.Unwrap(err).(*Error)
+			g.Assert(ok).IsTrue()
+			g.Assert(fserr.Unwrap()).IsNotNil()
+			g.Assert(fserr.Unwrap()).Equal(underlying)
+		})
+	})
+
	g.Describe("NewBadPathResolutionError", func() {
		g.It("is can detect itself as an error correctly", func() {
			err := NewBadPathResolution("foo", "bar")
@@ -18,6 +50,7 @@ func TestFilesystem_PathResolutionError(t *testing.T) {
		g.It("returns <empty> if no destination path is provided", func() {
			err := NewBadPathResolution("foo", "")
+			g.Assert(err).IsNotNil()
			g.Assert(err.Error()).Equal("filesystem: server path [foo] resolves to a location outside the server root: <empty>")
		})
	})

View File

@@ -67,7 +67,7 @@ func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
		return nil, Stat{}, err
	}
	if st.IsDir() {
-		return nil, Stat{}, &Error{code: ErrCodeIsDirectory}
+		return nil, Stat{}, newFilesystemError(ErrCodeIsDirectory, nil)
	}
	f, err := os.Open(cleaned)
	if err != nil {
@@ -144,7 +144,7 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
		return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
	} else if err == nil {
		if stat.IsDir() {
-			return &Error{code: ErrCodeIsDirectory, resolved: cleaned}
+			return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: cleaned})
		}
		currentSize = stat.Size()
	}

View File

@@ -20,7 +20,7 @@ func (fs *Filesystem) IsIgnored(paths ...string) error {
			return err
		}
		if fs.denylist.MatchesPath(sp) {
-			return &Error{code: ErrCodeDenylistFile, path: p, resolved: sp}
+			return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: sp})
		}
	}
	return nil

View File

@@ -2,11 +2,12 @@ package filesystem

import (
	"bytes"
-	"emperror.dev/errors"
-	. "github.com/franela/goblin"
	"os"
	"path/filepath"
	"testing"
+
+	"emperror.dev/errors"
+	. "github.com/franela/goblin"
)

func TestFilesystem_Path(t *testing.T) {

View File

@@ -151,6 +151,14 @@ func (s *Server) SetTransferring(state bool) {
	s.transferring.Store(state)
}

+func (s *Server) IsRestoring() bool {
+	return s.restoring.Load()
+}
+
+func (s *Server) SetRestoring(state bool) {
+	s.restoring.Store(state)
+}
+
// Removes the installer container for the server.
func (ip *InstallationProcess) RemoveContainer() error {
	err := ip.client.ContainerRemove(ip.context, ip.Server.Id()+"_installer", types.ContainerRemoveOptions{

View File

@@ -175,7 +175,6 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
		return nil, err
	}

-	s.Archiver = Archiver{Server: s}
	s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace(), s.Config().Egg.FileDenylist)

	// Right now we only support a Docker based environment, so I'm going to hard code

View File

@@ -70,6 +70,10 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
		return ErrServerIsTransferring
	}

+	if s.IsRestoring() {
+		return ErrServerIsRestoring
+	}
+
	if s.powerLock == nil {
		s.powerLock = semaphore.NewWeighted(1)
	}
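The restoring flag follows the same pattern as installing and transferring: set it around the long-running operation so HandlePowerAction rejects concurrent power actions with ErrServerIsRestoring. A rough sketch of how a restore routine might use it; the wrapper function is invented and the restore body is elided:

package example

import "github.com/pterodactyl/wings/server"

// restoreWithGuard wraps a restore operation with the new flag so concurrent
// power actions are refused instead of racing the restore for the container.
func restoreWithGuard(s *server.Server, restore func() error) error {
	s.SetRestoring(true)
	defer s.SetRestoring(false)
	return restore()
}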

View File

@@ -42,7 +42,6 @@ type Server struct {
	crasher   CrashHandler
	resources ResourceUsage

-	Archiver    Archiver                       `json:"-"`
	Environment environment.ProcessEnvironment `json:"-"`

	fs *filesystem.Filesystem
@@ -61,6 +60,7 @@ type Server struct {
	// installer process is still running.
	installing   *system.AtomicBool
	transferring *system.AtomicBool
+	restoring    *system.AtomicBool

	// The console throttler instance used to control outputs.
	throttler *ConsoleThrottler
@@ -80,6 +80,7 @@ func New(client remote.Client) (*Server, error) {
		client:       client,
		installing:   system.NewAtomicBool(false),
		transferring: system.NewAtomicBool(false),
+		restoring:    system.NewAtomicBool(false),
	}
	if err := defaults.Set(&s); err != nil {
		return nil, errors.Wrap(err, "server: could not set default values for struct")
@@ -142,12 +143,12 @@ func (s *Server) Log() *log.Entry {
	return log.WithField("server", s.Id())
}

-// Syncs the state of the server on the Panel with Wings. This ensures that we're always
-// using the state of the server from the Panel and allows us to not require successful
-// API calls to Wings to do things.
+// Sync syncs the state of the server on the Panel with Wings. This ensures that
+// we're always using the state of the server from the Panel and allows us to
+// not require successful API calls to Wings to do things.
//
-// This also means mass actions can be performed against servers on the Panel and they
-// will automatically sync with Wings when the server is started.
+// This also means mass actions can be performed against servers on the Panel
+// and they will automatically sync with Wings when the server is started.
func (s *Server) Sync() error {
	cfg, err := s.client.GetServerConfiguration(s.Context(), s.Id())
	if err != nil {
@@ -304,3 +305,24 @@ func (s *Server) IsRunning() bool {
	return st == environment.ProcessRunningState || st == environment.ProcessStartingState
}

+// APIResponse is a type returned when requesting details about a single server
+// instance on Wings. This includes the information needed by the Panel in order
+// to show resource utilization and the current state on this system.
+type APIResponse struct {
+	State         string        `json:"state"`
+	IsSuspended   bool          `json:"is_suspended"`
+	Utilization   ResourceUsage `json:"utilization"`
+	Configuration Configuration `json:"configuration"`
+}
+
+// ToAPIResponse returns the server struct as an API object that can be consumed
+// by callers.
+func (s *Server) ToAPIResponse() APIResponse {
+	return APIResponse{
+		State:         s.Environment.State(),
+		IsSuspended:   s.IsSuspended(),
+		Utilization:   s.Proc(),
+		Configuration: *s.Config(),
+	}
+}
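For reference, GET /api/servers now returns an array of objects in this shape, which a client could decode with a mirror struct. The top-level field names come from the json tags above; the nested utilization and configuration field names shown here are assumptions, and the payload values are invented:

package main

import (
	"encoding/json"
	"fmt"
)

// serverDetails mirrors the top level of server.APIResponse; only a couple of
// nested fields are included for illustration.
type serverDetails struct {
	State       string `json:"state"`
	IsSuspended bool   `json:"is_suspended"`
	Utilization struct {
		MemoryBytes uint64 `json:"memory_bytes"` // assumed ResourceUsage field name
	} `json:"utilization"`
	Configuration struct {
		UUID string `json:"uuid"` // assumed Configuration field name
	} `json:"configuration"`
}

func main() {
	// Example payload in the shape emitted by ToAPIResponse (values invented).
	payload := []byte(`[{"state":"running","is_suspended":false,"utilization":{"memory_bytes":1048576},"configuration":{"uuid":"abc"}}]`)

	var servers []serverDetails
	if err := json.Unmarshal(payload, &servers); err != nil {
		panic(err)
	}
	fmt.Printf("%d server(s), first state: %s\n", len(servers), servers[0].State)
}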

View File

@@ -1,23 +1,18 @@
package system

import (
-	"archive/tar"
-	"archive/zip"
	"bufio"
	"bytes"
-	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
-	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"emperror.dev/errors"
-	"github.com/mholt/archiver/v3"
)

var cr = []byte(" \r")
@@ -41,22 +36,6 @@ func MustInt(v string) int {
	return i
}

-// ExtractArchiveSourceName looks for the provided archiver.File's name if it is
-// a type that is supported, otherwise it returns an error to the caller.
-func ExtractArchiveSourceName(f archiver.File, dir string) (name string, err error) {
-	switch s := f.Sys().(type) {
-	case *tar.Header:
-		name = s.Name
-	case *gzip.Header:
-		name = s.Name
-	case *zip.FileHeader:
-		name = s.Name
-	default:
-		err = errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String()))
-	}
-	return name, err
-}
-
func ScanReader(r io.Reader, callback func(line string)) error {
	br := bufio.NewReader(r)
	// Avoid constantly re-allocating memory when we're flooding lines through this
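The removed helper existed because archiver.File.Name() comes from the entry's FileInfo, which only carries the base name, while the full in-archive path lives on the underlying *tar.Header. A stdlib-only demonstration of that difference (no wings or archiver dependency):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Build a one-entry tar in memory with a nested path.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello")
	_ = tw.WriteHeader(&tar.Header{Name: "plugins/config.yml", Mode: 0o644, Size: int64(len(body))})
	_, _ = tw.Write(body)
	_ = tw.Close()

	// Read it back and compare the two name sources.
	tr := tar.NewReader(&buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("header name:  ", hdr.Name)              // plugins/config.yml
		fmt.Println("FileInfo name:", hdr.FileInfo().Name()) // config.yml
	}
}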