Compare commits

v1.0.0-bet ... v1.0.0-bet — 75 commits
Commits in this range, by abbreviated SHA1 (the compare view's author and date columns were empty):

e7746a8359, cb850fd81a, 5079c67aee, e28c05ae56, 21e58b57a1, 16467fa7ff,
0cbaad5c72, a00288aa64, 6de18f09e5, 8315ff8ae1, 0b9d923d15, f0eeaae747,
085a02726b, 4f1b0c67d6, 6e1844a8c9, 7c3da84248, 1b5684e6f8, 115131575d,
21303dc517, daf682b991, a72d6f3768, d262c12b43, f3c8220bd9, 7e1b7e7f36,
b2d34cf8e7, a635cdd6b2, ae46add8ef, a4e6c4b701, f4c10e5a23, b64f1897fb,
6fd7ed23e3, a98e376593, eefc11bd0d, 60ebde4447, b3eba78743, 233cefd129,
d60b2d6163, 292f0d6452, 7147f477e2, 2cef055ff2, daf401e326, c1e591c99b,
79928aff76, 1f1eb507a9, 93228933bf, 7afd5854bd, fe531e400d, 1c92178091,
be990c9620, 31d00333a7, 7516ef1aa4, 5ef58cadee, a110d5768f, 13b89b93f2,
39f3408e4f, c04042d07a, 7d9c608f6b, 7a456dcac4, c071df2a31, f7948939eb,
f3419495cd, 9f95efa3ae, 3a6050446f, 63e7bde39c, d339996b4e, deb9305f56,
c5f4c3cfcb, 860e300c22, 82912595b7, 65809b5731, e5b844d2c4, ea2630946a,
79a582a5f2, 4483bfa2aa, 662eb17241
Files changed

Note: new files are shown fully as additions (`+`). For already-existing files, the capture did not preserve per-line add/remove markers; they are restored below only where the before/after pairing is unambiguous.

.github/workflows/build-test.yml — new file (vendored), 33 lines

```diff
@@ -0,0 +1,33 @@
+name: "Build & Test"
+
+on:
+  push:
+    branches-ignore:
+      - 'master'
+      - 'release/**'
+  pull_request:
+
+jobs:
+  build:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.14.2'
+
+      - name: Build
+        run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -ldflags "-X github.com/pterodactyl/wings/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_linux_amd64 -v wings.go
+
+      - name: Test
+        run: go test ./...
+
+      - name: Compress binary and make it executable
+        if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
+        run: upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
+
+      - uses: actions/upload-artifact@v2
+        if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
+        with:
+          name: wings_linux_amd64
+          path: build/wings_linux_amd64
```
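Two details of the Build step are worth flagging. With the go tool, a repeated `-ldflags` typically overrides the earlier one, so the `-s -w` strip flags here are likely discarded in favor of the `-X` version stamp (and `GIT_COMMIT` is not defined in the step's environment, so the stamped value may reduce to `dev-`). The `-X` flag itself rewrites a package-level string variable at link time. A minimal sketch of the variable it targets — an assumption about `system/const.go`, which the release workflow's `sed` also edits:

```go
// Hypothetical sketch of system/const.go; the real file may differ.
// -X can only override a package-level string *variable*, so Version
// must be declared as a var, not a const, for the build flag to work:
//   go build -ldflags "-X github.com/pterodactyl/wings/system.Version=..."
package system

var Version = "develop"
```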
.github/workflows/codeql-analysis.yml — new file (vendored), 35 lines

```diff
@@ -0,0 +1,35 @@
+name: "Code scanning - action"
+
+on:
+  push:
+  pull_request:
+  schedule:
+    - cron: '0 21 * * 6'
+
+jobs:
+  CodeQL-Build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+      with:
+        # We must fetch at least the immediate parents so that if this is
+        # a pull request then we can checkout the head.
+        fetch-depth: 2
+
+    # If this run was triggered by a pull request event, then checkout
+    # the head of the pull request instead of the merge commit.
+    - run: git checkout HEAD^2
+      if: ${{ github.event_name == 'pull_request' }}
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      # Override language selection by uncommenting this and choosing your languages
+      with:
+        languages: go
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1
```
.github/workflows/release.yml — new file (vendored), 87 lines

```diff
@@ -0,0 +1,87 @@
+name: "Release"
+
+on:
+  push:
+    tags:
+      - 'v*'
+
+jobs:
+  release:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v2
+        with:
+          go-version: '^1.14.2'
+
+      - name: Build
+        env:
+          REF: ${{ github.ref }}
+        run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -ldflags "-X github.com/pterodactyl/wings/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_linux_amd64 -v wings.go
+
+      - name: Test
+        run: go test ./...
+
+      - name: Compress binary and make it executable
+        run: upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
+
+      - name: Extract changelog
+        env:
+          REF: ${{ github.ref }}
+        run: |
+          sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG
+          echo ::set-output name=version_name::`sed -nr "s/^## (${REF:10} .*)$/\1/p" CHANGELOG.md`
+
+      - name: Create checksum and add to changelog
+        run: |
+          SUM=`cd build && sha256sum wings_linux_amd64`
+          echo -e "\n#### SHA256 Checksum\n\n\`\`\`\n$SUM\n\`\`\`\n" >> ./RELEASE_CHANGELOG
+          echo $SUM > checksum.txt
+
+      - name: Create release branch
+        env:
+          REF: ${{ github.ref }}
+        run: |
+          BRANCH=release/${REF:10}
+          git config --local user.email "ci@pterodactyl.io"
+          git config --local user.name "Pterodactyl CI"
+          git checkout -b $BRANCH
+          git push -u origin $BRANCH
+          sed -i "s/ Version = \".*\"/ Version = \"${REF:11}\"/" system/const.go
+          git add system/const.go
+          git commit -m "bump version for release"
+          git push
+
+      - name: Create Release
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ github.ref }}
+          release_name: ${{ steps.extract_changelog.outputs.version_name }}
+          body_path: ./RELEASE_CHANGELOG
+          draft: true
+          prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
+
+      - name: Upload binary
+        id: upload-release-binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }}
+          asset_path: build/wings_linux_amd64
+          asset_name: wings_linux_amd64
+          asset_content_type: application/octet-stream
+
+      - name: Upload checksum
+        id: upload-release-checksum
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ steps.create_release.outputs.upload_url }}
+          asset_path: ./checksum.txt
+          asset_name: checksum.txt
+          asset_content_type: text/plain
```
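The `${REF:10}` and `${REF:11}` expressions above are bash substring expansions of `github.ref`: on a tag push the ref looks like `refs/tags/v1.0.0`, so offset 10 strips the ten-character `refs/tags/` prefix (leaving `v1.0.0`) and offset 11 also drops the leading `v` (leaving `1.0.0`, the value written into `system/const.go`). The same extraction expressed in Go, as a sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	ref := "refs/tags/v1.0.0" // example github.ref value for a tag push

	tag := strings.TrimPrefix(ref, "refs/tags/") // equivalent to ${REF:10}
	version := strings.TrimPrefix(tag, "v")      // equivalent to ${REF:11}

	fmt.Println(tag)     // v1.0.0
	fmt.Println(version) // 1.0.0
}
```

One detail worth checking against the live workflow: the `Create Release` step reads `steps.extract_changelog.outputs.version_name`, which only resolves if the `Extract changelog` step carries `id: extract_changelog`; no `id` appears in the capture above, possibly because it was lost in extraction.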
(panel API — `PanelRequest.ValidateSftpCredentials`; the file header for this diff was not captured, and line markers are shown as unprefixed where the pairing is unclear)

```diff
@@ -4,7 +4,6 @@ import (
	"encoding/json"
	"github.com/pkg/errors"
	"github.com/pterodactyl/sftp-server"
	"go.uber.org/zap"
)

func (r *PanelRequest) ValidateSftpCredentials(request sftp_server.AuthenticationRequest) (*sftp_server.AuthenticationResponse, error) {
@@ -23,13 +22,10 @@ func (r *PanelRequest) ValidateSftpCredentials(request sftp_server.Authenticatio

	if r.HasError() {
		if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 {
			zap.S().Debugw("failed to validate server credentials for SFTP", zap.String("error", r.Error().String()))

			return nil, new(sftp_server.InvalidCredentialsError)
		}

		rerr := errors.New(r.Error().String())
		zap.S().Warnw("error validating SFTP credentials", zap.Error(rerr))

		return nil, rerr
	}
```
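The hunk above returns a typed `*sftp_server.InvalidCredentialsError` for 4xx responses, which lets callers treat a bad login differently from an operational failure. A hedged sketch of a hypothetical caller making that distinction — the function and types are those shown in the hunk; the wrapper itself is assumed, not part of the diff:

```go
// Hypothetical caller; illustrates handling of the typed error only.
func authenticate(r *PanelRequest, req sftp_server.AuthenticationRequest) *sftp_server.AuthenticationResponse {
	resp, err := r.ValidateSftpCredentials(req)
	if err != nil {
		// A *sftp_server.InvalidCredentialsError means the panel rejected
		// the login (4xx); deny quietly rather than logging a warning.
		if _, ok := err.(*sftp_server.InvalidCredentialsError); ok {
			return nil
		}
		// Anything else (panel unreachable, 5xx, ...) is worth surfacing.
		zap.S().Warnw("error validating SFTP credentials", zap.Error(err))
		return nil
	}
	return resp
}
```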
cmd/diagnostics.go — new file, 226 lines

```diff
@@ -0,0 +1,226 @@
+package cmd
+
+import (
+    "context"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "os/exec"
+    "path"
+    "strings"
+
+    "github.com/AlecAivazis/survey/v2"
+    "github.com/AlecAivazis/survey/v2/terminal"
+    "github.com/docker/cli/components/engine/pkg/parsers/operatingsystem"
+    "github.com/docker/docker/api/types"
+    "github.com/docker/docker/client"
+    "github.com/docker/docker/pkg/parsers/kernel"
+    "github.com/pterodactyl/wings/config"
+    "github.com/pterodactyl/wings/system"
+    "github.com/spf13/cobra"
+)
+
+const DefaultHastebinUrl = "https://hastebin.com"
+
+var (
+    diagnosticsArgs struct {
+        IncludeEndpoints   bool
+        IncludeLogs        bool
+        ReviewBeforeUpload bool
+        HastebinURL        string
+    }
+)
+
+var diagnosticsCmd = &cobra.Command{
+    Use:   "diagnostics",
+    Short: "Collect diagnostics information.",
+    Run:   diagnosticsCmdRun,
+}
+
+func init() {
+    diagnosticsCmd.PersistentFlags().StringVar(&diagnosticsArgs.HastebinURL, "hastebin-url", DefaultHastebinUrl, "The url of the hastebin instance to use.")
+}
+
+// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.
+// We collect:
+// - wings and docker versions
+// - relevant parts of daemon configuration
+// - the docker debug output
+// - running docker containers
+// - logs
+func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
+    questions := []*survey.Question{
+        {
+            Name:   "IncludeEndpoints",
+            Prompt: &survey.Confirm{Message: "Do you want to include endpoints (i.e. the FQDN/IP of your panel)?", Default: false},
+        },
+        {
+            Name:   "IncludeLogs",
+            Prompt: &survey.Confirm{Message: "Do you want to include the latest logs?", Default: true},
+        },
+        {
+            Name: "ReviewBeforeUpload",
+            Prompt: &survey.Confirm{
+                Message: "Do you want to review the collected data before uploading to hastebin.com?",
+                Help:    "The data, especially the logs, might contain sensitive information, so you should review it. You will be asked again if you want to upload.",
+                Default: true,
+            },
+        },
+    }
+    if err := survey.Ask(questions, &diagnosticsArgs); err != nil {
+        if err == terminal.InterruptErr {
+            return
+        }
+        panic(err)
+    }
+
+    dockerVersion, dockerInfo, dockerErr := getDockerInfo()
+    _ = dockerInfo
+
+    output := &strings.Builder{}
+    fmt.Fprintln(output, "Pterodactyl Wings - Diagnostics Report")
+    printHeader(output, "Versions")
+    fmt.Fprintln(output, "wings:", system.Version)
+    if dockerErr == nil {
+        fmt.Fprintln(output, "Docker", dockerVersion.Version)
+    }
+    if v, err := kernel.GetKernelVersion(); err == nil {
+        fmt.Fprintln(output, "Kernel:", v)
+    }
+    if os, err := operatingsystem.GetOperatingSystem(); err == nil {
+        fmt.Fprintln(output, "OS:", os)
+    }
+
+    printHeader(output, "Wings Configuration")
+    if cfg, err := config.ReadConfiguration(config.DefaultLocation); cfg != nil {
+        fmt.Fprintln(output, "Panel Location:", redact(cfg.PanelLocation))
+        fmt.Fprintln(output, "Api Host:", redact(cfg.Api.Host))
+        fmt.Fprintln(output, "Api Port:", cfg.Api.Port)
+        fmt.Fprintln(output, "Api Ssl Enabled:", cfg.Api.Ssl.Enabled)
+        fmt.Fprintln(output, "Api Ssl Certificate:", redact(cfg.Api.Ssl.CertificateFile))
+        fmt.Fprintln(output, "Api Ssl Key:", redact(cfg.Api.Ssl.KeyFile))
+        fmt.Fprintln(output, "Sftp Address:", redact(cfg.System.Sftp.Address))
+        fmt.Fprintln(output, "Sftp Port:", cfg.System.Sftp.Port)
+        fmt.Fprintln(output, "Sftp Read Only:", cfg.System.Sftp.ReadOnly)
+        fmt.Fprintln(output, "Sftp Diskchecking Disabled:", cfg.System.Sftp.DisableDiskChecking)
+        fmt.Fprintln(output, "System Root Directory:", cfg.System.RootDirectory)
+        fmt.Fprintln(output, "System Logs Directory:", cfg.System.LogDirectory)
+        fmt.Fprintln(output, "System Data Directory:", cfg.System.Data)
+        fmt.Fprintln(output, "System Archive Directory:", cfg.System.ArchiveDirectory)
+        fmt.Fprintln(output, "System Backup Directory:", cfg.System.BackupDirectory)
+        fmt.Fprintln(output, "System Username:", cfg.System.Username)
+        fmt.Fprintln(output, "Debug Enabled:", cfg.Debug)
+    } else {
+        fmt.Println("Failed to load configuration.", err)
+    }
+
+    printHeader(output, "Docker: Info")
+    fmt.Fprintln(output, "Server Version:", dockerInfo.ServerVersion)
+    fmt.Fprintln(output, "Storage Driver:", dockerInfo.Driver)
+    if dockerInfo.DriverStatus != nil {
+        for _, pair := range dockerInfo.DriverStatus {
+            fmt.Fprintf(output, "  %s: %s\n", pair[0], pair[1])
+        }
+    }
+    if dockerInfo.SystemStatus != nil {
+        for _, pair := range dockerInfo.SystemStatus {
+            fmt.Fprintf(output, "  %s: %s\n", pair[0], pair[1])
+        }
+    }
+    fmt.Fprintln(output, "LoggingDriver:", dockerInfo.LoggingDriver)
+    fmt.Fprintln(output, "CgroupDriver:", dockerInfo.CgroupDriver)
+    if len(dockerInfo.Warnings) > 0 {
+        for _, w := range dockerInfo.Warnings {
+            fmt.Fprintln(output, w)
+        }
+    }
+
+    printHeader(output, "Docker: Running Containers")
+    c := exec.Command("docker", "ps")
+    if co, err := c.Output(); err == nil {
+        output.Write(co)
+    } else {
+        fmt.Fprint(output, "Couldn't list containers: ", err)
+    }
+
+    printHeader(output, "Latest Wings Logs")
+    if diagnosticsArgs.IncludeLogs {
+        fmt.Fprintln(output, "No logs found. Probably because nobody implemented logging to files yet :(")
+    } else {
+        fmt.Fprintln(output, "Logs redacted.")
+    }
+
+    fmt.Println("\n--------------- generated report ---------------")
+    fmt.Println(output.String())
+    fmt.Print("--------------- end of report ---------------\n\n")
+
+    upload := !diagnosticsArgs.ReviewBeforeUpload
+    if !upload {
+        survey.AskOne(&survey.Confirm{Message: "Upload to " + diagnosticsArgs.HastebinURL + "?", Default: false}, &upload)
+    }
+    if upload {
+        url, err := uploadToHastebin(diagnosticsArgs.HastebinURL, output.String())
+        if err == nil {
+            fmt.Println("Your report is available here: ", url)
+        }
+    }
+}
+
+func getDockerInfo() (types.Version, types.Info, error) {
+    cli, err := client.NewClientWithOpts(client.FromEnv)
+    if err != nil {
+        return types.Version{}, types.Info{}, err
+    }
+    dockerVersion, err := cli.ServerVersion(context.Background())
+    if err != nil {
+        return types.Version{}, types.Info{}, err
+    }
+    dockerInfo, err := cli.Info(context.Background())
+    if err != nil {
+        return types.Version{}, types.Info{}, err
+    }
+    return dockerVersion, dockerInfo, nil
+}
+
+func uploadToHastebin(hbUrl, content string) (string, error) {
+    r := strings.NewReader(content)
+    u, err := url.Parse(hbUrl)
+    if err != nil {
+        return "", err
+    }
+    u.Path = path.Join(u.Path, "documents")
+    res, err := http.Post(u.String(), "plain/text", r)
+    if err != nil || res.StatusCode != 200 {
+        fmt.Println("Failed to upload report to ", u.String(), err)
+        return "", err
+    }
+    pres := make(map[string]interface{})
+    body, err := ioutil.ReadAll(res.Body)
+    if err != nil {
+        fmt.Println("Failed to parse response.", err)
+        return "", err
+    }
+    json.Unmarshal(body, &pres)
+    if key, ok := pres["key"].(string); ok {
+        u, _ := url.Parse(hbUrl)
+        u.Path = path.Join(u.Path, key)
+        return u.String(), nil
+    }
+    return "", errors.New("Couldn't find key in response")
+}
+
+func redact(s string) string {
+    if !diagnosticsArgs.IncludeEndpoints {
+        return "{redacted}"
+    }
+    return s
+}
+
+func printHeader(w io.Writer, title string) {
+    fmt.Fprintln(w, "\n|\n|", title)
+    fmt.Fprintln(w, "| ------------------------------")
+}
```
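The interactive prompts in `diagnosticsCmdRun` rely on survey's convention of writing each answer into the struct field whose name matches `Question.Name`, which is how the three confirmations land directly in `diagnosticsArgs`. A self-contained sketch of that mechanism, using the same library the file imports:

```go
package main

import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
)

func main() {
	// survey.Ask matches each Question.Name to a field of the answer struct.
	var answers struct {
		IncludeLogs bool
	}
	questions := []*survey.Question{{
		Name:   "IncludeLogs",
		Prompt: &survey.Confirm{Message: "Include the latest logs?", Default: true},
	}}
	if err := survey.Ask(questions, &answers); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("include logs:", answers.IncludeLogs)
}
```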
cmd/root.go — 71 lines changed

```diff
@@ -3,14 +3,16 @@ package cmd
 import (
 	"crypto/tls"
 	"fmt"
-	"github.com/apex/log"
-	"github.com/mitchellh/colorstring"
-	"github.com/pterodactyl/wings/loggers/cli"
 	"net/http"
 	"os"
 	"path"
 	"strings"
 
+	"github.com/apex/log"
+	"github.com/mitchellh/colorstring"
+	"github.com/pterodactyl/wings/loggers/cli"
+	"golang.org/x/crypto/acme/autocert"
+
 	"github.com/pkg/errors"
 	"github.com/pkg/profile"
 	"github.com/pterodactyl/wings/config"
@@ -27,20 +29,33 @@ import (
 var configPath = config.DefaultLocation
 var debug = false
 var shouldRunProfiler = false
+var useAutomaticTls = false
+var tlsHostname = ""
+var showVersion = false
 
 var root = &cobra.Command{
 	Use:   "wings",
 	Short: "The wings of the pterodactyl game management panel",
 	Long:  ``,
+	PreRun: func(cmd *cobra.Command, args []string) {
+		if useAutomaticTls && len(tlsHostname) == 0 {
+			fmt.Println("A TLS hostname must be provided when running wings with automatic TLS, e.g.:\n\n ./wings --auto-tls --tls-hostname my.example.com")
+			os.Exit(1)
+		}
+	},
 	Run: rootCmdRun,
 }
 
 func init() {
+	root.PersistentFlags().BoolVar(&showVersion, "version", false, "show the version and exit")
 	root.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
 	root.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
 	root.PersistentFlags().BoolVar(&shouldRunProfiler, "profile", false, "pass in order to profile wings")
+	root.PersistentFlags().BoolVar(&useAutomaticTls, "auto-tls", false, "pass in order to have wings generate and manage its own SSL certificates using Let's Encrypt")
+	root.PersistentFlags().StringVar(&tlsHostname, "tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
 
 	root.AddCommand(configureCmd)
+	root.AddCommand(diagnosticsCmd)
 }
 
 // Get the configuration path based on the arguments provided.
@@ -65,6 +80,11 @@ func readConfiguration() (*config.Configuration, error) {
 }
 
 func rootCmdRun(*cobra.Command, []string) {
+	if showVersion {
+		fmt.Println(system.Version)
+		os.Exit(0)
+	}
+
 	if shouldRunProfiler {
 		defer profile.Start().Stop()
 	}
@@ -149,7 +169,7 @@ func rootCmdRun(*cobra.Command, []string) {
 
 	// Just for some nice log output.
 	for _, s := range server.GetServers().All() {
-		log.WithField("server", s.Uuid).Info("loaded configuration for server")
+		log.WithField("server", s.Id()).Info("loaded configuration for server")
 	}
 
 	// Create a new WaitGroup that limits us to 4 servers being bootstrapped at a time
@@ -161,17 +181,13 @@ func rootCmdRun(*cobra.Command, []string) {
 		wg.Add()
 
 		go func(s *server.Server) {
-			// Required for tracing purposes.
-			var err error
+			defer wg.Done()
 
-			defer func() {
-				s.Log().Trace("ensuring server environment exists").Stop(&err)
-				wg.Done()
-			}()
+			s.Log().Info("ensuring server environment exists")
 
 			// Create a server environment if none exists currently. This allows us to recover from Docker
 			// being reinstalled on the host system for example.
-			if err = s.Environment.Create(); err != nil {
+			if err := s.Environment.Create(); err != nil {
 				s.Log().WithField("error", err).Error("failed to process environment")
 			}
 
@@ -218,17 +234,38 @@ func rootCmdRun(*cobra.Command, []string) {
 	}
 
 	log.WithFields(log.Fields{
-		"ssl":  c.Api.Ssl.Enabled,
-		"host": c.Api.Host,
-		"port": c.Api.Port,
-	}).Info("configuring webserver...")
+		"use_ssl":      c.Api.Ssl.Enabled,
+		"use_auto_tls": useAutomaticTls && len(tlsHostname) > 0,
+		"host_address": c.Api.Host,
+		"host_port":    c.Api.Port,
+	}).Info("configuring internal webserver")
 
 	r := router.Configure()
 	addr := fmt.Sprintf("%s:%d", c.Api.Host, c.Api.Port)
 
-	if c.Api.Ssl.Enabled {
+	if useAutomaticTls && len(tlsHostname) > 0 {
+		m := autocert.Manager{
+			Prompt:     autocert.AcceptTOS,
+			Cache:      autocert.DirCache(path.Join(c.System.RootDirectory, "/.tls-cache")),
+			HostPolicy: autocert.HostWhitelist(tlsHostname),
+		}
+
+		log.WithField("hostname", tlsHostname).
+			Info("webserver is now listening with auto-TLS enabled; certificates will be automatically generated by Let's Encrypt")
+
+		// We don't use the autotls runner here since we need to specify a port other than 443
+		// to be using for SSL connections for Wings.
+		s := &http.Server{Addr: addr, TLSConfig: m.TLSConfig(), Handler: r}
+
+		go http.ListenAndServe(":http", m.HTTPHandler(nil))
+		if err := s.ListenAndServeTLS("", ""); err != nil {
+			log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlsHostname, "error": err}).
+				Fatal("failed to configure HTTP server using auto-tls")
+			os.Exit(1)
+		}
+	} else if c.Api.Ssl.Enabled {
 		if err := r.RunTLS(addr, c.Api.Ssl.CertificateFile, c.Api.Ssl.KeyFile); err != nil {
-			log.WithField("error", err).Fatal("failed to configure HTTPS server")
+			log.WithFields(log.Fields{"auto_tls": false, "error": err}).Fatal("failed to configure HTTPS server")
 			os.Exit(1)
 		}
 	} else {
```
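Two listeners cooperate in the auto-TLS branch above: the TLS server on the configured address, and a plain-HTTP listener on `":http"` (port 80). The second exists because Let's Encrypt's HTTP-01 challenge must be answered over port 80; `m.HTTPHandler(nil)` serves those challenge responses and, given a `nil` fallback, redirects all other traffic to HTTPS. The diff starts that listener fire-and-forget; a sketch that at least surfaces a bind failure, assuming the same `autocert.Manager` value `m` and the apex `log` package that are in scope above:

```go
// Sketch only: report why the challenge listener died (e.g. port 80 in use),
// instead of discarding the error as the fire-and-forget version does.
go func() {
	if err := http.ListenAndServe(":http", m.HTTPHandler(nil)); err != nil {
		log.WithField("error", err).Warn("ACME HTTP-01 challenge listener stopped")
	}
}()
```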
(configuration structs; the file headers for these two hunks were not captured)

```diff
@@ -80,6 +80,9 @@ type Configuration struct {
 	// The location where the panel is running that this daemon should connect to
 	// to collect data and send events.
 	PanelLocation string `json:"remote" yaml:"remote"`
+
+	// AllowedMounts .
+	AllowedMounts []string `json:"allowed_mounts" yaml:"allowed_mounts"`
 }
 
 // Defines the configuration of the internal SFTP server.
```

```diff
@@ -39,6 +39,9 @@ type DockerConfiguration struct {
 	// for containers run through the daemon.
 	Network DockerNetworkConfiguration `json:"network" yaml:"network"`
 
+	// Domainname is the Docker domainname for all containers.
+	Domainname string `default:"" json:"domainname" yaml:"domainname"`
+
 	// If true, container images will be updated when a server starts if there
 	// is an update available. If false the daemon will not attempt updates and will
 	// defer to the host system to manage image updates.
```
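Both additions follow the existing pattern of paired `json`/`yaml` struct tags, so the new settings surface in the daemon's config file under the keys `allowed_mounts` and `domainname`. A standalone sketch of how such keys decode — the two fields live on different structs in the real code and are combined here purely for illustration, using `gopkg.in/yaml.v2` rather than the daemon's actual loader:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Illustrative only: AllowedMounts belongs to Configuration and Domainname
// to DockerConfiguration in the diff; one struct keeps the sketch short.
type sketch struct {
	AllowedMounts []string `yaml:"allowed_mounts"`
	Domainname    string   `yaml:"domainname"`
}

func main() {
	raw := []byte("allowed_mounts:\n  - /mnt/shared\ndomainname: example.internal\n")
	var s sketch
	if err := yaml.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // {AllowedMounts:[/mnt/shared] Domainname:example.internal}
}
```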
go.mod — 23 lines changed

```diff
@@ -15,28 +15,29 @@ require (
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
	github.com/Jeffail/gabs/v2 v2.2.0
	github.com/Microsoft/go-winio v0.4.7 // indirect
	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
	github.com/apex/log v1.3.0
	github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
	github.com/aws/aws-sdk-go v1.30.14 // indirect
	github.com/beevik/etree v1.1.0
	github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929
	github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
	github.com/containerd/containerd v1.3.6 // indirect
	github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
	github.com/creasty/defaults v1.3.0
	github.com/docker/cli v17.12.1-ce-rc2+incompatible
	github.com/docker/distribution v2.7.1+incompatible // indirect
-	github.com/docker/docker v0.0.0-20180422163414-57142e89befe
+	github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible
	github.com/docker/go-connections v0.4.0
	github.com/docker/go-metrics v0.0.1 // indirect
	github.com/docker/go-units v0.3.3 // indirect
	github.com/fatih/color v1.9.0
	github.com/gabriel-vasile/mimetype v0.1.4
	github.com/gammazero/workerpool v0.0.0-20200608033439-1a5ca90a5753
	github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.0
	github.com/ghodss/yaml v1.0.0
-	github.com/gin-gonic/gin v1.6.2
+	github.com/gin-gonic/gin v1.6.3
	github.com/golang/protobuf v1.3.5 // indirect
	github.com/google/uuid v1.1.1
	github.com/gorilla/mux v1.7.4 // indirect
	github.com/gorilla/websocket v1.4.0
	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
	github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334
	github.com/icza/dyno v0.0.0-20200205103839-49cb13720835
	github.com/imdario/mergo v0.3.8
@@ -46,8 +47,7 @@ require (
	github.com/mattn/go-shellwords v1.0.10 // indirect
	github.com/mholt/archiver/v3 v3.3.0
	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.1 // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
	github.com/opencontainers/image-spec v1.0.1 // indirect
@@ -55,15 +55,14 @@ require (
	github.com/pkg/errors v0.9.1
	github.com/pkg/profile v1.4.0
	github.com/pkg/sftp v1.11.0 // indirect
-	github.com/pterodactyl/sftp-server v1.1.2
+	github.com/pterodactyl/sftp-server v1.1.4
	github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce
	github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94
	github.com/smartystreets/goconvey v1.6.4 // indirect
	github.com/spf13/cobra v0.0.7
-	github.com/stretchr/objx v0.2.0 // indirect
-	github.com/yuin/goldmark v1.1.30 // indirect
+	github.com/stretchr/testify v1.5.1 // indirect
	go.uber.org/zap v1.15.0
-	golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 // indirect
+	golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79
	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
	golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 // indirect
	golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
```
go.sum — 92 lines changed (hunks reproduced without add/remove markers, which were not preserved)

```diff
@@ -11,8 +11,6 @@ github.com/Microsoft/go-winio v0.4.7 h1:vOvDiY/F1avSWlCWiKJjdYKz2jVjTK3pWPHndeG4
github.com/Microsoft/go-winio v0.4.7/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=
github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -27,20 +25,22 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.30.14 h1:vZfX2b/fknc9wKcytbLWykM7in5k6dbQ8iHTJDUP1Ng=
github.com/aws/aws-sdk-go v1.30.14/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929 h1:MW/JDk68Rny52yI0M0N+P8lySNgB+NhpI/uAmhgOhUM=
github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249 h1:R0IDH8daQ3lODvu8YtxnIqqth5qMGCJyADoUQvmLx4o=
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249/go.mod h1:EHKW9yNEYSBpTKzuu7Y9oOrft/UlzH57rMIB03oev6M=
github.com/containerd/containerd v1.3.6 h1:SMfcKoQyWhaRsYq7290ioC6XFcHDNcHvcEMjF6ORpac=
github.com/containerd/containerd v1.3.6/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
@@ -56,12 +56,16 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/cli v17.12.1-ce-rc2+incompatible h1:ESUycEAqvFuLglAHkUW66rCc2djYtd3i1x231svLq9o=
github.com/docker/cli v17.12.1-ce-rc2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.0.0-20180422163414-57142e89befe h1:VW8TnWi0CZgg7oCv0wH6evNwkzcJg/emnw4HrVIWws4=
github.com/docker/docker v0.0.0-20180422163414-57142e89befe/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:iWPIG7pWIsCwT6ZtHnTUpoVMnete7O/pzd9HFE3+tn8=
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
@@ -75,14 +79,18 @@ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v0.1.4 h1:5mcsq3+DXypREUkW+1juhjeKmE/XnWgs+paHMJn7lf8=
github.com/gabriel-vasile/mimetype v0.1.4/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI=
github.com/gammazero/deque v0.0.0-20200227231300-1e9af0e52b46 h1:iX4+rD9Fjdx8SkmSO/O5WAIX/j79ll3kuqv5VdYt9J8=
github.com/gammazero/deque v0.0.0-20200227231300-1e9af0e52b46/go.mod h1:D90+MBHVc9Sk1lJAbEVgws0eYEurY4mv2TDso3Nxh3w=
github.com/gammazero/workerpool v0.0.0-20200608033439-1a5ca90a5753 h1:oSQ61LxZkz3Z4La0O5cbyVDvLWEfbNgiD43cSPdjPQQ=
github.com/gammazero/workerpool v0.0.0-20200608033439-1a5ca90a5753/go.mod h1:/XWO2YAUUpPi3smDlFBl0vpX0JHwUomDM/oRMwRmnSs=
github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.0 h1:7KeiSrO5puFH1+vdAdbpiie2TrNnkvFc/eOQzT60Z2k=
github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.0/go.mod h1:D1+3UtCYAJ1os1PI+zhTVEj6Tb+IHJvXjXKz83OstmM=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.6.2 h1:88crIK23zO6TqlQBt+f9FrPJNKm9ZEr7qjp9vl/d5TM=
github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
@@ -95,7 +103,6 @@ github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD87
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
@@ -108,6 +115,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
@@ -124,10 +132,10 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -144,10 +152,10 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
@@ -198,6 +206,7 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
@@ -213,6 +222,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
@@ -240,25 +251,31 @@ github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
github.com/pkg/sftp v1.8.3 h1:9jSe2SxTM8/3bXZjtqnkgTBW+lA8db0knZJyns7gpBA=
github.com/pkg/sftp v1.8.3/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI=
github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/pterodactyl/sftp-server v1.1.1 h1:IjuOy21BNZxfejKnXG1RgLxXAYylDqBVpbKZ6+fG5FQ=
github.com/pterodactyl/sftp-server v1.1.1/go.mod h1:b1VVWYv0RF9rxSZQqaD/rYXriiRMNPsbV//CKMXR4ag=
github.com/pterodactyl/sftp-server v1.1.2 h1:5bI9upe0kBRn9ALDabn9S2GVU5gkYvSErYgs32dAKjk=
github.com/pterodactyl/sftp-server v1.1.2/go.mod h1:KjSONrenRr1oCh94QIVAU6yEzMe+Hd7r/JHrh5/oQHs=
github.com/pterodactyl/sftp-server v1.1.4 h1:JESuEuZ+d2tajMjuQblPOlGISM9Uc2xOzk7irVF9PQ0=
github.com/pterodactyl/sftp-server v1.1.4/go.mod h1:KjSONrenRr1oCh94QIVAU6yEzMe+Hd7r/JHrh5/oQHs=
github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce h1:aP+C+YbHZfOQlutA4p4soHi7rVUqHQdWEVMSkHfDTqY=
github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
@@ -273,6 +290,7 @@ github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
@@ -290,7 +308,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -298,6 +315,7 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
@@ -317,21 +335,14 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMx
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM=
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
@@ -339,8 +350,6 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -352,10 +361,6 @@ golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 h1:IaQbIIB2X/Mp/DKctl6ROxz1KyMlKp4uyvL6+kQ7C88=
golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -363,8 +368,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@@ -377,11 +381,9 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 h1:Jcxah/M+oLZ/R4/z5RzfPzGbPXnVDPkEDtf2JnuxN+U=
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -403,13 +405,10 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI=
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
@@ -429,13 +428,9 @@ golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290 h1:NXNmtp0ToD36cui5IqWy95LC4Y6vT/4y3RnPxlQPinU=
golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b h1:zSzQJAznWxAh9fZxiPy2FZo+ZZEYoYFYYDYdOrU7AaM=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5 h1:MeC2gMlMdkd67dn17MEby3rGXRxZtWeiRXOnISfTQ74=
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools/gopls v0.1.3/go.mod h1:vrCQzOKxvuiZLjCKSmbbov04oeBQQOb4VQqwYK2PWIY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
@@ -444,8 +439,10 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -471,4 +468,5 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
```
@@ -29,12 +29,10 @@ func New(data []byte) (*Installer, error) {
return nil, NewValidationError("service egg provided was not in a valid format")
}

s := &server.Server{
cfg := &server.Configuration{
Uuid: getString(data, "uuid"),
Suspended: false,
State: server.ProcessOfflineState,
Invocation: getString(data, "invocation"),
EnvVars: make(map[string]string),
Build: server.BuildSettings{
MemoryLimit: getInt(data, "build", "memory"),
Swap: getInt(data, "build", "swap"),
@@ -43,20 +41,18 @@ func New(data []byte) (*Installer, error) {
DiskSpace: getInt(data, "build", "disk"),
Threads: getString(data, "build", "threads"),
},
Allocations: server.Allocations{
Mappings: make(map[string][]int),
},
CrashDetectionEnabled: true,
}

s.Allocations.DefaultMapping.Ip = getString(data, "allocations", "default", "ip")
s.Allocations.DefaultMapping.Port = int(getInt(data, "allocations", "default", "port"))
cfg.Allocations.DefaultMapping.Ip = getString(data, "allocations", "default", "ip")
cfg.Allocations.DefaultMapping.Port = int(getInt(data, "allocations", "default", "port"))

// Unmarshal the environment variables from the request into the server struct.
if b, _, _, err := jsonparser.Get(data, "environment"); err != nil {
return nil, errors.WithStack(err)
} else {
s.EnvVars = make(map[string]string)
if err := json.Unmarshal(b, &s.EnvVars); err != nil {
cfg.EnvVars = make(server.EnvironmentVariables)
if err := json.Unmarshal(b, &cfg.EnvVars); err != nil {
return nil, errors.WithStack(err)
}
}
@@ -65,15 +61,15 @@ func New(data []byte) (*Installer, error) {
if b, _, _, err := jsonparser.Get(data, "allocations", "mappings"); err != nil {
return nil, errors.WithStack(err)
} else {
s.Allocations.Mappings = make(map[string][]int)
if err := json.Unmarshal(b, &s.Allocations.Mappings); err != nil {
cfg.Allocations.Mappings = make(map[string][]int)
if err := json.Unmarshal(b, &cfg.Allocations.Mappings); err != nil {
return nil, errors.WithStack(err)
}
}

s.Container.Image = getString(data, "container", "image")
cfg.Container.Image = getString(data, "container", "image")

c, rerr, err := api.NewRequester().GetServerConfiguration(s.Uuid)
c, rerr, err := api.NewRequester().GetServerConfiguration(cfg.Uuid)
if err != nil || rerr != nil {
if err != nil {
return nil, errors.WithStack(err)
@@ -82,21 +78,18 @@ func New(data []byte) (*Installer, error) {
return nil, errors.New(rerr.String())
}

// Destroy the temporary server instance.
s = nil

// Create a new server instance using the configuration we wrote to the disk
// so that everything gets instantiated correctly on the struct.
s2, err := server.FromConfiguration(c)
s, err := server.FromConfiguration(c)

return &Installer{
server: s2,
server: s,
}, err
}

// Returns the UUID associated with this installer instance.
func (i *Installer) Uuid() string {
return i.server.Uuid
return i.server.Id()
}

// Return the server instance.
@@ -69,25 +69,44 @@ func (h *Handler) HandleLog(e *log.Entry) error {
continue
}

if err, ok := e.Fields.Get("error").(error); ok {
var br = color2.New(color2.Bold, color2.FgRed)

if e, ok := errors.Cause(err).(tracer); ok {
st := e.StackTrace()

l := len(st)
if l > 5 {
l = 5
}

fmt.Fprintf(h.Writer, "\n%s%+v\n\n", br.Sprintf("Stacktrace:"), st[0:l])
if err, ok := e.Fields.Get("error").(error); ok {
fmt.Fprintf(h.Writer, "\n%s%+v\n\n", br.Sprintf("Stacktrace:"), getErrorStack(err, false))
} else {
fmt.Fprintf(h.Writer, "\n%s\n%+v\n\n", br.Sprintf("Stacktrace:"), err)
}
} else {
fmt.Printf("\n\nINVALID ERROR\n\n")
fmt.Fprintf(h.Writer, "\n%s%+v\n\n", br.Sprintf("Invalid Error:"), err)
}
}

return nil
}

func getErrorStack(err error, i bool) errors.StackTrace {
e, ok := errors.Cause(err).(tracer)
if !ok {
if i {
// Just abort out of this and return a stacktrace leading up to this point. It isn't perfect
// but it'll at least include what function led to this being called which we can then handle.
return errors.Wrap(err, "failed to generate stacktrace for caught error").(tracer).StackTrace()
}

return getErrorStack(errors.New(err.Error()), true)
}

st := e.StackTrace()

l := len(st)
// If this was an internal stack generation we're going to skip over the top four items in the stack
// trace since they'll point to the error that was generated by this function.
f := 0
if i {
f = 4
}

if i && l > 9 {
l = 9
} else if !i && l > 5 {
l = 5
}

return st[f:l]
}
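The stack handling above leans on a detail of github.com/pkg/errors: errors it creates or wraps carry a recorded stack exposed through a StackTrace() method, which the handler asserts against via its local tracer interface. A minimal, self-contained sketch of that mechanism (the demo error text is invented; the frame cap mirrors HandleLog's limit of five):

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// tracer mirrors the unexported interface the log handler asserts against.
type tracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.Wrap(errors.New("underlying failure"), "request failed")

	// errors.Cause unwraps to the innermost error, which pkg/errors created
	// with a recorded stack; assert for the StackTrace method.
	if t, ok := errors.Cause(err).(tracer); ok {
		st := t.StackTrace()
		// Print at most the first five frames, as HandleLog does.
		if len(st) > 5 {
			st = st[:5]
		}
		fmt.Printf("%+v\n", st)
	}
}

Because Cause unwraps to the innermost error, wrapping layers added later do not hide the original recording site.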
@@ -33,7 +33,7 @@ func TrackedError(err error) *RequestError {
// generated this server for the purposes of logging.
func TrackedServerError(err error, s *server.Server) *RequestError {
return &RequestError{
Err: err,
Err: errors.WithStack(err),
Uuid: uuid.Must(uuid.NewRandom()).String(),
Message: "",
server: s,
@@ -61,6 +61,8 @@ func (e *RequestError) AbortWithStatus(status int, c *gin.Context) {
// If this error is because the resource does not exist, we likely do not need to log
// the error anywhere, just return a 404 and move on with our lives.
if os.IsNotExist(e.Err) {
e.logger().WithField("error", e.Err).Debug("encountered os.IsNotExist error while handling request")

c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested resource was not found on the system.",
})
@@ -48,7 +48,7 @@ func AuthorizationMiddleware(c *gin.Context) {
// Helper function to fetch a server out of the servers collection stored in memory.
func GetServer(uuid string) *server.Server {
return server.GetServers().Find(func(s *server.Server) bool {
return uuid == s.Uuid
return uuid == s.Id()
})
}
@@ -78,11 +78,13 @@ func Configure() *gin.Engine {
{
files.GET("/contents", getServerFileContents)
files.GET("/list-directory", getServerListDirectory)
files.PUT("/rename", putServerRenameFile)
files.PUT("/rename", putServerRenameFiles)
files.POST("/copy", postServerCopyFile)
files.POST("/write", postServerWriteFile)
files.POST("/create-directory", postServerCreateDirectory)
files.POST("/delete", postServerDeleteFile)
files.POST("/delete", postServerDeleteFiles)
files.POST("/compress", postServerCompressFiles)
files.POST("/decompress", postServerDecompressFiles)
}

backup := server.Group("/backup")
@@ -64,7 +64,7 @@ func postServerPower(c *gin.Context) {
//
// We don't really care about any of the other actions at this point, they'll all result
// in the process being stopped, which should have happened anyways if the server is suspended.
if (data.Action == "start" || data.Action == "restart") && s.Suspended {
if (data.Action == "start" || data.Action == "restart") && s.IsSuspended() {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot start or restart a server that is suspended.",
})
@@ -135,7 +135,7 @@ func postServerInstall(c *gin.Context) {
s := GetServer(c.Param("server"))

go func(serv *server.Server) {
if err := serv.Install(); err != nil {
if err := serv.Install(true); err != nil {
serv.Log().WithField("error", err).Error("failed to execute server installation process")
}
}(s)
@@ -162,7 +162,7 @@ func deleteServer(c *gin.Context) {

// Immediately suspend the server to prevent a user from attempting
// to start it while this process is running.
s.Suspended = true
s.Config().SetSuspended(true)

// If the server is currently installing, abort it.
if s.IsInstalling() {
@@ -200,9 +200,9 @@ func deleteServer(c *gin.Context) {
}
}(s.Filesystem.Path())

var uuid = s.Uuid
var uuid = s.Id()
server.GetServers().Remove(func(s2 *server.Server) bool {
return s2.Uuid == uuid
return s2.Id() == uuid
})

// Deallocate the reference to this server.
@@ -2,10 +2,14 @@ package router

import (
"bufio"
"context"
"github.com/gin-gonic/gin"
"github.com/pterodactyl/wings/server"
"golang.org/x/sync/errgroup"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
)
@@ -74,6 +78,13 @@ func getServerListDirectory(c *gin.Context) {

stats, err := s.Filesystem.ListDirectory(d)
if err != nil {
if err.Error() == "readdirent: not a directory" {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested directory does not exist.",
})
return
}

TrackedServerError(err, s).AbortWithServerError(c)
return
}
@@ -81,27 +92,49 @@
c.JSON(http.StatusOK, stats)
}
// Renames (or moves) a file for a server.
func putServerRenameFile(c *gin.Context) {
type renameFile struct {
To string `json:"to"`
From string `json:"from"`
}

// Renames (or moves) files for a server.
func putServerRenameFiles(c *gin.Context) {
s := GetServer(c.Param("server"))

var data struct {
RenameFrom string `json:"rename_from"`
RenameTo string `json:"rename_to"`
Root string `json:"root"`
Files []renameFile `json:"files"`
}
// BindJSON sends 400 if the request fails, all we need to do is return
if err := c.BindJSON(&data); err != nil {
return
}

if data.RenameFrom == "" || data.RenameTo == "" {
if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "Invalid paths were provided, did you forget to provide both a new and old path?",
"error": "No files to move or rename were provided.",
})
return
}

if err := s.Filesystem.Rename(data.RenameFrom, data.RenameTo); err != nil {
g, ctx := errgroup.WithContext(context.Background())

// Loop over the array of files passed in and perform the move or rename action against each.
for _, p := range data.Files {
pf := path.Join(data.Root, p.From)
pt := path.Join(data.Root, p.To)

g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return s.Filesystem.Rename(pf, pt)
}
})
}

if err := g.Wait(); err != nil {
TrackedServerError(err, s).AbortWithServerError(c)
return
}
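The same fan-out shape recurs in the delete handler below: each file operation runs on its own goroutine under an errgroup, the shared context is cancelled on the first failure so queued work can bail out early, and Wait surfaces that first error. A standalone sketch of the pattern, assuming plain os.Rename stands in for the server filesystem and using invented paths:

package main

import (
	"context"
	"fmt"
	"os"
	"path"

	"golang.org/x/sync/errgroup"
)

func renameAll(root string, pairs map[string]string) error {
	g, ctx := errgroup.WithContext(context.Background())

	for from, to := range pairs {
		// Capture the joined paths before starting the goroutine so each
		// closure operates on its own values, not the loop variables.
		pf, pt := path.Join(root, from), path.Join(root, to)

		g.Go(func() error {
			select {
			case <-ctx.Done():
				// Another rename already failed; skip remaining work.
				return ctx.Err()
			default:
				return os.Rename(pf, pt)
			}
		})
	}

	// Wait blocks for all goroutines and returns the first non-nil error.
	return g.Wait()
}

func main() {
	fmt.Println(renameAll("/tmp/demo", map[string]string{"a.txt": "b.txt"}))
}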
@@ -129,19 +162,44 @@ func postServerCopyFile(c *gin.Context) {
c.Status(http.StatusNoContent)
}

// Deletes a server file.
func postServerDeleteFile(c *gin.Context) {
// Deletes files from a server.
func postServerDeleteFiles(c *gin.Context) {
s := GetServer(c.Param("server"))

var data struct {
Location string `json:"location"`
Root string `json:"root"`
Files []string `json:"files"`
}
// BindJSON sends 400 if the request fails, all we need to do is return

if err := c.BindJSON(&data); err != nil {
return
}

if err := s.Filesystem.Delete(data.Location); err != nil {
if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files were specified for deletion.",
})
return
}

g, ctx := errgroup.WithContext(context.Background())

// Loop over the array of files passed in and delete them. If any of the file deletions
// fail just abort the process entirely.
for _, p := range data.Files {
pi := path.Join(data.Root, p)

g.Go(func() error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return s.Filesystem.Delete(pi)
}
})
}

if err := g.Wait(); err != nil {
TrackedServerError(err, s).AbortWithServerError(c)
return
}
@@ -188,3 +246,74 @@ func postServerCreateDirectory(c *gin.Context) {

c.Status(http.StatusNoContent)
}

func postServerCompressFiles(c *gin.Context) {
s := GetServer(c.Param("server"))

var data struct {
RootPath string `json:"root"`
Files []string `json:"files"`
}

if err := c.BindJSON(&data); err != nil {
return
}

if len(data.Files) == 0 {
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
"error": "No files were passed through to be compressed.",
})
return
}

if !s.Filesystem.HasSpaceAvailable() {
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
"error": "This server does not have enough available disk space to generate a compressed archive.",
})
return
}

f, err := s.Filesystem.CompressFiles(data.RootPath, data.Files)
if err != nil {
TrackedServerError(err, s).AbortWithServerError(c)
return
}

c.JSON(http.StatusOK, &server.Stat{
Info: f,
Mimetype: "application/tar+gzip",
})
}

func postServerDecompressFiles(c *gin.Context) {
s := GetServer(c.Param("server"))

var data struct {
RootPath string `json:"root"`
File string `json:"file"`
}

if err := c.BindJSON(&data); err != nil {
return
}

hasSpace, err := s.Filesystem.SpaceAvailableForDecompression(data.RootPath, data.File)
if err != nil {
TrackedServerError(err, s).AbortWithServerError(c)
return
}

if !hasSpace {
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
"error": "This server does not have enough available disk space to decompress this archive.",
})
return
}

if err := s.Filesystem.DecompressFile(data.RootPath, data.File); err != nil {
TrackedServerError(err, s).AbortWithServerError(c)
return
}

c.Status(http.StatusNoContent)
}
@@ -51,8 +51,10 @@ func getServerWebsocket(c *gin.Context) {
continue
}

if err := handler.HandleInbound(j); err != nil {
handler.SendErrorJson(j, err)
go func(msg websocket.Message) {
if err := handler.HandleInbound(msg); err != nil {
handler.SendErrorJson(msg, err)
}
}(j)
}
}
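Dispatching each inbound message on its own goroutine keeps one slow handler from stalling the websocket read loop, and passing j as the goroutine's argument pins a copy of the loop variable for that goroutine. A reduced, runnable sketch of the dispatch shape with a stand-in message type and handler:

package main

import "fmt"

type message struct{ Event string }

func handle(m message) error {
	if m.Event == "" {
		return fmt.Errorf("missing event")
	}
	fmt.Println("handled", m.Event)
	return nil
}

func main() {
	inbound := []message{{Event: "send logs"}, {Event: ""}}
	done := make(chan struct{}, len(inbound))

	for _, j := range inbound {
		// Pass j as an argument so each goroutine works on its own copy of
		// the loop variable instead of racing on a shared one.
		go func(msg message) {
			if err := handle(msg); err != nil {
				fmt.Println("error:", err)
			}
			done <- struct{}{}
		}(j)
	}

	for range inbound {
		<-done
	}
}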
@@ -59,7 +59,7 @@ func postCreateServer(c *gin.Context) {
go func(i *installer.Installer) {
i.Execute()

if err := i.Server().Install(); err != nil {
if err := i.Server().Install(false); err != nil {
log.WithFields(log.Fields{"server": i.Uuid(), "error": err}).Error("failed to run install process for server")
}
}(install)
@@ -98,33 +98,33 @@ func postServerArchive(c *gin.Context) {
start := time.Now()

if err := server.Archiver.Archive(); err != nil {
zap.S().Errorw("failed to get archive for server", zap.String("server", s.Uuid), zap.Error(err))
zap.S().Errorw("failed to get archive for server", zap.String("server", server.Id()), zap.Error(err))
return
}

zap.S().Debugw(
"successfully created archive for server",
zap.String("server", server.Uuid),
zap.String("server", server.Id()),
zap.Duration("time", time.Now().Sub(start).Round(time.Microsecond)),
)

r := api.NewRequester()
rerr, err := r.SendArchiveStatus(server.Uuid, true)
rerr, err := r.SendArchiveStatus(server.Id(), true)
if rerr != nil || err != nil {
if err != nil {
zap.S().Errorw("failed to notify panel with archive status", zap.String("server", server.Uuid), zap.Error(err))
zap.S().Errorw("failed to notify panel with archive status", zap.String("server", server.Id()), zap.Error(err))
return
}

zap.S().Errorw(
"panel returned an error when sending the archive status",
zap.String("server", server.Uuid),
zap.String("server", server.Id()),
zap.Error(errors.New(rerr.String())),
)
return
}

zap.S().Debugw("successfully notified panel about archive status", zap.String("server", server.Uuid))
zap.S().Debugw("successfully notified panel about archive status", zap.String("server", server.Id()))
}(s)

c.Status(http.StatusAccepted)
@@ -4,10 +4,13 @@ import (
"encoding/json"
"github.com/gbrlsnchs/jwt/v3"
"strings"
"sync"
)

type WebsocketPayload struct {
jwt.Payload
sync.RWMutex

UserID json.Number `json:"user_id"`
ServerUUID string `json:"server_uuid"`
Permissions []string `json:"permissions"`
@@ -15,11 +18,24 @@ type WebsocketPayload struct {

// Returns the JWT payload.
func (p *WebsocketPayload) GetPayload() *jwt.Payload {
p.RLock()
defer p.RUnlock()

return &p.Payload
}

func (p *WebsocketPayload) GetServerUuid() string {
p.RLock()
defer p.RUnlock()

return p.ServerUUID
}

// Checks if the given token payload has a permission string.
func (p *WebsocketPayload) HasPermission(permission string) bool {
p.RLock()
defer p.RUnlock()

for _, k := range p.Permissions {
if k == permission || (!strings.HasPrefix(permission, "admin") && k == "*") {
return true
@@ -127,7 +127,7 @@ func (h *Handler) TokenValid() error {
return errors.New("jwt does not have connect permission")
}

if h.server.Uuid != j.ServerUUID {
if h.server.Id() != j.GetServerUuid() {
return errors.New("jwt server uuid mismatch")
}
@@ -247,16 +247,7 @@ func (h *Handler) HandleInbound(m Message) error {
if state == server.ProcessOfflineState {
_ = h.server.Filesystem.HasSpaceAvailable()

resources := server.ResourceUsage{
Memory: 0,
MemoryLimit: 0,
CpuAbsolute: 0.0,
Disk: h.server.Resources.Disk,
}
resources.Network.RxBytes = 0
resources.Network.TxBytes = 0

b, _ := json.Marshal(resources)
b, _ := json.Marshal(h.server.Proc())
h.SendJson(&Message{
Event: server.StatsEvent,
Args: []string{string(b)},
@@ -280,11 +271,14 @@ func (h *Handler) HandleInbound(m Message) error {
break
case "restart":
if h.GetJwt().HasPermission(PermissionSendPowerRestart) {
if err := h.server.Environment.WaitForStop(60, false); err != nil {
return err
// If the server is already restarting don't do anything. Perhaps we send back an event
// in the future for this? For now no reason to knowingly trigger an error by trying to
// restart a process already restarting.
if h.server.Environment.IsRestarting() {
return nil
}

return h.server.Environment.Start()
return h.server.Environment.Restart()
}
break
case "kill":
17
server/allocations.go
Normal file
@@ -0,0 +1,17 @@
package server

// Defines the allocations available for a given server. When using the Docker environment
// driver these correspond to mappings for the container that allow external connections.
type Allocations struct {
// Defines the default allocation that should be used for this server. This is
// what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
// files or the startup arguments for a server.
DefaultMapping struct {
Ip string `json:"ip"`
Port int `json:"port"`
} `json:"default"`

// Mappings contains all of the ports that should be assigned to a given server
// attached to the IP they correspond to.
Mappings map[string][]int `json:"mappings"`
}
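Given those struct tags, a Panel payload in roughly the following shape would decode into Allocations. The sketch below mirrors the struct locally so it runs on its own; the addresses and ports are made up for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors server.Allocations from the new file above.
type Allocations struct {
	DefaultMapping struct {
		Ip   string `json:"ip"`
		Port int    `json:"port"`
	} `json:"default"`
	Mappings map[string][]int `json:"mappings"`
}

func main() {
	// Example payload; values are invented.
	raw := []byte(`{
		"default": {"ip": "172.18.0.1", "port": 25565},
		"mappings": {"172.18.0.1": [25565, 25566]}
	}`)

	var a Allocations
	if err := json.Unmarshal(raw, &a); err != nil {
		panic(err)
	}
	fmt.Printf("%s:%d -> %v\n", a.DefaultMapping.Ip, a.DefaultMapping.Port, a.Mappings)
}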
@@ -23,7 +23,7 @@ func (a *Archiver) ArchivePath() string {

// ArchiveName returns the name of the server's archive.
func (a *Archiver) ArchiveName() string {
return a.Server.Uuid + ".tar.gz"
return a.Server.Id() + ".tar.gz"
}

// Exists returns a boolean based off if the archive exists.
@@ -52,7 +52,12 @@ func (a *Archiver) Archive() error {
}

for _, file := range fileInfo {
files = append(files, filepath.Join(path, file.Name()))
f, err := a.Server.Filesystem.SafeJoin(path, file)
if err != nil {
return err
}

files = append(files, f)
}

stat, err := a.Stat()
@@ -20,11 +20,11 @@ type Archive struct {
Files *IncludedFiles
}

// Creates an archive at dest with all of the files definied in the included files struct.
func (a *Archive) Create(dest string, ctx context.Context) error {
f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
// Creates an archive at dst with all of the files defined in the included files struct.
func (a *Archive) Create(dst string, ctx context.Context) (os.FileInfo, error) {
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
return nil, err
}
defer f.Close()

@@ -66,14 +66,19 @@ func (a *Archive) Create(dest string, ctx context.Context) error {

// Attempt to remove the archive if there is an error, report that error to
// the logger if it fails.
if rerr := os.Remove(dest); rerr != nil && !os.IsNotExist(rerr) {
log.WithField("location", dest).Warn("failed to delete corrupted backup archive")
if rerr := os.Remove(dst); rerr != nil && !os.IsNotExist(rerr) {
log.WithField("location", dst).Warn("failed to delete corrupted backup archive")
}

return err
return nil, err
}

return nil
st, err := f.Stat()
if err != nil {
return nil, err
}

return st, nil
}

// Adds a single file to the existing tar archive writer.
@@ -99,7 +104,7 @@ func (a *Archive) addToArchive(p string, s *os.FileInfo, w *tar.Writer) error {
a.Lock()
defer a.Unlock()

if err = w.WriteHeader(header); err != nil {
if err := w.WriteHeader(header); err != nil {
return err
}

@@ -47,7 +47,7 @@ func (b *LocalBackup) Generate(included *IncludedFiles, prefix string) (*Archive
Files: included,
}

if err := a.Create(b.Path(), context.Background()); err != nil {
if _, err := a.Create(b.Path(), context.Background()); err != nil {
return nil, err
}

@@ -32,7 +32,7 @@ func (s *S3Backup) Generate(included *IncludedFiles, prefix string) (*ArchiveDet
Files: included,
}

if err := a.Create(s.Path(), context.Background()); err != nil {
if _, err := a.Create(s.Path(), context.Background()); err != nil {
return nil, err
}
72
server/build_settings.go
Normal file
@@ -0,0 +1,72 @@
package server

import "math"

// The build settings for a given server that impact docker container creation and
// resource limits for a server instance.
type BuildSettings struct {
// The total amount of memory in megabytes that this server is allowed to
// use on the host system.
MemoryLimit int64 `json:"memory_limit"`

// The amount of additional swap space to be provided to a container instance.
Swap int64 `json:"swap"`

// The relative weight for IO operations in a container. This is relative to other
// containers on the system and should be a value between 10 and 1000.
IoWeight uint16 `json:"io_weight"`

// The percentage of CPU that this instance is allowed to consume relative to
// the host. A value of 200% represents complete utilization of two cores. This
// should be a value between 1 and THREAD_COUNT * 100.
CpuLimit int64 `json:"cpu_limit"`

// The amount of disk space in megabytes that a server is allowed to use.
DiskSpace int64 `json:"disk_space"`

// Sets which CPU threads can be used by the docker instance.
Threads string `json:"threads"`
}

func (s *Server) Build() *BuildSettings {
return &s.Config().Build
}

// Converts the CPU limit for a server build into a number that can be better understood
// by the Docker environment. If there is no limit set, return -1 which will indicate to
// Docker that it has unlimited CPU quota.
func (b *BuildSettings) ConvertedCpuLimit() int64 {
if b.CpuLimit == 0 {
return -1
}

return b.CpuLimit * 1000
}

// Set the hard limit for memory usage to be 5% more than the amount of memory assigned to
// the server. If the memory limit for the server is < 4G, use 10%, if less than 2G use
// 15%. This avoids unexpected crashes from processes like Java which run over the limit.
func (b *BuildSettings) MemoryOverheadMultiplier() float64 {
if b.MemoryLimit <= 2048 {
return 1.15
} else if b.MemoryLimit <= 4096 {
return 1.10
}

return 1.05
}

func (b *BuildSettings) BoundedMemoryLimit() int64 {
return int64(math.Round(float64(b.MemoryLimit) * b.MemoryOverheadMultiplier() * 1_000_000))
}

// Returns the amount of swap available as a total in bytes. This is returned as the amount
// of memory available to the server initially, PLUS the amount of additional swap to include
// which is the format used by Docker.
func (b *BuildSettings) ConvertedSwap() int64 {
if b.Swap < 0 {
return -1
}

return (b.Swap * 1_000_000) + b.BoundedMemoryLimit()
}
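To make the conversions concrete: MemoryLimit = 1024 MB falls in the <= 2048 bracket, so the multiplier is 1.15 and BoundedMemoryLimit returns round(1024 * 1.15 * 1,000,000) = 1,177,600,000 bytes; with Swap = 512 MB, ConvertedSwap yields 512,000,000 + 1,177,600,000 = 1,689,600,000 bytes, and CpuLimit = 200 becomes a Docker CPU quota of 200,000. A small sketch that would sit alongside these definitions in the server package:

package server

import "fmt"

// ExampleBuildSettings illustrates the unit conversions defined above; it
// assumes it compiles next to BuildSettings in this package.
func ExampleBuildSettings() {
	b := BuildSettings{MemoryLimit: 1024, Swap: 512, CpuLimit: 200}

	fmt.Println(b.MemoryOverheadMultiplier()) // 1.15
	fmt.Println(b.BoundedMemoryLimit())       // 1177600000
	fmt.Println(b.ConvertedSwap())            // 1689600000
	fmt.Println(b.ConvertedCpuLimit())        // 200000 (two full cores in Docker quota units)
}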
@@ -10,7 +10,8 @@ import (
func (s *Server) UpdateConfigurationFiles() {
wg := new(sync.WaitGroup)

for _, v := range s.processConfiguration.ConfigurationFiles {
files := s.ProcessConfiguration().ConfigurationFiles
for _, v := range files {
wg.Add(1)

go func(f parser.ConfigurationFile, server *Server) {
91
server/configuration.go
Normal file
@@ -0,0 +1,91 @@
package server

import (
"fmt"
"strconv"
"sync"
)

type EnvironmentVariables map[string]interface{}

// Ugly hacky function to handle environment variables that get passed through as not-a-string
// from the Panel. Ideally we'd just say only pass strings, but that is a fragile idea and if a
// string wasn't passed through you'd cause a crash or the server to become unavailable. For now
// try to handle the most likely values from the JSON and hope for the best.
func (ev EnvironmentVariables) Get(key string) string {
val, ok := ev[key]
if !ok {
return ""
}

switch val.(type) {
case int:
return strconv.Itoa(val.(int))
case int32:
return strconv.FormatInt(int64(val.(int32)), 10)
case int64:
return strconv.FormatInt(val.(int64), 10)
case float32:
return fmt.Sprintf("%f", val.(float32))
case float64:
return fmt.Sprintf("%f", val.(float64))
case bool:
return strconv.FormatBool(val.(bool))
}

return val.(string)
}

type Configuration struct {
mu sync.RWMutex

// The unique identifier for the server that should be used when referencing
// it against the Panel API (and internally). This will be used when naming
// docker containers as well as in log output.
Uuid string `json:"uuid"`

// Whether or not the server is in a suspended state. Suspended servers cannot
// be started or modified except in certain scenarios by an admin user.
Suspended bool `json:"suspended"`

// The command that should be used when booting up the server instance.
Invocation string `json:"invocation"`

// An array of environment variables that should be passed along to the running
// server process.
EnvVars EnvironmentVariables `json:"environment"`

Allocations Allocations `json:"allocations"`
Build BuildSettings `json:"build"`
CrashDetectionEnabled bool `default:"true" json:"enabled" yaml:"enabled"`
Mounts []Mount `json:"mounts"`
Resources ResourceUsage `json:"resources"`

Container struct {
// Defines the Docker image that will be used for this server
Image string `json:"image,omitempty"`
// If set to true, OOM killer will be disabled on the server's Docker container.
// If not present (nil) we will default to disabling it.
OomDisabled bool `default:"true" json:"oom_disabled"`
} `json:"container,omitempty"`
}

func (s *Server) Config() *Configuration {
s.cfg.mu.RLock()
defer s.cfg.mu.RUnlock()

return &s.cfg
}

func (c *Configuration) GetUuid() string {
c.mu.RLock()
defer c.mu.RUnlock()

return c.Uuid
}

func (c *Configuration) SetSuspended(s bool) {
c.mu.Lock()
c.Suspended = s
c.mu.Unlock()
}
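One behaviour of Get worth calling out: encoding/json decodes untyped JSON numbers into float64, never int, so numeric environment values take the float path and come back formatted with %f. A sketch of the expected outputs, assuming it lives next to the definitions above in the server package:

package server

import "fmt"

// ExampleEnvironmentVariables shows the coercion behaviour of Get with the
// value types a decoded Panel payload would actually contain.
func ExampleEnvironmentVariables() {
	ev := EnvironmentVariables{
		"SERVER_JARFILE": "server.jar",
		"MAX_PLAYERS":    float64(20), // what encoding/json produces for the number 20
		"ENABLE_RCON":    true,
	}

	fmt.Println(ev.Get("SERVER_JARFILE")) // server.jar
	fmt.Println(ev.Get("MAX_PLAYERS"))    // 20.000000
	fmt.Println(ev.Get("ENABLE_RCON"))    // true
	fmt.Println(ev.Get("MISSING"))        // (empty string)
}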
@@ -4,18 +4,32 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/pterodactyl/wings/config"
"sync"
"time"
)

type CrashDetection struct {
// If set to false, the system will not listen for crash detection events that
// can indicate that the server stopped unexpectedly.
Enabled bool `default:"true" json:"enabled" yaml:"enabled"`
type CrashHandler struct {
mu sync.RWMutex

// Tracks the time of the last server crash event.
lastCrash time.Time
}

// Returns the time of the last crash for this server instance.
func (cd *CrashHandler) LastCrashTime() time.Time {
cd.mu.RLock()
defer cd.mu.RUnlock()

return cd.lastCrash
}

// Sets the last crash time for a server.
func (cd *CrashHandler) SetLastCrash(t time.Time) {
cd.mu.Lock()
cd.lastCrash = t
cd.mu.Unlock()
}

// Looks at the environment exit state to determine if the process exited cleanly or
// if it was the result of an event that we should try to recover from.
//
@@ -30,8 +44,8 @@ func (s *Server) handleServerCrash() error {
// No point in doing anything here if the server isn't currently offline, there
// is no reason to do a crash detection event. If the server crash detection is
// disabled we want to skip anything after this as well.
if s.GetState() != ProcessOfflineState || !s.CrashDetection.Enabled {
if !s.CrashDetection.Enabled {
if s.GetState() != ProcessOfflineState || !s.Config().CrashDetectionEnabled {
if !s.Config().CrashDetectionEnabled {
s.Log().Debug("server triggered crash detection but handler is disabled for server process")

s.PublishConsoleOutputFromDaemon("Server detected as crashed; crash detection is disabled for this instance.")
@@ -57,7 +71,7 @@ func (s *Server) handleServerCrash() error {
s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Exit code: %d", exitCode))
s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Out of memory: %t", oomKilled))

c := s.CrashDetection.lastCrash
c := s.crasher.LastCrashTime()
// If the last crash time was within the last 60 seconds we do not want to perform
// an automatic reboot of the process. Return an error that can be handled.
if !c.IsZero() && c.Add(time.Second * 60).After(time.Now()) {
@@ -66,7 +80,7 @@ func (s *Server) handleServerCrash() error {
return &crashTooFrequent{}
}

s.CrashDetection.lastCrash = time.Now()
s.crasher.SetLastCrash(time.Now())

return s.Environment.Start()
}
@@ -31,6 +31,13 @@ type Environment interface {
// not be returned.
Stop() error

// Restart a server instance. If already stopped the process will be started. This function
// will return an error if the server is already performing a restart process as to avoid
// unnecessary double/triple/quad looping issues if multiple people press restart or spam the
// button to restart.
Restart() error
IsRestarting() bool

// Waits for a server instance to stop gracefully. If the server is still detected
// as running after seconds, an error will be returned, or the server will be terminated
// depending on the value of the second argument.
@@ -16,15 +16,20 @@ import (
"github.com/pkg/errors"
"github.com/pterodactyl/wings/api"
"github.com/pterodactyl/wings/config"
"golang.org/x/sync/semaphore"
"io"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
)

// Defines the base environment for Docker instances running through Wings.
type DockerEnvironment struct {
sync.RWMutex

Server *Server

// The Docker client being used for this instance.
@@ -42,6 +47,25 @@ type DockerEnvironment struct {
// Holds the stats stream used by the polling commands so that we can easily close
// it out.
stats io.ReadCloser

// Locks when we're performing a restart to avoid trying to restart a process that is already
// being restarted.
restartSem *semaphore.Weighted
}

// Set if this process is currently attached to the process.
func (d *DockerEnvironment) SetAttached(a bool) {
d.Lock()
d.attached = a
d.Unlock()
}

// Determine if this process is currently attached to the container.
func (d *DockerEnvironment) IsAttached() bool {
d.RLock()
defer d.RUnlock()

return d.attached
}

// Creates a new base Docker environment. A server must still be attached to it.
@@ -70,7 +94,7 @@ func (d *DockerEnvironment) Type() string {

// Determines if the container exists in this environment.
func (d *DockerEnvironment) Exists() (bool, error) {
_, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid)
_, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())

if err != nil {
// If this error is because the container instance wasn't found via Docker we
@@ -94,9 +118,7 @@ func (d *DockerEnvironment) Exists() (bool, error) {
//
// @see docker/client/errors.go
func (d *DockerEnvironment) IsRunning() (bool, error) {
ctx := context.Background()

c, err := d.Client.ContainerInspect(ctx, d.Server.Uuid)
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
if err != nil {
return false, err
}
@@ -108,7 +130,7 @@ func (d *DockerEnvironment) IsRunning() (bool, error) {
// making any changes to the operational state of the container. This allows memory, cpu,
// and IO limitations to be adjusted on the fly for individual instances.
func (d *DockerEnvironment) InSituUpdate() error {
if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid); err != nil {
if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err != nil {
// If the container doesn't exist for some reason there really isn't anything
// we can do to fix that in this process (it doesn't make sense at least). In those
// cases just return without doing anything since we still want to save the configuration
@@ -122,13 +144,15 @@ func (d *DockerEnvironment) InSituUpdate() error {
return errors.WithStack(err)
}

ctx, _ := context.WithTimeout(context.Background(), time.Second * 10)
u := container.UpdateConfig{
Resources: d.getResourcesForServer(),
}

d.Server.Log().WithField("limits", fmt.Sprintf("%+v", u.Resources)).Debug("updating server container on-the-fly with passed limits")
if _, err := d.Client.ContainerUpdate(ctx, d.Server.Uuid, u); err != nil {

ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
if _, err := d.Client.ContainerUpdate(ctx, d.Server.Id(), u); err != nil {
return errors.WithStack(err)
}
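The two-line change here fixes a context leak, not just style: context.WithTimeout returns a cancel function, and discarding it (ctx, _ := ...) keeps the context's timer alive until the full deadline elapses even when the call finishes early; go vet's lostcancel check flags exactly this. The corrected shape in isolation:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Deferring cancel releases the context's resources as soon as the
	// surrounding function returns, instead of waiting out the deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	select {
	case <-ctx.Done():
		fmt.Println("deadline hit:", ctx.Err())
	case <-time.After(time.Millisecond):
		fmt.Println("work finished before the deadline")
	}
}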
@@ -154,7 +178,7 @@ func (d *DockerEnvironment) OnBeforeStart() error {

// Always destroy and re-create the server container to ensure that synced data from
// the Panel is used.
if err := d.Client.ContainerRemove(context.Background(), d.Server.Uuid, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
if err := d.Client.ContainerRemove(context.Background(), d.Server.Id(), types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
if !client.IsErrNotFound(err) {
return err
}
@@ -198,11 +222,11 @@ func (d *DockerEnvironment) Start() error {
// Theoretically you'd have the Panel handle all of this logic, but we cannot do that
// because we allow the websocket to control the server power state as well, so we'll
// need to handle that action in here.
if d.Server.Suspended {
if d.Server.IsSuspended() {
return &suspendedError{}
}

if c, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid); err != nil {
if c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err != nil {
// Do nothing if the container is not found, we just don't want to continue
// to the next block of code here. This check was inlined here to guard against
// a nil-pointer when checking c.State below.
@@ -254,8 +278,10 @@ func (d *DockerEnvironment) Start() error {
return errors.WithStack(err)
}

ctx, _ := context.WithTimeout(context.Background(), time.Second * 10)
if err := d.Client.ContainerStart(ctx, d.Server.Uuid, types.ContainerStartOptions{}); err != nil {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()

if err := d.Client.ContainerStart(ctx, d.Server.Id(), types.ContainerStartOptions{}); err != nil {
return errors.WithStack(err)
}
@@ -268,19 +294,89 @@ func (d *DockerEnvironment) Start() error {
// Stops the container that the server is running in. This will allow up to 10
// seconds to pass before a failure occurs.
func (d *DockerEnvironment) Stop() error {
stop := d.Server.processConfiguration.Stop
stop := d.Server.ProcessConfiguration().Stop
if stop.Type == api.ProcessStopSignal {
return d.Terminate(os.Kill)
}

d.Server.SetState(ProcessStoppingState)
if stop.Type == api.ProcessStopCommand {
// Only attempt to send the stop command to the instance if we are actually attached to
// the instance. If we are not for some reason, just send the container stop event.
if d.IsAttached() && stop.Type == api.ProcessStopCommand {
return d.SendCommand(stop.Value)
}

t := time.Second * 10

return d.Client.ContainerStop(context.Background(), d.Server.Uuid, &t)
err := d.Client.ContainerStop(context.Background(), d.Server.Id(), &t)
if err != nil {
// If the container does not exist just mark the process as stopped and return without
// an error.
if client.IsErrNotFound(err) {
d.SetAttached(false)
d.Server.SetState(ProcessOfflineState)

return nil
}

return err
}

return nil
}

// Try to acquire a lock to restart the server. If one cannot be obtained within 5 seconds return
// an error to the caller. You should ideally be checking IsRestarting() before calling this function
// to avoid unnecessary delays since you can respond immediately from that.
func (d *DockerEnvironment) acquireRestartLock() error {
if d.restartSem == nil {
d.restartSem = semaphore.NewWeighted(1)
}

ctx, _ := context.WithTimeout(context.Background(), time.Second*5)

return d.restartSem.Acquire(ctx, 1)
}

// Restarts the server process by waiting for the process to gracefully stop and then triggering a
// start command. This will return an error if there is already a restart process executing for the
// server. The lock is released when the process is stopped and a start has begun.
func (d *DockerEnvironment) Restart() error {
d.Server.Log().Debug("attempting to acquire restart lock...")
if err := d.acquireRestartLock(); err != nil {
d.Server.Log().Warn("failed to acquire restart lock; already acquired by a different process")
return err
}

d.Server.Log().Debug("acquired restart lock")

err := d.WaitForStop(60, false)
if err != nil {
d.restartSem.Release(1)
return err
}

// Release the restart lock, it is now safe for someone to attempt restarting the server again.
d.restartSem.Release(1)

// Start the process.
return d.Start()
}

// Check if the server is currently running the restart process by checking if there is a semaphore
// allocated, and if so, if we can acquire a lock on it.
func (d *DockerEnvironment) IsRestarting() bool {
if d.restartSem == nil {
return false
}

if d.restartSem.TryAcquire(1) {
d.restartSem.Release(1)

return false
}

return true
}
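A weighted semaphore of size one behaves like a mutex that can both block with a deadline and be probed without blocking, which is exactly what the restart lock needs: Restart blocks in Acquire for up to five seconds, while IsRestarting peeks with TryAcquire and releases immediately if it got the slot. A self-contained sketch of that pattern:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(1)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Take the lock, as Restart() does via acquireRestartLock().
	if err := sem.Acquire(ctx, 1); err != nil {
		fmt.Println("could not acquire:", err)
		return
	}

	// TryAcquire fails immediately while the lock is held, which is how
	// IsRestarting() answers without blocking.
	fmt.Println("restarting?", !sem.TryAcquire(1)) // true

	sem.Release(1)

	// Once released, TryAcquire succeeds; release again to leave it free.
	if sem.TryAcquire(1) {
		fmt.Println("restarting?", false)
		sem.Release(1)
	}
}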
// Attempts to gracefully stop a server using the defined stop command. If the server
@@ -301,7 +397,7 @@ func (d *DockerEnvironment) WaitForStop(seconds int, terminate bool) error {
// Block the return of this function until the container has been marked as no
// longer running. If this wait does not end by the time seconds have passed,
// attempt to terminate the container, or return an error.
ok, errChan := d.Client.ContainerWait(ctx, d.Server.Uuid, container.WaitConditionNotRunning)
ok, errChan := d.Client.ContainerWait(ctx, d.Server.Id(), container.WaitConditionNotRunning)
select {
case <-ctx.Done():
if ctxErr := ctx.Err(); ctxErr != nil {
@@ -323,9 +419,7 @@ func (d *DockerEnvironment) WaitForStop(seconds int, terminate bool) error {

// Forcefully terminates the container using the signal passed through.
func (d *DockerEnvironment) Terminate(signal os.Signal) error {
ctx := context.Background()

c, err := d.Client.ContainerInspect(ctx, d.Server.Uuid)
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
if err != nil {
return errors.WithStack(err)
}
@@ -337,19 +431,17 @@ func (d *DockerEnvironment) Terminate(signal os.Signal) error {
d.Server.SetState(ProcessStoppingState)

return d.Client.ContainerKill(
ctx, d.Server.Uuid, strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed"),
context.Background(), d.Server.Id(), strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed"),
)
}

// Remove the Docker container from the machine. If the container is currently running
// it will be forcibly stopped by Docker.
func (d *DockerEnvironment) Destroy() error {
ctx := context.Background()

// Avoid crash detection firing off.
d.Server.SetState(ProcessStoppingState)

err := d.Client.ContainerRemove(ctx, d.Server.Uuid, types.ContainerRemoveOptions{
err := d.Client.ContainerRemove(context.Background(), d.Server.Id(), types.ContainerRemoveOptions{
RemoveVolumes: true,
RemoveLinks: false,
Force: true,
@@ -369,7 +461,7 @@ func (d *DockerEnvironment) Destroy() error {
// Determine the container exit state and return the exit code and whether or not
// the container was killed by the OOM killer.
func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid)
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
if err != nil {
// I'm not entirely sure how this can happen to be honest. I tried deleting a
// container _while_ a server was running and wings gracefully saw the crash and
@@ -395,7 +487,7 @@ func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
// miss important output at the beginning because of the time delay with attaching to the
// output.
func (d *DockerEnvironment) Attach() error {
if d.attached {
if d.IsAttached() {
return nil
}
@@ -403,10 +495,8 @@ func (d *DockerEnvironment) Attach() error {
return errors.WithStack(err)
}

ctx := context.Background()

var err error
d.stream, err = d.Client.ContainerAttach(ctx, d.Server.Uuid, types.ContainerAttachOptions{
d.stream, err = d.Client.ContainerAttach(context.Background(), d.Server.Id(), types.ContainerAttachOptions{
Stdin: true,
Stdout: true,
Stderr: true,
@@ -421,7 +511,7 @@ func (d *DockerEnvironment) Attach() error {
Server: d.Server,
}

d.attached = true
d.SetAttached(true)
go func() {
if err := d.EnableResourcePolling(); err != nil {
d.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to enable resource polling on server")
@@ -432,7 +522,7 @@ func (d *DockerEnvironment) Attach() error {
defer d.stream.Close()
defer func() {
d.Server.SetState(ProcessOfflineState)
d.attached = false
d.SetAttached(false)
}()

io.Copy(console, d.stream.Reader)
@@ -450,10 +540,9 @@ func (d *DockerEnvironment) FollowConsoleOutput() error {
return errors.WithStack(err)
}

return errors.New(fmt.Sprintf("no such container: %s", d.Server.Uuid))
return errors.New(fmt.Sprintf("no such container: %s", d.Server.Id()))
}

ctx := context.Background()
opts := types.ContainerLogsOptions{
ShowStderr: true,
ShowStdout: true,
@@ -461,7 +550,7 @@ func (d *DockerEnvironment) FollowConsoleOutput() error {
Since: time.Now().Format(time.RFC3339),
}

reader, err := d.Client.ContainerLogs(ctx, d.Server.Uuid, opts)
reader, err := d.Client.ContainerLogs(context.Background(), d.Server.Id(), opts)

go func(r io.ReadCloser) {
defer r.Close()
@@ -487,9 +576,7 @@ func (d *DockerEnvironment) EnableResourcePolling() error {
return errors.New("cannot enable resource polling on a server that is not running")
}

ctx := context.Background()

stats, err := d.Client.ContainerStats(ctx, d.Server.Uuid, true)
stats, err := d.Client.ContainerStats(context.Background(), d.Server.Id(), true)
if err != nil {
return errors.WithStack(err)
}
@@ -516,20 +603,16 @@ func (d *DockerEnvironment) EnableResourcePolling() error {
return
}

s.Resources.CpuAbsolute = s.Resources.CalculateAbsoluteCpu(&v.PreCPUStats, &v.CPUStats)
s.Resources.Memory = s.Resources.CalculateDockerMemory(v.MemoryStats)
s.Resources.MemoryLimit = v.MemoryStats.Limit
s.Proc().UpdateFromDocker(v)
for _, nw := range v.Networks {
s.Proc().UpdateNetworkBytes(&nw)
}

// Why you ask? This already has the logic for caching disk space in use and then
// also handles pushing that value to the resources object automatically.
s.Filesystem.HasSpaceAvailable()

for _, nw := range v.Networks {
s.Resources.Network.RxBytes += nw.RxBytes
s.Resources.Network.TxBytes += nw.TxBytes
}

b, _ := json.Marshal(s.Resources)
b, _ := json.Marshal(s.Proc())
s.Events().Publish(StatsEvent, string(b))
}
}(d.Server)
@@ -544,15 +627,16 @@ func (d *DockerEnvironment) DisableResourcePolling() error {
}

err := d.stats.Close()

d.Server.Resources.CpuAbsolute = 0
d.Server.Resources.Memory = 0
d.Server.Resources.Network.TxBytes = 0
d.Server.Resources.Network.RxBytes = 0
d.Server.Proc().Empty()

return errors.WithStack(err)
}

// Returns the image to be used for the instance.
func (d *DockerEnvironment) Image() string {
return d.Server.Config().Container.Image
}
// Pulls the image from Docker. If there is an error while pulling the image from the source
// but the image already exists locally, we will report that error to the logger but continue
// with the process.
@@ -563,15 +647,16 @@ func (d *DockerEnvironment) DisableResourcePolling() error {
// correctly if anything.
//
// @todo handle authorization & local images
func (d *DockerEnvironment) ensureImageExists(c *client.Client) error {
func (d *DockerEnvironment) ensureImageExists() error {
// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
// an image. Let me know when I am inevitably wrong here...
ctx, _ := context.WithTimeout(context.Background(), time.Minute*15)
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
defer cancel()

out, err := c.ImagePull(ctx, d.Server.Container.Image, types.ImagePullOptions{All: false})
out, err := d.Client.ImagePull(ctx, d.Image(), types.ImagePullOptions{All: false})
if err != nil {
images, ierr := c.ImageList(ctx, types.ImageListOptions{})
images, ierr := d.Client.ImageList(ctx, types.ImageListOptions{})
if ierr != nil {
// Well damn, something has gone really wrong here, just go ahead and abort; there
// isn't much of anything we can do to try and self-recover from this.
@@ -580,9 +665,12 @@ func (d *DockerEnvironment) ensureImageExists(c *client.Client) error {

for _, img := range images {
for _, t := range img.RepoTags {
if t == d.Server.Container.Image {
if t != d.Image() {
continue
}

d.Server.Log().WithFields(log.Fields{
"image": d.Server.Container.Image,
"image": d.Image(),
"error": errors.New(err.Error()),
}).Warn("unable to pull requested image from remote source, however the image exists locally")

@@ -591,13 +679,12 @@ func (d *DockerEnvironment) ensureImageExists(c *client.Client) error {
return nil
}
}
}

return err
}
defer out.Close()

log.WithField("image", d.Server.Container.Image).Debug("pulling docker image... this could take a bit of time")
log.WithField("image", d.Image()).Debug("pulling docker image... this could take a bit of time")

// I'm not sure what the best approach here is, but this will block execution until the image
// is done being pulled, which is what we need.
@@ -616,12 +703,6 @@ func (d *DockerEnvironment) ensureImageExists(c *client.Client) error {
// Creates a new container for the server using all of the data that is currently
// available for it. If the container already exists it will be returned.
func (d *DockerEnvironment) Create() error {
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv)
if err != nil {
return errors.WithStack(err)
}

// Ensure the data directory exists before getting too far through this process.
if err := d.Server.Filesystem.EnsureDataDirectory(); err != nil {
return errors.WithStack(err)
@@ -630,50 +711,87 @@ func (d *DockerEnvironment) Create() error {
|
||||
// If the container already exists don't hit the user with an error, just return
|
||||
// the current information about it which is what we would do when creating the
|
||||
// container anyways.
|
||||
if _, err := cli.ContainerInspect(ctx, d.Server.Uuid); err == nil {
|
||||
if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err == nil {
|
||||
return nil
|
||||
} else if !client.IsErrNotFound(err) {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Try to pull the requested image before creating the container.
|
||||
if err := d.ensureImageExists(cli); err != nil {
|
||||
if err := d.ensureImageExists(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
conf := &container.Config{
|
||||
Hostname: "container",
|
||||
Hostname: d.Server.Id(),
|
||||
Domainname: config.Get().Docker.Domainname,
|
||||
User: strconv.Itoa(config.Get().System.User.Uid),
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
AttachStderr: true,
|
||||
OpenStdin: true,
|
||||
Tty: true,
|
||||
|
||||
ExposedPorts: d.exposedPorts(),
|
||||
|
||||
Image: d.Server.Container.Image,
|
||||
Env: d.environmentVariables(),
|
||||
|
||||
Image: d.Image(),
|
||||
Env: d.Server.GetEnvironmentVariables(),
|
||||
Labels: map[string]string{
|
||||
"Service": "Pterodactyl",
|
||||
"ContainerType": "server_process",
|
||||
},
|
||||
}
|
||||
|
||||
mounts := []mount.Mount{
|
||||
{
|
||||
Target: "/home/container",
|
||||
Source: d.Server.Filesystem.Path(),
|
||||
Type: mount.TypeBind,
|
||||
ReadOnly: false,
|
||||
},
|
||||
}
|
||||
|
||||
var mounted bool
|
||||
for _, m := range d.Server.Config().Mounts {
|
||||
mounted = false
|
||||
source := filepath.Clean(m.Source)
|
||||
target := filepath.Clean(m.Target)
|
||||
|
||||
for _, allowed := range config.Get().AllowedMounts {
|
||||
if !strings.HasPrefix(source, allowed) {
|
||||
continue
|
||||
}
|
||||
|
||||
mounts = append(mounts, mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
|
||||
Source: source,
|
||||
Target: target,
|
||||
ReadOnly: m.ReadOnly,
|
||||
})
|
||||
|
||||
mounted = true
|
||||
break
|
||||
}
|
||||
|
||||
logger := log.WithFields(log.Fields{
|
||||
"server": d.Server.Id(),
|
||||
"source_path": source,
|
||||
"target_path": target,
|
||||
"read_only": m.ReadOnly,
|
||||
})
|
||||
|
||||
if mounted {
|
||||
logger.Debug("attaching mount to server's container")
|
||||
} else {
|
||||
logger.Warn("skipping mount because it isn't allowed")
|
||||
}
|
||||
}
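The allow-list above admits a mount when its cleaned source merely begins with an allowed string, so a sibling directory that shares the prefix would also pass; a hypothetical illustration:

    // Illustration only (hypothetical paths): a bare prefix match also accepts siblings.
    source := filepath.Clean("/mnt/backups-private")       // not the intended "/mnt/backups"
    fmt.Println(strings.HasPrefix(source, "/mnt/backups")) // true

A stricter comparison would append a path separator to both sides before checking, the same trick unsafeIsInDataDirectory uses later in this diff.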

    hostConf := &container.HostConfig{
        PortBindings: d.portBindings(),

        // Configure the mounts for this container. First mount the server data directory
        // into the container as a r/w bind.
-        Mounts: []mount.Mount{
-            {
-                Target:   "/home/container",
-                Source:   d.Server.Filesystem.Path(),
-                Type:     mount.TypeBind,
-                ReadOnly: false,
-            },
-        },
+        Mounts: mounts,

        // Configure the /tmp folder mapping in containers. This is necessary for some
        // games that need to make use of it for downloads and other installation processes.
@@ -708,7 +826,7 @@ func (d *DockerEnvironment) Create() error {
        NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
    }

-    if _, err := cli.ContainerCreate(ctx, conf, hostConf, nil, d.Server.Uuid); err != nil {
+    if _, err := d.Client.ContainerCreate(context.Background(), conf, hostConf, nil, d.Server.Id()); err != nil {
        return errors.WithStack(err)
    }

@@ -718,7 +836,7 @@ func (d *DockerEnvironment) Create() error {
// Sends the specified command to the stdin of the running container instance. There is no
// confirmation that this data is sent successfully, only that it gets pushed into the stdin.
func (d *DockerEnvironment) SendCommand(c string) error {
-    if !d.attached {
+    if !d.IsAttached() {
        return errors.New("attempting to send command to non-attached instance")
    }

@@ -730,9 +848,7 @@ func (d *DockerEnvironment) SendCommand(c string) error {
// Reads the log file for the server. This does not care if the server is running or not, it will
// simply try to read the last X bytes of the file and return them.
func (d *DockerEnvironment) Readlog(len int64) ([]string, error) {
-    ctx := context.Background()

-    j, err := d.Client.ContainerInspect(ctx, d.Server.Uuid)
+    j, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
    if err != nil {
        return nil, err
    }
@@ -804,42 +920,12 @@ func (d *DockerEnvironment) parseLogToStrings(b []byte) ([]string, error) {
    return out, nil
}

-// Returns the environment variables for a server in KEY="VALUE" form.
-func (d *DockerEnvironment) environmentVariables() []string {
-    zone, _ := time.Now().In(time.Local).Zone()

-    var out = []string{
-        fmt.Sprintf("TZ=%s", zone),
-        fmt.Sprintf("STARTUP=%s", d.Server.Invocation),
-        fmt.Sprintf("SERVER_MEMORY=%d", d.Server.Build.MemoryLimit),
-        fmt.Sprintf("SERVER_IP=%s", d.Server.Allocations.DefaultMapping.Ip),
-        fmt.Sprintf("SERVER_PORT=%d", d.Server.Allocations.DefaultMapping.Port),
-    }

-eloop:
-    for k, v := range d.Server.EnvVars {
-        for _, e := range out {
-            if strings.HasPrefix(e, strings.ToUpper(k)) {
-                continue eloop
-            }
-        }

-        out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
-    }

-    return out
-}

-func (d *DockerEnvironment) volumes() map[string]struct{} {
-    return nil
-}

// Converts the server allocation mappings into a format that can be understood
// by Docker.
func (d *DockerEnvironment) portBindings() nat.PortMap {
    var out = nat.PortMap{}

-    for ip, ports := range d.Server.Allocations.Mappings {
+    for ip, ports := range d.Server.Config().Allocations.Mappings {
        for _, port := range ports {
            // Skip over invalid ports.
            if port < 0 || port > 65535 {
@@ -889,14 +975,14 @@ func (d *DockerEnvironment) exposedPorts() nat.PortSet {
// the same or higher than the memory limit.
func (d *DockerEnvironment) getResourcesForServer() container.Resources {
    return container.Resources{
-        Memory:            d.Server.Build.BoundedMemoryLimit(),
-        MemoryReservation: d.Server.Build.MemoryLimit * 1_000_000,
-        MemorySwap:        d.Server.Build.ConvertedSwap(),
-        CPUQuota:          d.Server.Build.ConvertedCpuLimit(),
+        Memory:            d.Server.Build().BoundedMemoryLimit(),
+        MemoryReservation: d.Server.Build().MemoryLimit * 1_000_000,
+        MemorySwap:        d.Server.Build().ConvertedSwap(),
+        CPUQuota:          d.Server.Build().ConvertedCpuLimit(),
        CPUPeriod:         100_000,
        CPUShares:         1024,
-        BlkioWeight:       d.Server.Build.IoWeight,
-        OomKillDisable:    &d.Server.Container.OomDisabled,
-        CpusetCpus:        d.Server.Build.Threads,
+        BlkioWeight:       d.Server.Build().IoWeight,
+        OomKillDisable:    &d.Server.Config().Container.OomDisabled,
+        CpusetCpus:        d.Server.Build().Threads,
    }
}
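The build values coming from the Panel appear to be megabytes, and only some of them are converted inline; a worked example of the reservation math, assuming a 1024 MB limit:

    // Worked example (assumption: MemoryLimit is megabytes from the Panel).
    memoryLimitMB := int64(1024)
    reservationBytes := memoryLimitMB * 1_000_000 // 1,024,000,000 bytes handed to Docker

BoundedMemoryLimit() and ConvertedSwap() are defined elsewhere in the codebase; their exact overhead math is not shown in this compare.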

@@ -11,6 +11,7 @@ import (
    "github.com/pterodactyl/wings/config"
    "github.com/pterodactyl/wings/server/backup"
    ignore "github.com/sabhiram/go-gitignore"
+    "golang.org/x/sync/errgroup"
    "io"
    "io/ioutil"
    "os"
@@ -25,18 +26,27 @@ import (
)

-// Error returned when there is a bad path provided to one of the FS calls.
-var InvalidPathResolution = errors.New("invalid path resolution")
+type PathResolutionError struct{}

+// Returns the error response in a string form that can be more easily consumed.
+func (pre PathResolutionError) Error() string {
+    return "invalid path resolution"
+}

+func IsPathResolutionError(err error) bool {
+    _, ok := err.(PathResolutionError)

+    return ok
+}

type Filesystem struct {
    // The server object associated with this Filesystem.
    Server *Server

-    Configuration *config.SystemConfiguration
+    cacheDiskMu sync.Mutex
}

// Returns the root path that contains all of a server's data.
func (fs *Filesystem) Path() string {
-    return filepath.Join(fs.Configuration.Data, fs.Server.Uuid)
+    return filepath.Join(config.Get().System.Data, fs.Server.Id())
}

// Normalizes a directory being passed in to ensure the user is not able to escape
@@ -48,12 +58,8 @@ func (fs *Filesystem) Path() string {
func (fs *Filesystem) SafePath(p string) (string, error) {
    var nonExistentPathResolution string

-    // Calling filepath.Clean on the joined directory will resolve it to the absolute path,
-    // removing any ../ type of resolution arguments, and leaving us with a direct path link.
-    //
-    // This will also trim the existing root path off the beginning of the path passed to
-    // the function since that can get a bit messy.
-    r := filepath.Clean(filepath.Join(fs.Path(), strings.TrimPrefix(p, fs.Path())))
+    // Start with a cleaned up path before checking the more complex bits.
+    r := fs.unsafeFilePath(p)

    // At the same time, evaluate the symlink status and determine where this file or folder
    // is truly pointing to.
@@ -71,7 +77,7 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
    for k := range parts {
        try = strings.Join(parts[:(len(parts)-k)], "/")

-        if !strings.HasPrefix(try, fs.Path()) {
+        if !fs.unsafeIsInDataDirectory(try) {
            break
        }

@@ -86,8 +92,8 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
    // If the new path doesn't start with their root directory there is clearly an escape
    // attempt going on, and we should NOT resolve this path for them.
    if nonExistentPathResolution != "" {
-        if !strings.HasPrefix(nonExistentPathResolution, fs.Path()) {
-            return "", InvalidPathResolution
+        if !fs.unsafeIsInDataDirectory(nonExistentPathResolution) {
+            return "", PathResolutionError{}
        }

        // If the nonExistentPathResolution variable is not empty then the initial path requested
@@ -100,11 +106,99 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
    // If the requested directory from EvalSymlinks begins with the server root directory go
    // ahead and return it. If not we'll return an error which will block any further action
    // on the file.
-    if strings.HasPrefix(p, fs.Path()) {
+    if fs.unsafeIsInDataDirectory(p) {
        return p, nil
    }

-    return "", InvalidPathResolution
+    return "", PathResolutionError{}
}

+// Generate a path to the file by cleaning it up and appending the root server path to it. This
+// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
+// the fs.unsafeIsInDataDirectory(p) function to confirm.
+func (fs *Filesystem) unsafeFilePath(p string) string {
+    // Calling filepath.Clean on the joined directory will resolve it to the absolute path,
+    // removing any ../ type of resolution arguments, and leaving us with a direct path link.
+    //
+    // This will also trim the existing root path off the beginning of the path passed to
+    // the function since that can get a bit messy.
+    return filepath.Clean(filepath.Join(fs.Path(), strings.TrimPrefix(p, fs.Path())))
+}

+// Check that the path string starts with the server data directory path. This function DOES NOT
+// validate that the rest of the path does not end up resolving out of this directory, or that the
+// targeted file or folder is not a symlink doing the same thing.
+func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
+    return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
+}
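The TrimSuffix-plus-slash dance matters because a bare prefix check matches sibling directories that merely share a name prefix; with hypothetical paths:

    // A bare prefix check wrongly accepts a sibling of the data directory.
    strings.HasPrefix("/srv/data/server10", "/srv/data/server1")         // true
    // Comparing with trailing separators forces whole-segment matches.
    strings.HasPrefix("/srv/data/server10"+"/", "/srv/data/server1"+"/") // false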

+// Helper function to keep some of the codebase a little cleaner. Returns a "safe" version of the path
+// joined with a file. This is important because you cannot just assume that appending a file to a cleaned
+// path will result in a cleaned path to that file. For example, imagine you have the following scenario:
+//
+// my_bad_file -> symlink:/etc/passwd
+//
+// cleaned := SafePath("../../etc") -> "/"
+// filepath.Join(cleaned, my_bad_file) -> "/my_bad_file"
+//
+// You might think that "/my_bad_file" is fine since it isn't pointing to the original "../../etc/my_bad_file".
+// However, this doesn't account for symlinks where the file might be pointing outside of the directory, so
+// calling a function such as Chown against it would chown the symlinked location, and not the file within
+// the Wings daemon.
+func (fs *Filesystem) SafeJoin(dir string, f os.FileInfo) (string, error) {
+    if f.Mode()&os.ModeSymlink != 0 {
+        return fs.SafePath(filepath.Join(dir, f.Name()))
+    }

+    return filepath.Join(dir, f.Name()), nil
+}

+// Executes the fs.SafePath function in parallel against an array of paths. If any of the calls
+// fails an error will be returned.
+func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
+    var cleaned []string

+    // Simple locker function to avoid racy appends to the array of cleaned paths.
+    var m = new(sync.Mutex)
+    var push = func(c string) {
+        m.Lock()
+        cleaned = append(cleaned, c)
+        m.Unlock()
+    }

+    // Create an error group that we can use to run processes in parallel while retaining
+    // the ability to cancel the entire process immediately should any of it fail.
+    g, ctx := errgroup.WithContext(context.Background())

+    // Iterate over all of the paths and generate a cleaned path; if there is an error for any
+    // of the files, abort the process.
+    for _, p := range paths {
+        // Create a copy so we can use it within the goroutine correctly.
+        pi := p

+        // Validate each path in a separate goroutine. If the context has already been
+        // canceled by an earlier failure, abort before doing any work.
+        g.Go(func() error {
+            select {
+            case <-ctx.Done():
+                return ctx.Err()
+            default:
+                if c, err := fs.SafePath(pi); err != nil {
+                    return err
+                } else {
+                    push(c)
+                }

+                return nil
+            }
+        })
+    }

+    // Block until all of the routines finish and have returned a value.
+    return cleaned, g.Wait()
+}
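A hypothetical caller, batching several user-supplied paths through the check at once:

    // Hypothetical usage; any single failing path aborts the batch via the errgroup.
    cleaned, err := fs.ParallelSafePath([]string{"configs/server.properties", "logs/latest.log"})
    if err != nil {
        return err
    }

Note that result ordering is not guaranteed, since paths are appended as their goroutines finish.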

// Determines if the directory a file is trying to be added to has enough space available
@@ -113,18 +207,45 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
// Because determining the amount of space being used by a server is a taxing operation we
// will load it all up into a cache and pull from that as long as the key is not expired.
func (fs *Filesystem) HasSpaceAvailable() bool {
-    var space = fs.Server.Build.DiskSpace
+    space := fs.Server.Build().DiskSpace

+    size, err := fs.getCachedDiskUsage()
+    if err != nil {
+        fs.Server.Log().WithField("error", err).Warn("failed to determine root server directory size")
+    }

+    // Determine if their folder size, in bytes, is smaller than the amount of space they've
+    // been allocated.
+    fs.Server.Proc().SetDisk(size)

    // If space is -1 or 0 just return true, which means they're allowed unlimited usage.
    //
    // Technically we could skip the disk space calculation because we don't need to check if the
    // server exceeds its limit, but because this method caches the disk usage it is best to
    // calculate the disk usage and always return true.
    if space <= 0 {
        return true
    }

-    // If we have a match in the cache, use that value in the return. No need to perform an expensive
-    // disk operation, even if this is an empty value.
-    if x, exists := fs.Server.Cache.Get("disk_used"); exists {
-        fs.Server.Resources.Disk = x.(int64)
-        return (x.(int64) / 1000.0 / 1000.0) <= space
+    return (size / 1000.0 / 1000.0) <= space
}
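One subtlety in the return line: size is an int64 and the untyped constants convert to int64, so size / 1000.0 / 1000.0 is integer division despite the float literals. The comparison is therefore on whole, truncated megabytes, which lines up with DiskSpace being stored in MB:

    var size int64 = 52_428_800  // ~52 MB of files on disk
    mb := size / 1000.0 / 1000.0 // int64(52): integer division despite the float literals
    fmt.Println(mb <= 50)        // false, so a 50 MB limit is treated as exceeded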

+// Internal helper function to allow other parts of the codebase to check the total used disk space
+// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
+// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
+// is no longer a cached value.
+func (fs *Filesystem) getCachedDiskUsage() (int64, error) {
+    // Obtain an exclusive lock on this process so that we don't unintentionally run it at the same
+    // time as another running process. Once the lock is available it'll read from the cache for the
+    // second call rather than hitting the disk in parallel.
+    //
+    // This is effectively the same speed as running this call in parallel since the cache will return
+    // instantly on the second call.
+    fs.cacheDiskMu.Lock()
+    defer fs.cacheDiskMu.Unlock()

+    if x, exists := fs.Server.cache.Get("disk_used"); exists {
+        return x.(int64), nil
+    }

    // If there is no size it's either because there is no data (in which case running this function
@@ -132,37 +253,30 @@ func (fs *Filesystem) HasSpaceAvailable() bool {
    // grab the size of their data directory. This is a taxing operation, so we want to store it in
    // the cache once we've gotten it.
    size, err := fs.DirectorySize("/")
-    if err != nil {
-        fs.Server.Log().WithField("error", err).Warn("failed to determine root server directory size")
-    }

    // Always cache the size, even if there is an error. We want to always return that value
    // so that we don't cause an endless loop of determining the disk size if there is a temporary
    // error encountered.
-    fs.Server.Cache.Set("disk_used", size, time.Second*60)
+    fs.Server.cache.Set("disk_used", size, time.Second*60)

-    // Determine if their folder size, in bytes, is smaller than the amount of space they've
-    // been allocated.
-    fs.Server.Resources.Disk = size

-    return (size / 1000.0 / 1000.0) <= space
+    return size, err
}

// Determines the directory size of a given location by running parallel tasks to iterate
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
// on locations with tons of files, so it is recommended that you cache the output.
func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
-    w := fs.NewWalker()
-    ctx := context.Background()

    var size int64
-    err := w.Walk(dir, ctx, func(f os.FileInfo, _ string) bool {
-        // Only increment the size when we're dealing with a file specifically, otherwise
-        // just continue digging deeper until there are no more directories to iterate over.
+    err := fs.Walk(dir, func(_ string, f os.FileInfo, err error) error {
+        if err != nil {
+            return fs.handleWalkerError(err, f)
+        }

        if !f.IsDir() {
            atomic.AddInt64(&size, f.Size())
        }
-        return true

+        return nil
    })

    return size, err

@@ -244,7 +358,7 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {

    // Finally, chown the file to ensure the permissions don't end up out-of-whack
    // if we had just created it.
-    return fs.Chown(p)
+    return fs.Chown(cleaned)
}

// Defines the stat struct object.
@@ -332,6 +446,20 @@ func (fs *Filesystem) Rename(from string, to string) error {
        return errors.WithStack(err)
    }

+    if f, err := os.Stat(cleanedFrom); err != nil {
+        return errors.WithStack(err)
+    } else {
+        d := cleanedTo
+        if !f.IsDir() {
+            d = strings.TrimSuffix(d, path.Base(cleanedTo))
+        }

+        // Ensure that the directory we're moving into exists correctly on the system.
+        // Directories need the execute bit to be traversable, hence 0755 and not 0644.
+        if mkerr := os.MkdirAll(d, 0755); mkerr != nil {
+            return errors.WithStack(mkerr)
+        }
+    }

    return os.Rename(cleanedFrom, cleanedTo)
}

@@ -346,7 +474,7 @@ func (fs *Filesystem) Chown(path string) error {
    if s, err := os.Stat(cleaned); err != nil {
        return errors.WithStack(err)
    } else if !s.IsDir() {
-        return os.Chown(cleaned, fs.Configuration.User.Uid, fs.Configuration.User.Gid)
+        return os.Chown(cleaned, config.Get().System.User.Uid, config.Get().System.User.Gid)
    }

    return fs.chownDirectory(cleaned)
@@ -372,16 +500,27 @@ func (fs *Filesystem) chownDirectory(path string) error {
    }

    for _, f := range files {
+        // Do not attempt to chown a symlink. Go's os.Chown function follows the symlink,
+        // so if it points to a location outside the data directory the user would be able to
+        // (un)intentionally modify that file's permissions.
+        if f.Mode()&os.ModeSymlink != 0 {
+            continue
+        }

+        p, err := fs.SafeJoin(cleaned, f)
+        if err != nil {
+            return err
+        }

        if f.IsDir() {
            wg.Add(1)

            go func(p string) {
                defer wg.Done()
                fs.chownDirectory(p)
-            }(filepath.Join(cleaned, f.Name()))
+            }(p)
        } else {
            // Chown the file.
-            os.Chown(filepath.Join(cleaned, f.Name()), fs.Configuration.User.Uid, fs.Configuration.User.Gid)
+            os.Chown(p, config.Get().System.User.Uid, config.Get().System.User.Gid)
        }
    }

@@ -473,17 +612,26 @@ func (fs *Filesystem) Copy(p string) error {
// Deletes a file or folder from the system. Prevents the user from accidentally
// (or maliciously) removing their root server data directory.
func (fs *Filesystem) Delete(p string) error {
-    cleaned, err := fs.SafePath(p)
-    if err != nil {
-        return errors.WithStack(err)
+    // This is one of the few (only?) places in the codebase where we're explicitly not using
+    // the SafePath functionality when working with user-provided input. If we did, you would
+    // not be able to delete a file that is a symlink pointing to a location outside of the data
+    // directory.
+    //
+    // We also want to avoid resolving a symlink that points _within_ the data directory and thus
+    // deleting the actual source file for the symlink rather than the symlink itself. For these
+    // purposes just resolve the actual file path using filepath.Join() and confirm that the path
+    // exists within the data directory.
+    resolved := fs.unsafeFilePath(p)
+    if !fs.unsafeIsInDataDirectory(resolved) {
+        return PathResolutionError{}
    }

    // Block any whoopsies.
-    if cleaned == fs.Path() {
+    if resolved == fs.Path() {
        return errors.New("cannot delete root server directory")
    }

-    return os.RemoveAll(cleaned)
+    return os.RemoveAll(resolved)
}
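The effect of skipping SafePath here, with a hypothetical symlink inside the data directory:

    // Hypothetical: /srv/data/<id>/evil is a symlink to /etc/passwd.
    resolved := fs.unsafeFilePath("evil") // "/srv/data/<id>/evil", the link itself
    // os.RemoveAll(resolved) unlinks the symlink without following it, so the target
    // outside the data directory is untouched; SafePath would instead have resolved
    // the link to /etc/passwd and rejected the delete outright.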

// Lists the contents of a given directory and returns stat information about each
@@ -516,7 +664,14 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) {

    var m = "inode/directory"
    if !f.IsDir() {
+        cleanedp, _ := fs.SafeJoin(cleaned, f)
+        if cleanedp != "" {
            m, _, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
+        } else {
+            // Just pass this for an unknown type because the file could not safely be resolved within
+            // the server data path.
+            m = "application/octet-stream"
+        }
    }

    out[idx] = &Stat{
@@ -573,9 +728,6 @@ func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.In
        return nil, err
    }

-    w := fs.NewWalker()
-    ctx := context.Background()

    i, err := ignore.CompileIgnoreLines(ignored...)
    if err != nil {
        return nil, err
@@ -584,20 +736,104 @@ func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.In
    // Walk through all of the files and directories on a server. This callback only returns
    // files found, and will keep walking deeper and deeper into directories.
    inc := new(backup.IncludedFiles)

-    if err := w.Walk(cleaned, ctx, func(f os.FileInfo, p string) bool {
+    if err := fs.Walk(cleaned, func(p string, f os.FileInfo, err error) error {
+        if err != nil {
+            return fs.handleWalkerError(err, f)
+        }

        // Avoid unnecessary parsing if there are no ignored files; nothing will match anyway,
        // so there is no reason to call the function.
-        if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(p, fs.Path() + "/")) {
+        if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(p, fs.Path()+"/")) {
            inc.Push(&f, p)
        }

        // We can't just abort if the path is technically ignored. It is possible there is a nested
        // file or folder that should not be excluded, so in this case we need to just keep going
        // until we get to a final state.
-        return true
+        return nil
    }); err != nil {
        return nil, err
    }

    return inc, nil
}

+// Compresses all of the files matching the given paths in the specified directory. This function
+// also supports passing nested paths to only compress certain files and folders when working in
+// a larger directory. This effectively creates a local backup, but rather than ignoring specific
+// files and folders, it takes an allow-list of files and folders.
+//
+// All paths are relative to the dir that is passed in as the first argument, and the compressed
+// file will be placed at that location named `archive-{date}.tar.gz`.
+func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
+    cleanedRootDir, err := fs.SafePath(dir)
+    if err != nil {
+        return nil, err
+    }

+    // Take all of the paths passed in and merge them together with the root directory we've gotten.
+    for i, p := range paths {
+        paths[i] = filepath.Join(cleanedRootDir, p)
+    }

+    cleaned, err := fs.ParallelSafePath(paths)
+    if err != nil {
+        return nil, err
+    }

+    inc := new(backup.IncludedFiles)
+    // Iterate over all of the cleaned paths and merge them into a large object of final file
+    // paths to pass into the archiver. As directories are encountered this will drop into them
+    // and look for all of the files.
+    for _, p := range cleaned {
+        f, err := os.Stat(p)
+        if err != nil {
+            fs.Server.Log().WithField("error", err).WithField("path", p).Debug("failed to stat file or directory for compression")
+            continue
+        }

+        if f.IsDir() {
+            err := fs.Walk(p, func(s string, info os.FileInfo, err error) error {
+                if err != nil {
+                    return fs.handleWalkerError(err, info)
+                }

+                if !info.IsDir() {
+                    inc.Push(&info, s)
+                }

+                return nil
+            })

+            if err != nil {
+                return nil, err
+            }
+        } else {
+            inc.Push(&f, p)
+        }
+    }

+    a := &backup.Archive{TrimPrefix: fs.Path(), Files: inc}

+    d := path.Join(cleanedRootDir, fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")))

+    return a.Create(d, context.Background())
+}

+// Handle errors encountered when walking through directories.
+//
+// If there is a path resolution error, just skip the item entirely. Only return this for a
+// directory; otherwise return nil. Returning this error for a file will stop the walking
+// for the remainder of the directory. This is assuming an os.FileInfo struct was even returned.
+func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
+    if !IsPathResolutionError(err) {
+        return err
+    }

+    if f != nil && f.IsDir() {
+        return filepath.SkipDir
+    }

+    return nil
+}

130
server/filesystem_unarchive.go
Normal file
@@ -0,0 +1,130 @@
package server

import (
    "archive/tar"
    "archive/zip"
    "compress/gzip"
    "fmt"
    "github.com/mholt/archiver/v3"
    "github.com/pkg/errors"
    "io"
    "os"
    "path/filepath"
    "reflect"
    "strings"
    "sync"
    "sync/atomic"
)

// Look through a given archive and determine if decompressing it would put the server over
// its allocated disk space limit.
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (bool, error) {
    // Don't waste time trying to determine this if we know the server will have the space for
    // it since there is no limit.
    if fs.Server.Build().DiskSpace <= 0 {
        return true, nil
    }

    source, err := fs.SafePath(filepath.Join(dir, file))
    if err != nil {
        return false, err
    }

    wg := new(sync.WaitGroup)

    var dirSize int64
    var cErr error
    // Get the cached size in a parallel process so that if it is not cached we are not
    // waiting an unnecessary amount of time on this call. Note that the Add must happen
    // before the goroutine launches so that Wait cannot return early.
    wg.Add(1)
    go func() {
        defer wg.Done()

        dirSize, cErr = fs.getCachedDiskUsage()
    }()

    var size int64
    // In a separate goroutine, walk over the archive and figure out just how large the final
    // output would be from decompressing it.
    wg.Add(1)
    go func() {
        defer wg.Done()

        // Walk all of the files and calculate the total decompressed size of this archive.
        archiver.Walk(source, func(f archiver.File) error {
            atomic.AddInt64(&size, f.Size())

            return nil
        })
    }()

    wg.Wait()

    return ((dirSize + size) / 1000.0 / 1000.0) <= fs.Server.Build().DiskSpace, cErr
}

// Decompress a file in a given directory by using the archiver tool to infer the file
// type and go from there. This will walk over all of the files within the given archive
// and ensure that there is not a zip-slip attack being attempted by validating that the
// final path is within the server data directory.
func (fs *Filesystem) DecompressFile(dir string, file string) error {
    source, err := fs.SafePath(filepath.Join(dir, file))
    if err != nil {
        return errors.WithStack(err)
    }

    // Walk over all of the files in the archive, extracting each one and writing it to the
    // disk. If any part of this process encounters an error the entire process will be stopped.
    return archiver.Walk(source, func(f archiver.File) error {
        // Don't waste time with directories, we don't need to create them if they have no contents, and
        // we will ensure the directory exists when opening the file for writing anyways.
        if f.IsDir() {
            return nil
        }

        return fs.extractFileFromArchive(f)
    })
}

// Extracts a single file from the archive and writes it to the disk after verifying that it will end
// up in the server data directory.
func (fs *Filesystem) extractFileFromArchive(f archiver.File) error {
    var name string

    switch s := f.Sys().(type) {
    case *tar.Header:
        name = s.Name
    case *gzip.Header:
        name = s.Name
    case *zip.FileHeader:
        name = s.Name
    default:
        return errors.New(fmt.Sprintf("could not parse underlying data source with type %s", reflect.TypeOf(s).String()))
    }

    // Guard against a zip-slip attack and prevent writing a file to a destination outside of
    // the server root directory.
    p, err := fs.SafePath(name)
    if err != nil {
        return err
    }

    // Ensure the directory structure for this file exists before trying to write the file
    // to the disk, otherwise we'll have some unexpected fun.
    if err := os.MkdirAll(strings.TrimSuffix(p, filepath.Base(p)), 0755); err != nil {
        return err
    }

    // Open the file and truncate it if it already exists.
    o, err := os.OpenFile(p, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
    if err != nil {
        return err
    }

    defer o.Close()

    _, cerr := io.Copy(o, f)

    return cerr
}
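What the SafePath guard buys during extraction, with a hypothetical malicious entry:

    // Hypothetical zip-slip payload inside an uploaded archive.
    name := "../../../etc/cron.d/evil"
    // unsafeFilePath joins and cleans this to a path outside the server root, so
    // SafePath returns PathResolutionError{} and the entry is never written to disk.
    if _, err := fs.SafePath(name); err != nil {
        return err
    }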

@@ -2,69 +2,140 @@ package server

import (
    "context"
-    "golang.org/x/sync/errgroup"
+    "github.com/gammazero/workerpool"
    "io/ioutil"
    "os"
    "path/filepath"
+    "runtime"
    "sync"
)

-type FileWalker struct {
-    *Filesystem
-}

+type PooledFileWalker struct {
+    wg       sync.WaitGroup
+    pool     *workerpool.WorkerPool
+    callback filepath.WalkFunc
+    cancel   context.CancelFunc

+    err     error
+    errOnce sync.Once

+    Filesystem *Filesystem
+}

-// Returns a new walker instance.
-func (fs *Filesystem) NewWalker() *FileWalker {
-    return &FileWalker{fs}
-}

-// Iterate over all of the files and directories within a given directory. When a file is
-// found the callback will be called with the file information. If a directory is encountered
-// it will be recursively passed back through to this function.
-func (fw *FileWalker) Walk(dir string, ctx context.Context, callback func(os.FileInfo, string) bool) error {
-    cleaned, err := fw.SafePath(dir)
+// Creates a new pooled file walker that will concurrently walk over a given directory but limit itself
+// to a worker pool so as to not completely flood out the system or cause a process crash.
+func newPooledWalker(fs *Filesystem) *PooledFileWalker {
+    return &PooledFileWalker{
+        Filesystem: fs,
+        // Create a worker pool that is the same size as the number of processors available on the
+        // system. Going much higher doesn't provide much of a performance boost, and is only more
+        // likely to lead to resource overloading anyways.
+        pool: workerpool.New(runtime.GOMAXPROCS(0)),
+    }
+}

+// Process a given path by calling the callback function for all of the files and directories within
+// the path, and then dropping into any directories that we come across.
+func (w *PooledFileWalker) process(path string) error {
+    p, err := w.Filesystem.SafePath(path)
    if err != nil {
        return err
    }

    // Get all of the files from this directory.
-    files, err := ioutil.ReadDir(cleaned)
+    files, err := ioutil.ReadDir(p)
    if err != nil {
        return err
    }

-    // Create an error group that we can use to run processes in parallel while retaining
-    // the ability to cancel the entire process immediately should any of it fail.
-    g, ctx := errgroup.WithContext(ctx)

+    // Loop over all of the files and directories in the given directory and call the provided
+    // callback function. If we encounter a directory, push that directory onto the worker queue
+    // to be processed.
    for _, f := range files {
-        if f.IsDir() {
-            fi := f
-            p := filepath.Join(cleaned, f.Name())
-            // Recursively call this function to continue digging through the directory tree within
-            // a separate goroutine. If the context is canceled, abort this process.
-            g.Go(func() error {
-                select {
-                case <-ctx.Done():
-                    return ctx.Err()
-                default:
-                    // If the callback returns true, go ahead and keep walking deeper. This allows
-                    // us to programmatically continue deeper into directories, or stop digging
-                    // if that pathway knows it needs nothing else.
-                    if callback(fi, p) {
-                        return fw.Walk(p, ctx, callback)
-                    }

-                    return nil
-                }
-            })
-        } else {
-            // If this isn't a directory, go ahead and pass the file information into the
-            // callback. We don't care about the response since we won't be stepping into
-            // anything from here.
-            callback(f, filepath.Join(cleaned, f.Name()))
-        }
+        sp, err := w.Filesystem.SafeJoin(p, f)
+        if err != nil {
+            // Let the callback function handle what to do if there is a path resolution error because a
+            // dangerous path was resolved. If there is an error returned, return from this entire process;
+            // otherwise just skip over this specific file. We don't care if it's a file or a directory at
+            // this point since either way we're skipping it; however, still check for the SkipDir since that
+            // would be thrown otherwise.
+            if err = w.callback(sp, f, err); err != nil && err != filepath.SkipDir {
+                return err
+            }

+            continue
+        }

+        i, err := os.Stat(sp)
+        // You might end up getting an error about a file or folder not existing if the given
+        // path is an invalid symlink. We can safely just skip over these files I believe.
+        if os.IsNotExist(err) {
+            continue
+        }

+        // Call the user-provided callback for this file or directory. If an error is returned that is
+        // not a SkipDir call, abort the entire process and bubble that error up.
+        if err = w.callback(sp, i, err); err != nil && err != filepath.SkipDir {
+            return err
+        }

+        // If this is a directory, and we didn't get a SkipDir error, continue through by pushing another
+        // job to the pool to handle it. If we requested a skip, don't do anything, just continue on to the
+        // next item.
+        if i.IsDir() && err != filepath.SkipDir {
+            w.push(sp)
+        } else if !i.IsDir() && err == filepath.SkipDir {
+            // Per the spec for the callback, if we get a SkipDir error but it is returned for an item
+            // that is _not_ a directory, abort the remaining operations on the directory.
+            return nil
+        }
    }

    return nil
}

+// Push a new path into the worker pool and increment the waitgroup so that we do not return too
+// early and cause panics as internal directories attempt to submit to the pool.
+func (w *PooledFileWalker) push(path string) {
+    w.wg.Add(1)
+    w.pool.Submit(func() {
+        defer w.wg.Done()
+        if err := w.process(path); err != nil {
+            w.errOnce.Do(func() {
+                w.err = err
+                if w.cancel != nil {
+                    w.cancel()
+                }
+            })
+        }
+    })
+}

+// Walks the given directory and executes the callback function for all of the files and directories
+// that are encountered.
+func (fs *Filesystem) Walk(dir string, callback filepath.WalkFunc) error {
+    w := newPooledWalker(fs)
+    w.callback = callback

+    _, cancel := context.WithCancel(context.Background())
+    w.cancel = cancel

+    w.push(dir)

+    w.wg.Wait()
+    w.pool.StopWait()

+    if w.err != nil {
+        return w.err
+    }

-    // Block until all of the routines finish and have returned a value.
-    return g.Wait()
+    return nil
}
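A hypothetical caller of the new Walk, showing the filepath.WalkFunc contract and the SkipDir handling implemented above:

    // Count files while skipping any ".git" directories. The callback can run on
    // several pool workers at once, so shared state must be synchronized.
    var count int64
    err := fs.Walk("/", func(p string, info os.FileInfo, err error) error {
        if err != nil {
            return fs.handleWalkerError(err, info)
        }
        if info.IsDir() && info.Name() == ".git" {
            return filepath.SkipDir
        }
        if !info.IsDir() {
            atomic.AddInt64(&count, 1)
        }
        return nil
    })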

@@ -13,6 +13,7 @@ import (
    "github.com/pterodactyl/wings/api"
    "github.com/pterodactyl/wings/config"
    "golang.org/x/sync/semaphore"
+    "html/template"
    "io"
    "io/ioutil"
    "os"
@@ -24,7 +25,17 @@ import (

// Executes the installation stack for a server process. Bubbles any errors up to the calling
// function which should handle contacting the panel to notify it of the server state.
-func (s *Server) Install() error {
+//
+// Pass true as the first argument in order to execute a server sync before the process to
+// ensure the latest information is used.
+func (s *Server) Install(sync bool) error {
+    if sync {
+        s.Log().Info("syncing server state with remote source before executing installation process")
+        if err := s.Sync(); err != nil {
+            return err
+        }
+    }

    err := s.internalInstall()

    s.Log().Debug("notifying panel of server install state")
@@ -54,12 +65,12 @@ func (s *Server) Reinstall() error {
        }
    }

-    return s.Install()
+    return s.Install(true)
}

// Internal installation function used to simplify reporting back to the Panel.
func (s *Server) internalInstall() error {
-    script, rerr, err := api.NewRequester().GetInstallationScript(s.Uuid)
+    script, rerr, err := api.NewRequester().GetInstallationScript(s.Id())
    if err != nil || rerr != nil {
        if err != nil {
            return err
@@ -159,7 +170,7 @@ func (s *Server) AbortInstallation() {

// Removes the installer container for the server.
func (ip *InstallationProcess) RemoveContainer() {
-    err := ip.client.ContainerRemove(ip.context, ip.Server.Uuid+"_installer", types.ContainerRemoveOptions{
+    err := ip.client.ContainerRemove(ip.context, ip.Server.Id()+"_installer", types.ContainerRemoveOptions{
        RemoveVolumes: true,
        Force:         true,
    })
@@ -302,7 +313,7 @@ func (ip *InstallationProcess) BeforeExecute() (string, error) {
        Force: true,
    }

-    if err := ip.client.ContainerRemove(ip.context, ip.Server.Uuid+"_installer", opts); err != nil {
+    if err := ip.client.ContainerRemove(ip.context, ip.Server.Id()+"_installer", opts); err != nil {
        if !client.IsErrNotFound(err) {
            e = append(e, err)
        }
@@ -322,7 +333,7 @@ func (ip *InstallationProcess) BeforeExecute() (string, error) {

// Returns the log path for the installation process.
func (ip *InstallationProcess) GetLogPath() string {
-    return filepath.Join(config.Get().System.GetInstallLogPath(), ip.Server.Uuid+".log")
+    return filepath.Join(config.Get().System.GetInstallLogPath(), ip.Server.Id()+".log")
}

// Cleans up after the execution of the installation process. This grabs the logs from the
@@ -349,7 +360,37 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
    defer f.Close()

    // We write the contents of the container output to a more "permanent" file so that they
-    // can be referenced after this container is deleted.
+    // can be referenced after this container is deleted. We'll also include the environment
+    // variables passed into the container to make debugging things a little easier.
+    ip.Server.Log().WithField("path", ip.GetLogPath()).Debug("writing most recent installation logs to disk")

+    tmpl, err := template.New("header").Parse(`Pterodactyl Server Installation Log

|
| Details
| ------------------------------
Server UUID:          {{.Server.Id}}
Container Image:      {{.Script.ContainerImage}}
Container Entrypoint: {{.Script.Entrypoint}}

|
| Environment Variables
| ------------------------------
{{ range $key, $value := .Server.GetEnvironmentVariables }} {{ $value }}
{{ end }}

|
| Script Output
| ------------------------------
`)
+    if err != nil {
+        return errors.WithStack(err)
+    }

+    if err := tmpl.Execute(f, ip); err != nil {
+        return errors.WithStack(err)
+    }

    if _, err := io.Copy(f, reader); err != nil {
        return errors.WithStack(err)
    }
@@ -407,7 +448,7 @@ func (ip *InstallationProcess) Execute(installPath string) (string, error) {
    }

    ip.Server.Log().WithField("install_script", installPath+"/install.sh").Info("creating install container for server process")
-    r, err := ip.client.ContainerCreate(ip.context, conf, hostConf, nil, ip.Server.Uuid+"_installer")
+    r, err := ip.client.ContainerCreate(ip.context, conf, hostConf, nil, ip.Server.Id()+"_installer")
    if err != nil {
        return "", errors.WithStack(err)
    }
@@ -475,7 +516,7 @@ func (ip *InstallationProcess) StreamOutput(id string) error {
func (s *Server) SyncInstallState(successful bool) error {
    r := api.NewRequester()

-    rerr, err := r.SendInstallationStatus(s.Uuid, successful)
+    rerr, err := r.SendInstallationStatus(s.Id(), successful)
    if rerr != nil || err != nil {
        if err != nil {
            return errors.WithStack(err)

@@ -27,9 +27,11 @@ func (s *Server) onConsoleOutput(data string) {
    // If the specific line of output is one that would mark the server as started,
    // set the server to that state. Only do this if the server is not currently stopped
    // or stopping.
-    if s.GetState() == ProcessStartingState && strings.Contains(data, s.processConfiguration.Startup.Done) {
+    match := s.ProcessConfiguration().Startup.Done

+    if s.GetState() == ProcessStartingState && strings.Contains(data, match) {
        s.Log().WithFields(log.Fields{
-            "match":   s.processConfiguration.Startup.Done,
+            "match":   match,
            "against": data,
        }).Debug("detected server in running state based on console line output")

@@ -40,7 +42,8 @@ func (s *Server) onConsoleOutput(data string) {
    // set the server to be in a stopping state, otherwise crash detection will kick in and
    // cause the server to unexpectedly restart on the user.
    if s.IsRunning() {
-        if s.processConfiguration.Stop.Type == api.ProcessStopCommand && data == s.processConfiguration.Stop.Value {
+        stop := s.ProcessConfiguration().Stop
+        if stop.Type == api.ProcessStopCommand && data == stop.Value {
            s.SetState(ProcessStoppingState)
        }
    }

120
server/loader.go
Normal file
@@ -0,0 +1,120 @@
package server

import (
    "github.com/apex/log"
    "github.com/creasty/defaults"
    "github.com/patrickmn/go-cache"
    "github.com/pkg/errors"
    "github.com/pterodactyl/wings/api"
    "github.com/remeh/sizedwaitgroup"
    "time"
)

var servers = NewCollection(nil)

func GetServers() *Collection {
    return servers
}

// Iterates over a given directory and loads all of the servers listed before returning
// them to the calling function.
func LoadDirectory() error {
    if len(servers.items) != 0 {
        return errors.New("cannot call LoadDirectory with a non-empty collection")
    }

    // We could theoretically use a standard wait group here, however doing
    // that introduces the potential to crash the program due to too many
    // open files. This wouldn't happen on a small setup, but once the daemon is
    // handling many servers you run that risk.
    //
    // For now just process 10 files at a time; that should be plenty fast to
    // read and parse the YAML. We should probably make this configurable down
    // the road to help big instances scale better.
    wg := sizedwaitgroup.New(10)

    configs, rerr, err := api.NewRequester().GetAllServerConfigurations()
    if err != nil || rerr != nil {
        if err != nil {
            return errors.WithStack(err)
        }

        return errors.New(rerr.String())
    }

    log.Debug("retrieving cached server states from disk")
    states, err := getServerStates()
    if err != nil {
        return errors.WithStack(err)
    }

    log.WithField("total_configs", len(configs)).Debug("looping over received configurations from API")
    for uuid, data := range configs {
        wg.Add()

        go func(uuid string, data *api.ServerConfigurationResponse) {
            defer wg.Done()

            log.WithField("uuid", uuid).Debug("creating server object from configuration")
            s, err := FromConfiguration(data)
            if err != nil {
                log.WithField("server", uuid).WithField("error", err).Error("failed to load server, skipping...")
                return
            }

            if state, exists := states[s.Id()]; exists {
                s.SetState(state)
                s.Log().WithField("state", s.GetState()).Debug("loaded server state from cache file")
            }

            servers.Add(s)
        }(uuid, data)
    }

    // Wait until we've processed all of the configuration files in the directory
    // before continuing.
    wg.Wait()

    return nil
}

// Initializes a server using a data byte array. This will be marshaled into the
// given struct using a YAML marshaler. This will also configure the given environment
// for a server.
func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
    cfg := Configuration{}
    if err := defaults.Set(&cfg); err != nil {
        return nil, err
    }

    s := new(Server)
    s.cfg = cfg

    if err := s.UpdateDataStructure(data.Settings, false); err != nil {
        return nil, err
    }

    s.AddEventListeners()

    // Right now we only support a Docker based environment, so I'm going to hard code
    // this logic in. When we're ready to support other environments we'll need to make
    // some modifications here, obviously.
    if err := NewDockerEnvironment(s); err != nil {
        return nil, err
    }

    s.cache = cache.New(time.Minute*10, time.Minute*15)
    s.Archiver = Archiver{
        Server: s,
    }
    s.Filesystem = Filesystem{
        Server: s,
    }

    // Forces the configuration to be synced with the panel.
    if err := s.SyncWithConfiguration(data); err != nil {
        return nil, err
    }

    return s, nil
}

8
server/mount.go
Normal file
@@ -0,0 +1,8 @@
package server

// Mount represents a Server Mount.
type Mount struct {
    Target   string `json:"target"`
    Source   string `json:"source"`
    ReadOnly bool   `json:"read_only"`
}

10
server/process.go
Normal file
@@ -0,0 +1,10 @@
package server

import "github.com/pterodactyl/wings/api"

func (s *Server) ProcessConfiguration() *api.ProcessConfiguration {
    s.RLock()
    defer s.RUnlock()

    return s.procConfig
}
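The accessor takes the server's read lock around returning the pointer; the matching write side is not part of this compare, but a minimal sketch of what it could look like under the same embedded RWMutex:

    // Hypothetical counterpart; the commit's actual setter lives outside this view.
    func (s *Server) setProcConfiguration(c *api.ProcessConfiguration) {
        s.Lock()
        s.procConfig = c
        s.Unlock()
    }

Note the lock only guards the pointer swap: callers of ProcessConfiguration() still share the struct it points to.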

@@ -3,27 +3,38 @@ package server

import (
    "github.com/docker/docker/api/types"
    "math"
+    "sync"
+    "sync/atomic"
)

// Defines the current resource usage for a given server instance. If a server is offline you
// should obviously expect memory and CPU usage to be 0. However, disk will always be returned
// since that is not dependent on the server being running to collect that data.
type ResourceUsage struct {
+    mu sync.RWMutex

+    // The current server status.
+    State string `json:"state" default:"offline"`

    // The total amount of memory, in bytes, that this server instance is consuming. This is
    // calculated slightly differently than just using the raw Memory field that the stats
    // return from the container, so please check the code setting this value for how that
    // is calculated.
    Memory uint64 `json:"memory_bytes"`

    // The total amount of memory this container or resource can use. Inside Docker this is
    // going to be higher than you'd expect because we're automatically allocating overhead
    // abilities for the container, so it's not going to be a perfect match.
    MemoryLimit uint64 `json:"memory_limit_bytes"`

    // The absolute CPU usage is the amount of CPU used in relation to the entire system and
    // does not take into account any limits on the server process itself.
    CpuAbsolute float64 `json:"cpu_absolute"`

    // The current disk space being used by the server. This is cached to prevent slow lookup
    // issues on frequent refreshes.
    Disk int64 `json:"disk_bytes"`

    // Current network transmit in & out for a container.
    Network struct {
        RxBytes uint64 `json:"rx_bytes"`
@@ -31,6 +42,66 @@ type ResourceUsage struct {
    } `json:"network"`
}

+// Returns the resource usage stats for the server instance. If the server is not running, only the
+// disk space currently used will be returned. When the server is running all of the other stats will
+// be returned.
+//
+// When a process is stopped all of the stats are zeroed out except for the disk.
+func (s *Server) Proc() *ResourceUsage {
+    s.resources.mu.RLock()
+    defer s.resources.mu.RUnlock()

+    return &s.resources
+}

+// Returns the server's current state.
+func (ru *ResourceUsage) getInternalState() string {
+    ru.mu.RLock()
+    defer ru.mu.RUnlock()

+    return ru.State
+}

+// Sets the new state for the server.
+func (ru *ResourceUsage) setInternalState(state string) {
+    ru.mu.Lock()
+    ru.State = state
+    ru.mu.Unlock()
+}

+// Resets the usage values to zero, used when a server is stopped to ensure we don't hold
+// onto any values incorrectly.
+func (ru *ResourceUsage) Empty() {
+    ru.mu.Lock()
+    defer ru.mu.Unlock()

+    ru.Memory = 0
+    ru.CpuAbsolute = 0
+    ru.Network.TxBytes = 0
+    ru.Network.RxBytes = 0
+}

+func (ru *ResourceUsage) SetDisk(i int64) {
+    ru.mu.Lock()
+    defer ru.mu.Unlock()

+    ru.Disk = i
+}

+func (ru *ResourceUsage) UpdateFromDocker(v *types.StatsJSON) {
+    ru.mu.Lock()
+    defer ru.mu.Unlock()

+    ru.CpuAbsolute = ru.calculateDockerAbsoluteCpu(&v.PreCPUStats, &v.CPUStats)
+    ru.Memory = ru.calculateDockerMemory(v.MemoryStats)
+    ru.MemoryLimit = v.MemoryStats.Limit
+}

+func (ru *ResourceUsage) UpdateNetworkBytes(nw *types.NetworkStats) {
+    atomic.AddUint64(&ru.Network.RxBytes, nw.RxBytes)
+    atomic.AddUint64(&ru.Network.TxBytes, nw.TxBytes)
+}

// The "docker stats" CLI call does not return the same value as the types.MemoryStats.Usage
// value, which can be rather confusing to people trying to compare panel usage to
// their stats output.
@@ -40,7 +111,7 @@ type ResourceUsage struct {
// correct memory value anyways.
//
// @see https://github.com/docker/cli/blob/96e1d1d6/cli/command/container/stats_helpers.go#L227-L249
-func (ru *ResourceUsage) CalculateDockerMemory(stats types.MemoryStats) uint64 {
+func (ru *ResourceUsage) calculateDockerMemory(stats types.MemoryStats) uint64 {
    if v, ok := stats.Stats["total_inactive_file"]; ok && v < stats.Usage {
        return stats.Usage - v
    }
@@ -56,7 +127,7 @@ func (ru *ResourceUsage) CalculateDockerMemory(stats types.MemoryStats) uint64 {
// by the defined CPU limits on the container.
//
// @see https://github.com/docker/cli/blob/aa097cf1aa19099da70930460250797c8920b709/cli/command/container/stats_helpers.go#L166
-func (ru *ResourceUsage) CalculateAbsoluteCpu(pStats *types.CPUStats, stats *types.CPUStats) float64 {
+func (ru *ResourceUsage) calculateDockerAbsoluteCpu(pStats *types.CPUStats, stats *types.CPUStats) float64 {
    // Calculate the change in CPU usage between the current and previous reading.
    cpuDelta := float64(stats.CPUUsage.TotalUsage) - float64(pStats.CPUUsage.TotalUsage)
||||
|
||||
|
||||
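The hunk is cut off mid-function here, so the rest of the absolute CPU calculation is not visible. Going by the docker CLI helper linked in the comment above, the remainder looks roughly like the following sketch (field names come from github.com/docker/docker/api/types; this is not necessarily the exact Wings implementation):

    // Sketch of the remainder of calculateDockerAbsoluteCpu, modeled on the
    // docker CLI stats helper referenced above.
    systemDelta := float64(stats.SystemUsage) - float64(pStats.SystemUsage)

    if systemDelta > 0 && cpuDelta > 0 {
        onlineCPUs := float64(stats.OnlineCPUs)
        if onlineCPUs == 0 {
            onlineCPUs = float64(len(stats.CPUUsage.PercpuUsage))
        }
        // Percentage where 100 means one fully utilized core.
        return (cpuDelta / systemDelta) * onlineCPUs * 100
    }

    return 0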
273
server/server.go
@@ -4,66 +4,37 @@ import (
    "context"
    "fmt"
    "github.com/apex/log"
    "github.com/creasty/defaults"
    "github.com/patrickmn/go-cache"
    "github.com/pkg/errors"
    "github.com/pterodactyl/wings/api"
    "github.com/pterodactyl/wings/config"
    "github.com/remeh/sizedwaitgroup"
    "golang.org/x/sync/semaphore"
    "math"
    "os"
    "strings"
    "sync"
    "time"
)

var servers *Collection

func GetServers() *Collection {
    return servers
}

// High level definition for a server instance being controlled by Wings.
type Server struct {
    // The unique identifier for the server that should be used when referencing
    // it against the Panel API (and internally). This will be used when naming
    // docker containers as well as in log output.
    Uuid string `json:"uuid"`
    // Internal mutex used to block actions that need to occur sequentially, such as
    // writing the configuration to the disk.
    sync.RWMutex

    // Whether or not the server is in a suspended state. Suspended servers cannot
    // be started or modified except in certain scenarios by an admin user.
    Suspended bool `json:"suspended"`
    // Maintains the configuration for the server. This is the data that gets returned by the Panel
    // such as build settings and container images.
    cfg Configuration

    // The power state of the server.
    State string `default:"offline" json:"state"`
    // The crash handler for this server instance.
    crasher CrashHandler

    // The command that should be used when booting up the server instance.
    Invocation string `json:"invocation"`

    // An array of environment variables that should be passed along to the running
    // server process.
    EnvVars map[string]string `json:"environment" yaml:"environment"`

    Archiver       Archiver       `json:"-" yaml:"-"`
    CrashDetection CrashDetection `json:"crash_detection" yaml:"crash_detection"`
    Build          BuildSettings  `json:"build"`
    Allocations    Allocations    `json:"allocations"`
    Environment    Environment    `json:"-" yaml:"-"`
    Filesystem     Filesystem     `json:"-" yaml:"-"`
    Resources      ResourceUsage  `json:"resources" yaml:"-"`

    Container struct {
        // Defines the Docker image that will be used for this server
        Image string `json:"image,omitempty"`
        // If set to true, OOM killer will be disabled on the server's Docker container.
        // If not present (nil) we will default to disabling it.
        OomDisabled bool `default:"true" json:"oom_disabled" yaml:"oom_disabled"`
    } `json:"container,omitempty"`
    resources ResourceUsage
    Archiver Archiver `json:"-"`
    Environment Environment `json:"-"`
    Filesystem Filesystem `json:"-"`

    // Server cache used to store frequently requested information in memory and make
    // certain long operations return faster. For example, FS disk space usage.
    Cache *cache.Cache `json:"-" yaml:"-"`
    cache *cache.Cache

    // Events emitted by the server instance.
    emitter *EventBus
@@ -71,17 +42,13 @@ type Server struct {
    // Defines the process configuration for the server instance. This is dynamically
    // fetched from the Pterodactyl Server instance each time the server process is
    // started, and then cached here.
    processConfiguration *api.ProcessConfiguration
    procConfig *api.ProcessConfiguration

    // Tracks the installation process for this server and prevents a server from running
    // two installer processes at the same time. This also allows us to cancel a running
    // installation process, for example when a server is deleted from the panel while the
    // installer process is still running.
    installer InstallerDetails

    // Internal mutex used to block actions that need to occur sequentially, such as
    // writing the configuration to the disk.
    sync.RWMutex
}

type InstallerDetails struct {
@@ -94,183 +61,9 @@ type InstallerDetails struct {
    sem *semaphore.Weighted
}

// The build settings for a given server that impact docker container creation and
// resource limits for a server instance.
type BuildSettings struct {
    // The total amount of memory in megabytes that this server is allowed to
    // use on the host system.
    MemoryLimit int64 `json:"memory_limit" yaml:"memory"`

    // The amount of additional swap space to be provided to a container instance.
    Swap int64 `json:"swap"`

    // The relative weight for IO operations in a container. This is relative to other
    // containers on the system and should be a value between 10 and 1000.
    IoWeight uint16 `json:"io_weight" yaml:"io"`

    // The percentage of CPU that this instance is allowed to consume relative to
    // the host. A value of 200% represents complete utilization of two cores. This
    // should be a value between 1 and THREAD_COUNT * 100.
    CpuLimit int64 `json:"cpu_limit" yaml:"cpu"`

    // The amount of disk space in megabytes that a server is allowed to use.
    DiskSpace int64 `json:"disk_space" yaml:"disk"`

    // Sets which CPU threads can be used by the docker instance.
    Threads string `json:"threads" yaml:"threads"`
}

// Converts the CPU limit for a server build into a number that can be better understood
// by the Docker environment. If there is no limit set, return -1 which will indicate to
// Docker that it has unlimited CPU quota.
func (b *BuildSettings) ConvertedCpuLimit() int64 {
    if b.CpuLimit == 0 {
        return -1
    }

    return b.CpuLimit * 1000
}
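Docker reads this value as a CPU quota in microseconds per scheduling period, so assuming the environment leaves CpuPeriod at Docker's default of 100,000µs, the percentage maps directly onto the quota:

    b := BuildSettings{CpuLimit: 150}

    // 150% of a core becomes a quota of 150000µs. Against a CpuPeriod of
    // 100000µs that allows 1.5 cores of CPU time per period.
    fmt.Println(b.ConvertedCpuLimit()) // 150000

    // No limit configured: -1 tells Docker the quota is unlimited.
    fmt.Println((&BuildSettings{}).ConvertedCpuLimit()) // -1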
// Set the hard limit for memory usage to be 5% more than the amount of memory assigned to
// the server. If the memory limit for the server is < 4G, use 10%; if less than 2G, use
// 15%. This avoids unexpected crashes from processes like Java which run over the limit.
func (b *BuildSettings) MemoryOverheadMultiplier() float64 {
    if b.MemoryLimit <= 2048 {
        return 1.15
    } else if b.MemoryLimit <= 4096 {
        return 1.10
    }

    return 1.05
}

func (b *BuildSettings) BoundedMemoryLimit() int64 {
    return int64(math.Round(float64(b.MemoryLimit) * b.MemoryOverheadMultiplier() * 1_000_000))
}

// Returns the amount of swap available as a total in bytes. This is returned as the amount
// of memory available to the server initially, PLUS the amount of additional swap to include
// which is the format used by Docker.
func (b *BuildSettings) ConvertedSwap() int64 {
    if b.Swap < 0 {
        return -1
    }

    return (b.Swap * 1_000_000) + b.BoundedMemoryLimit()
}
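A quick worked example of the three methods above; note that the conversions use decimal megabytes (1 MB = 1,000,000 bytes):

    b := BuildSettings{MemoryLimit: 1024, Swap: 512}

    // 1024 is <= 2048, so the overhead multiplier is 1.15:
    // round(1024 * 1.15 * 1_000_000) = 1_177_600_000 bytes as the hard limit.
    fmt.Println(b.BoundedMemoryLimit()) // 1177600000

    // Docker wants memory+swap expressed as a single total:
    // (512 * 1_000_000) + 1_177_600_000 = 1_689_600_000 bytes.
    fmt.Println(b.ConvertedSwap()) // 1689600000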
// Defines the allocations available for a given server. When using the Docker environment
// driver these correspond to mappings for the container that allow external connections.
type Allocations struct {
    // Defines the default allocation that should be used for this server. This is
    // what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
    // files or the startup arguments for a server.
    DefaultMapping struct {
        Ip   string `json:"ip"`
        Port int    `json:"port"`
    } `json:"default" yaml:"default"`

    // Mappings contains all of the ports that should be assigned to a given server
    // attached to the IP they correspond to.
    Mappings map[string][]int `json:"mappings"`
}
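Given the JSON tags above, an allocations payload from the Panel would take roughly this shape (the address and ports are illustrative only):

    {
        "default": {"ip": "10.0.0.5", "port": 25565},
        "mappings": {
            "10.0.0.5": [25565, 25566]
        }
    }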
// Iterates over a given directory and loads all of the servers listed before returning
// them to the calling function.
func LoadDirectory() error {
    // We could theoretically use a standard wait group here, however doing
    // that introduces the potential to crash the program due to too many
    // open files. This wouldn't happen on a small setup, but once the daemon is
    // handling many servers you run that risk.
    //
    // For now just process 10 files at a time, that should be plenty fast to
    // read and parse the YAML. We should probably make this configurable down
    // the road to help big instances scale better.
    wg := sizedwaitgroup.New(10)

    configs, rerr, err := api.NewRequester().GetAllServerConfigurations()
    if err != nil || rerr != nil {
        if err != nil {
            return errors.WithStack(err)
        }

        return errors.New(rerr.String())
    }

    states, err := getServerStates()
    if err != nil {
        return errors.WithStack(err)
    }

    servers = NewCollection(nil)

    for uuid, data := range configs {
        wg.Add()

        go func(uuid string, data *api.ServerConfigurationResponse) {
            defer wg.Done()

            s, err := FromConfiguration(data)
            if err != nil {
                log.WithField("server", uuid).WithField("error", err).Error("failed to load server, skipping...")
                return
            }

            if state, exists := states[s.Uuid]; exists {
                s.SetState(state)
                s.Log().WithField("state", s.GetState()).Debug("loaded server state from cache file")
            }

            servers.Add(s)
        }(uuid, data)
    }

    // Wait until we've processed all of the configuration files in the directory
    // before continuing.
    wg.Wait()

    return nil
}
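The sizedwaitgroup above caps the loader at ten concurrent goroutines to keep the number of open files in check. The same bound can be expressed with a plain buffered channel; this sketch only illustrates the pattern, it is not a proposed change:

    sem := make(chan struct{}, 10)
    var wg sync.WaitGroup

    for uuid, data := range configs {
        wg.Add(1)
        sem <- struct{}{} // blocks once ten loads are in flight

        go func(uuid string, data *api.ServerConfigurationResponse) {
            defer wg.Done()
            defer func() { <-sem }()
            // ... parse the configuration and register the server, as above ...
        }(uuid, data)
    }

    wg.Wait()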
// Initializes a server using a data byte array. This will be unmarshaled into the
// given struct using a YAML unmarshaler. This will also configure the given environment
// for a server.
func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
    s := new(Server)

    if err := defaults.Set(s); err != nil {
        return nil, err
    }

    if err := s.UpdateDataStructure(data.Settings, false); err != nil {
        return nil, err
    }

    s.AddEventListeners()

    // Right now we only support a Docker based environment, so I'm going to hard code
    // this logic in. When we're ready to support other environments we'll need to make
    // some modifications here, obviously.
    if err := NewDockerEnvironment(s); err != nil {
        return nil, err
    }

    s.Cache = cache.New(time.Minute*10, time.Minute*15)
    s.Archiver = Archiver{
        Server: s,
    }
    s.Filesystem = Filesystem{
        Configuration: &config.Get().System,
        Server:        s,
    }
    s.Resources = ResourceUsage{}

    // Forces the configuration to be synced with the panel.
    if err := s.SyncWithConfiguration(data); err != nil {
        return nil, err
    }

    return s, nil
}

// Returns the UUID for the server instance.
func (s *Server) Id() string {
    return s.Config().GetUuid()
}

// Returns all of the environment variables that should be assigned to a running
@@ -280,28 +73,28 @@ func (s *Server) GetEnvironmentVariables() []string {

    var out = []string{
        fmt.Sprintf("TZ=%s", zone),
        fmt.Sprintf("STARTUP=%s", s.Invocation),
        fmt.Sprintf("SERVER_MEMORY=%d", s.Build.MemoryLimit),
        fmt.Sprintf("SERVER_IP=%s", s.Allocations.DefaultMapping.Ip),
        fmt.Sprintf("SERVER_PORT=%d", s.Allocations.DefaultMapping.Port),
        fmt.Sprintf("STARTUP=%s", s.Config().Invocation),
        fmt.Sprintf("SERVER_MEMORY=%d", s.Build().MemoryLimit),
        fmt.Sprintf("SERVER_IP=%s", s.Config().Allocations.DefaultMapping.Ip),
        fmt.Sprintf("SERVER_PORT=%d", s.Config().Allocations.DefaultMapping.Port),
    }

eloop:
    for k, v := range s.EnvVars {
    for k := range s.Config().EnvVars {
        for _, e := range out {
            if strings.HasPrefix(e, strings.ToUpper(k)) {
                continue eloop
            }
        }

        out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
        out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), s.Config().EnvVars.Get(k)))
    }

    return out
}
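The labeled continue gives the daemon-provided variables priority: if an entry in out already starts with the upper-cased key, that key from the server's EnvVars is skipped entirely. For instance, with hypothetical values:

    out := []string{"SERVER_MEMORY=512"}
    k := "server_memory" // user-supplied key, in any casing

    // strings.ToUpper(k) == "SERVER_MEMORY" is a prefix of "SERVER_MEMORY=512",
    // so the HasPrefix check fires, the loop continues at eloop, and the
    // daemon-provided value wins over the user-supplied one.
    fmt.Println(strings.HasPrefix(out[0], strings.ToUpper(k))) // true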
func (s *Server) Log() *log.Entry {
    return log.WithField("server", s.Uuid)
    return log.WithField("server", s.Id())
}

// Syncs the state of the server on the Panel with Wings. This ensures that we're always
@@ -333,7 +126,10 @@ func (s *Server) SyncWithConfiguration(cfg *api.ServerConfigurationResponse) err
        return errors.WithStack(err)
    }

    s.processConfiguration = cfg.ProcessConfiguration
    s.Lock()
    s.procConfig = cfg.ProcessConfiguration
    s.Unlock()

    return nil
}

@@ -358,7 +154,7 @@ func (s *Server) CreateEnvironment() error {

// Gets the process configuration data for the server.
func (s *Server) GetProcessConfiguration() (*api.ServerConfigurationResponse, *api.RequestError, error) {
    return api.NewRequester().GetServerConfiguration(s.Uuid)
    return api.NewRequester().GetServerConfiguration(s.Id())
}

// Helper function that can receive a power action and then process the
@@ -368,11 +164,7 @@ func (s *Server) HandlePowerAction(action PowerAction) error {
    case "start":
        return s.Environment.Start()
    case "restart":
        if err := s.Environment.WaitForStop(60, false); err != nil {
            return err
        }

        return s.Environment.Start()
        return s.Environment.Restart()
    case "stop":
        return s.Environment.Stop()
    case "kill":
@@ -381,3 +173,8 @@ func (s *Server) HandlePowerAction(action PowerAction) error {
        return errors.New("an invalid power action was provided")
    }
}

// Checks if the server is marked as being suspended or not on the system.
func (s *Server) IsSuspended() bool {
    return s.Config().Suspended
}
@@ -13,6 +13,13 @@ import (

var stateMutex sync.Mutex

const (
    ProcessOfflineState  = "offline"
    ProcessStartingState = "starting"
    ProcessRunningState  = "running"
    ProcessStoppingState = "stopping"
)

// Returns the state of all servers.
func getServerStates() (map[string]string, error) {
    // Request a lock after we check if the file exists.
@@ -40,7 +47,7 @@ func saveServerStates() error {
    // Get the states of all servers on the daemon.
    states := map[string]string{}
    for _, s := range GetServers().All() {
        states[s.Uuid] = s.GetState()
        states[s.Id()] = s.GetState()
    }

    // Convert the map to a json object.
@@ -60,13 +67,6 @@ func saveServerStates() error {
    return nil
}

const (
    ProcessOfflineState  = "offline"
    ProcessStartingState = "starting"
    ProcessRunningState  = "running"
    ProcessStoppingState = "stopping"
)

// Sets the state of the server internally. This function handles crash detection as
// well as reporting to event listeners for the server.
func (s *Server) SetState(state string) error {
@@ -76,16 +76,14 @@ func (s *Server) SetState(state string) error {
    prevState := s.GetState()

    // Obtain a mutex lock and update the current state of the server.
    s.Lock()
    s.State = state
    // Update the currently tracked state for the server.
    s.Proc().setInternalState(state)

    // Emit the event to any listeners that are currently registered.
    s.Log().WithField("status", s.State).Debug("saw server status change event")
    s.Events().Publish(StatusEvent, s.State)

    // Release the lock as it is no longer needed for the following actions.
    s.Unlock()
    if prevState != state {
        s.Log().WithField("status", s.Proc().State).Debug("saw server status change event")
        s.Events().Publish(StatusEvent, s.Proc().State)
    }

    // Persist this change to the disk immediately so that should the Daemon be stopped or
    // crash we can immediately restore the server state.
@@ -128,15 +126,14 @@ func (s *Server) SetState(state string) error {

// Returns the current state of the server in a race-safe manner.
func (s *Server) GetState() string {
    s.RLock()
    defer s.RUnlock()

    return s.State
    return s.Proc().getInternalState()
}

// Determines if the server state is running or not. This is different than the
// environment state, it is simply the tracked state from this daemon instance, and
// not the response from Docker.
func (s *Server) IsRunning() bool {
    return s.GetState() == ProcessRunningState || s.GetState() == ProcessStartingState
    st := s.GetState()

    return st == ProcessRunningState || st == ProcessStartingState
}
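For context, saveServerStates (only partially visible in the hunks above) amounts to serializing the uuid-to-state map to JSON on disk so getServerStates can restore it after a daemon restart. A minimal sketch, with the file path and helper name as assumptions rather than the actual Wings code:

    // Hypothetical condensed form of saveServerStates.
    func saveStatesSketch(statesPath string) error {
        states := map[string]string{}
        for _, s := range GetServers().All() {
            states[s.Id()] = s.GetState()
        }

        data, err := json.Marshal(states)
        if err != nil {
            return errors.WithStack(err)
        }

        // Produces a file like {"<server uuid>": "running"} that
        // getServerStates can read back on the next boot.
        return ioutil.WriteFile(statesPath, data, 0644)
    }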
@@ -15,7 +15,7 @@ import (
// it is up to the specific environment to determine what needs to happen when
// that is the case.
func (s *Server) UpdateDataStructure(data []byte, background bool) error {
    src := new(Server)
    src := new(Configuration)
    if err := json.Unmarshal(data, src); err != nil {
        return errors.WithStack(err)
    }
@@ -23,13 +23,29 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
    // Don't allow obviously corrupted data to pass through into this function. If the UUID
    // doesn't match something has gone wrong and the API is attempting to meld this server
    // instance into a totally different one, which would be bad.
    if src.Uuid != "" && s.Uuid != "" && src.Uuid != s.Uuid {
    if src.Uuid != "" && s.Id() != "" && src.Uuid != s.Id() {
        return errors.New("attempting to merge a data stack with an invalid UUID")
    }

    // Grab a copy of the configuration to work on.
    c := *s.Config()

    // Lock our copy of the configuration since the deferred unlock will end up acting upon this
    // new memory address rather than the old one. If we don't lock this, the deferred unlock will
    // cause a panic when it goes to run. However, since we only update s.cfg at the end, if there
    // is an error before that point we'll still properly unlock the original configuration for the
    // server.
    c.mu.Lock()

    // Lock the server configuration while we're doing this merge to avoid anything
    // trying to overwrite it or make modifications while we're sorting out what we
    // need to do.
    s.cfg.mu.Lock()
    defer s.cfg.mu.Unlock()

    // Merge the new data object that we have received with the existing server data object
    // and then save it to the disk so it is persistent.
    if err := mergo.Merge(s, src, mergo.WithOverride); err != nil {
    if err := mergo.Merge(&c, src, mergo.WithOverride); err != nil {
        return errors.WithStack(err)
    }

@@ -39,9 +55,9 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
    // backfiring at some point, but until then...
    //
    // We'll go ahead and do this with swap as well.
    s.Build.CpuLimit = src.Build.CpuLimit
    s.Build.Swap = src.Build.Swap
    s.Build.DiskSpace = src.Build.DiskSpace
    c.Build.CpuLimit = src.Build.CpuLimit
    c.Build.Swap = src.Build.Swap
    c.Build.DiskSpace = src.Build.DiskSpace

    // Mergo can't quite handle this boolean value correctly, so for now we'll just
    // handle this edge case manually since none of the other data passed through in this
@@ -51,7 +67,7 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
        return errors.WithStack(err)
    }
    } else {
        s.Container.OomDisabled = v
        c.Container.OomDisabled = v
    }

    // Mergo also cannot handle this boolean value.
@@ -60,21 +76,28 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
        return errors.WithStack(err)
    }
    } else {
        s.Suspended = v
        c.Suspended = v
    }
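The manual copying of the limits and booleans exists because mergo treats zero values (0, false, "") as empty and will not write them into the destination even with WithOverride, so a legitimate "reset to zero" update would be silently dropped. A small illustration of that assumed behavior:

    type build struct {
        CpuLimit int64
        Swap     int64
    }

    dst := build{CpuLimit: 200, Swap: 512}
    src := build{} // an update that legitimately sets both limits to 0

    _ = mergo.Merge(&dst, src, mergo.WithOverride)

    // mergo sees 0 as an empty value and leaves dst untouched, so dst.CpuLimit
    // is still 200, which is why the explicit c.Build.CpuLimit = src.Build.CpuLimit
    // style assignments above are needed.
    fmt.Println(dst.CpuLimit, dst.Swap) // 200 512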
    // Environment and Mappings should be treated as a full update at all times, never a
    // true patch, otherwise we can't know what we're passing along.
    if src.EnvVars != nil && len(src.EnvVars) > 0 {
        s.EnvVars = src.EnvVars
        c.EnvVars = src.EnvVars
    }

    if src.Allocations.Mappings != nil && len(src.Allocations.Mappings) > 0 {
        s.Allocations.Mappings = src.Allocations.Mappings
        c.Allocations.Mappings = src.Allocations.Mappings
    }

    if src.Mounts != nil && len(src.Mounts) > 0 {
        c.Mounts = src.Mounts
    }

    // Update the configuration once we have a lock on the configuration object.
    s.cfg = c

    if background {
        s.runBackgroundActions()
        go s.runBackgroundActions()
    }

    return nil
@@ -87,24 +110,22 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
// These tasks run in independent threads where relevant to speed up any updates
// that need to happen.
func (s *Server) runBackgroundActions() {
    // Check if the server is now suspended, and if so and the process is not terminated
    // yet, do it immediately.
    if s.IsSuspended() && s.GetState() != ProcessOfflineState {
        s.Log().Info("server suspended with running process state, terminating now")

        if err := s.Environment.WaitForStop(10, true); err != nil {
            s.Log().WithField("error", err).Warn("failed to terminate server environment after suspension")
        }
    }

    if !s.IsSuspended() {
        // Update the environment in place, allowing memory and CPU usage to be adjusted
        // on the fly without the user needing to reboot (theoretically).
        go func(server *Server) {
            server.Log().Info("performing server limit modification on-the-fly")
            if err := server.Environment.InSituUpdate(); err != nil {
                server.Log().WithField("error", err).Warn("failed to perform on-the-fly update of the server environment")
            }
        }(s)

        // Check if the server is now suspended, and if so and the process is not terminated
        // yet, do it immediately.
        go func(server *Server) {
            if server.Suspended && server.GetState() != ProcessOfflineState {
                server.Log().Info("server suspended with running process state, terminating now")

                if err := server.Environment.WaitForStop(10, true); err != nil {
                    server.Log().WithField("error", err).Warn("failed to terminate server environment after suspension")
        s.Log().Info("performing server limit modification on-the-fly")
        if err := s.Environment.InSituUpdate(); err != nil {
            s.Log().WithField("error", err).Warn("failed to perform on-the-fly update of the server environment")
        }
    }
    }(s)
}
@@ -8,6 +8,7 @@ import (
    "github.com/pterodactyl/wings/config"
    "github.com/pterodactyl/wings/server"
    "go.uber.org/zap"
    "regexp"
)

func Initialize(config *config.Configuration) error {
@@ -48,7 +49,7 @@ func Initialize(config *config.Configuration) error {

func validatePath(fs sftp_server.FileSystem, p string) (string, error) {
    s := server.GetServers().Find(func(server *server.Server) bool {
        return server.Uuid == fs.UUID
        return server.Id() == fs.UUID
    })

    if s == nil {
@@ -60,7 +61,7 @@ func validatePath(fs sftp_server.FileSystem, p string) (string, error) {

func validateDiskSpace(fs sftp_server.FileSystem) bool {
    s := server.GetServers().Find(func(server *server.Server) bool {
        return server.Uuid == fs.UUID
        return server.Id() == fs.UUID
    })

    if s == nil {
@@ -70,25 +71,48 @@ func validateDiskSpace(fs sftp_server.FileSystem) bool {
    return s.Filesystem.HasSpaceAvailable()
}
var validUsernameRegexp = regexp.MustCompile(`^(?i)(.+)\.([a-z0-9]{8})$`)

// Validates a set of credentials for an SFTP login against the Pterodactyl Panel and returns
// the server's UUID if the credentials were valid.
func validateCredentials(c sftp_server.AuthenticationRequest) (*sftp_server.AuthenticationResponse, error) {
    resp, err := api.NewRequester().ValidateSftpCredentials(c)

    log.WithFields(log.Fields{"subsystem": "sftp", "username": c.User}).Debug("validating credentials for SFTP connection")

    f := log.Fields{
        "subsystem": "sftp",
        "username":  c.User,
        "ip":        c.IP,
    }

    // If the username doesn't meet the expected format that the Panel would even recognize, just go ahead
    // and bail out of the process here to avoid accidentally brute forcing the panel if a bot decides
    // to connect and spam username attempts.
    if !validUsernameRegexp.MatchString(c.User) {
        log.WithFields(f).Warn("failed to validate user credentials (invalid format)")

        return nil, new(sftp_server.InvalidCredentialsError)
    }

    resp, err := api.NewRequester().ValidateSftpCredentials(c)
    if err != nil {
        if sftp_server.IsInvalidCredentialsError(err) {
            log.WithFields(f).Warn("failed to validate user credentials (invalid username or password)")
        } else {
            log.WithFields(f).Error("encountered an error while trying to validate user credentials")
        }

        return resp, err
    }

    s := server.GetServers().Find(func(server *server.Server) bool {
        return server.Uuid == resp.Server
        return server.Id() == resp.Server
    })

    if s == nil {
        return resp, errors.New("no matching server with UUID found")
    }

    s.Log().WithFields(log.Fields{"subsystem": "sftp", "username": c.User}).Debug("matched user to server instance, credentials successfully validated")
    s.Log().WithFields(f).Debug("credentials successfully validated and matched user to server instance")

    return resp, err
}
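The regexp accepts the Panel username, a literal dot, then an eight-character alphanumeric suffix (presumably the short form of the server's UUID), which is what scopes a single login to a single server:

    fmt.Println(validUsernameRegexp.MatchString("someuser.0df4ce0f")) // true
    fmt.Println(validUsernameRegexp.MatchString("someuser"))          // false: no server suffix
    fmt.Println(validUsernameRegexp.MatchString("someuser.0df4"))     // false: suffix must be 8 chars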
@@ -1,6 +1,6 @@
package system

const (
var (
    // The current version of this software.
    Version = "0.0.1"
)
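Switching Version from a const block to a var block is presumably what allows release builds to stamp the real version at link time: Go's -X linker flag can only overwrite package-level string variables, never constants, so a build can pass something like -ldflags "-X github.com/pterodactyl/wings/system.Version=<version>" to replace the 0.0.1 placeholder.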
|
||||