Compare commits
145 Commits
v1.0.0-bet ... v1.0.0-bet
| Author | SHA1 | Date |
|---|---|---|
|  | e7746a8359 |  |
|  | cb850fd81a |  |
|  | 5079c67aee |  |
|  | e28c05ae56 |  |
|  | 21e58b57a1 |  |
|  | 16467fa7ff |  |
|  | 0cbaad5c72 |  |
|  | a00288aa64 |  |
|  | 6de18f09e5 |  |
|  | 8315ff8ae1 |  |
|  | 0b9d923d15 |  |
|  | f0eeaae747 |  |
|  | 085a02726b |  |
|  | 4f1b0c67d6 |  |
|  | 6e1844a8c9 |  |
|  | 7c3da84248 |  |
|  | 1b5684e6f8 |  |
|  | 115131575d |  |
|  | 21303dc517 |  |
|  | daf682b991 |  |
|  | a72d6f3768 |  |
|  | d262c12b43 |  |
|  | f3c8220bd9 |  |
|  | 7e1b7e7f36 |  |
|  | b2d34cf8e7 |  |
|  | a635cdd6b2 |  |
|  | ae46add8ef |  |
|  | a4e6c4b701 |  |
|  | f4c10e5a23 |  |
|  | b64f1897fb |  |
|  | 6fd7ed23e3 |  |
|  | a98e376593 |  |
|  | eefc11bd0d |  |
|  | 60ebde4447 |  |
|  | b3eba78743 |  |
|  | 233cefd129 |  |
|  | d60b2d6163 |  |
|  | 292f0d6452 |  |
|  | 7147f477e2 |  |
|  | 2cef055ff2 |  |
|  | daf401e326 |  |
|  | c1e591c99b |  |
|  | 79928aff76 |  |
|  | 1f1eb507a9 |  |
|  | 93228933bf |  |
|  | 7afd5854bd |  |
|  | fe531e400d |  |
|  | 1c92178091 |  |
|  | be990c9620 |  |
|  | 31d00333a7 |  |
|  | 7516ef1aa4 |  |
|  | 5ef58cadee |  |
|  | a110d5768f |  |
|  | 13b89b93f2 |  |
|  | 39f3408e4f |  |
|  | c04042d07a |  |
|  | 7d9c608f6b |  |
|  | 7a456dcac4 |  |
|  | c071df2a31 |  |
|  | f7948939eb |  |
|  | f3419495cd |  |
|  | 9f95efa3ae |  |
|  | 3a6050446f |  |
|  | 63e7bde39c |  |
|  | d339996b4e |  |
|  | deb9305f56 |  |
|  | c5f4c3cfcb |  |
|  | 860e300c22 |  |
|  | 82912595b7 |  |
|  | 65809b5731 |  |
|  | e5b844d2c4 |  |
|  | ea2630946a |  |
|  | 79a582a5f2 |  |
|  | d6a3d9adb1 |  |
|  | d284c4aec9 |  |
|  | 05a4730489 |  |
|  | 2dad3102e0 |  |
|  | b33f14ddd9 |  |
|  | 1f6789cba3 |  |
|  | 073247e4e1 |  |
|  | a3d83d23bd |  |
|  | f318962371 |  |
|  | db31722cfc |  |
|  | d91de3d912 |  |
|  | 495ad4defd |  |
|  | b03aa20c8d |  |
|  | 7d4a8d7f7e |  |
|  | 65b1b96b06 |  |
|  | 198a22f446 |  |
|  | e1531802cf |  |
|  | 5c2686fc6d |  |
|  | 0ae286d617 |  |
|  | 62e5547c6d |  |
|  | 00a026c2a5 |  |
|  | 359564bd91 |  |
|  | f8bffd8391 |  |
|  | 4b366ae19e |  |
|  | 82ffb9804d |  |
|  | 54510057bb |  |
|  | 6d7ab865d7 |  |
|  | 74097cc4ad |  |
|  | 4483bfa2aa |  |
|  | 662eb17241 |  |
|  | bd063682dc |  |
|  | c802a3397e |  |
|  | 276bd2be33 |  |
|  | e83495a09e |  |
|  | 64cad5c35d |  |
|  | 911b809a4e |  |
|  | 3fe884670d |  |
|  | 804f3d5ca9 |  |
|  | 0bd28a4480 |  |
|  | 326b5b6554 |  |
|  | cfca0d7f07 |  |
|  | 5e60cb2eb0 |  |
|  | d178a0d96b |  |
|  | fd83424ee2 |  |
|  | 483b652087 |  |
|  | a6645aa741 |  |
|  | ffd7357a1c |  |
|  | b36f0de337 |  |
|  | b2cf222a3a |  |
|  | ced8a5bcbd |  |
|  | 7bba1d4fd6 |  |
|  | 1b2eb50a32 |  |
|  | fab5d36917 |  |
|  | ee184768b8 |  |
|  | 2e055cf630 |  |
|  | fab489d264 |  |
|  | 7f93e5f9d5 |  |
|  | ac011214f7 |  |
|  | 58262aa252 |  |
|  | eba5aa8cbe |  |
|  | b2797ed292 |  |
|  | 507d0100cf |  |
|  | 91d12ab9a7 |  |
|  | 1e2da95d26 |  |
|  | 2828eaed32 |  |
|  | 12d43a9f49 |  |
|  | 00ed6f3985 |  |
|  | 377cae4d48 |  |
|  | 9c5855663c |  |
|  | da093e7cf7 |  |
|  | df9c4835c4 |  |
|  | 65102966a1 |  |
33  .github/workflows/build-test.yml  (vendored, new file)
@@ -0,0 +1,33 @@
name: "Build & Test"

on:
  push:
    branches-ignore:
      - 'master'
      - 'release/**'
  pull_request:

jobs:
  build:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: '^1.14.2'

      - name: Build
        run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -ldflags "-X github.com/pterodactyl/wings/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_linux_amd64 -v wings.go

      - name: Test
        run: go test ./...

      - name: Compress binary and make it executable
        if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
        run: upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64

      - uses: actions/upload-artifact@v2
        if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
        with:
          name: wings_linux_amd64
          path: build/wings_linux_amd64
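The Build step above injects a commit-based version through the linker's -X flag. Two notes, offered as reading aids rather than changes: go build applies only the last -ldflags value when the flag is repeated, so the -s -w pair in this command is effectively dropped, and -X can only rewrite a package-level var, never a constant. A minimal sketch of the Go side this workflow assumes; the default value "develop" is an assumption, while the file and variable names come from this compare (system/const.go is patched by release.yml further down):

```go
// Sketch of the variable the -X flag rewrites at link time. The real
// declaration lives in system/const.go in this repository; the default
// value shown here is only an assumption for local builds.
package system

// Version is overridden via:
//   go build -ldflags "-X github.com/pterodactyl/wings/system.Version=dev-abc1234"
var Version = "develop"
```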
35  .github/workflows/codeql-analysis.yml  (vendored, new file)
@@ -0,0 +1,35 @@
name: "Code scanning - action"

on:
  push:
  pull_request:
  schedule:
    - cron: '0 21 * * 6'

jobs:
  CodeQL-Build:

    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          # We must fetch at least the immediate parents so that if this is
          # a pull request then we can checkout the head.
          fetch-depth: 2

      # If this run was triggered by a pull request event, then checkout
      # the head of the pull request instead of the merge commit.
      - run: git checkout HEAD^2
        if: ${{ github.event_name == 'pull_request' }}

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        # Override language selection by uncommenting this and choosing your languages
        with:
          languages: go

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
87  .github/workflows/release.yml  (vendored, new file)
@@ -0,0 +1,87 @@
name: "Release"

on:
  push:
    tags:
      - 'v*'

jobs:
  release:
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: '^1.14.2'

      - name: Build
        env:
          REF: ${{ github.ref }}
        run: GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -ldflags "-X github.com/pterodactyl/wings/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_linux_amd64 -v wings.go

      - name: Test
        run: go test ./...

      - name: Compress binary and make it executable
        run: upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64

      - name: Extract changelog
        env:
          REF: ${{ github.ref }}
        run: |
          sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG
          echo ::set-output name=version_name::`sed -nr "s/^## (${REF:10} .*)$/\1/p" CHANGELOG.md`

      - name: Create checksum and add to changelog
        run: |
          SUM=`cd build && sha256sum wings_linux_amd64`
          echo -e "\n#### SHA256 Checksum\n\n\`\`\`\n$SUM\n\`\`\`\n" >> ./RELEASE_CHANGELOG
          echo $SUM > checksum.txt

      - name: Create release branch
        env:
          REF: ${{ github.ref }}
        run: |
          BRANCH=release/${REF:10}
          git config --local user.email "ci@pterodactyl.io"
          git config --local user.name "Pterodactyl CI"
          git checkout -b $BRANCH
          git push -u origin $BRANCH
          sed -i "s/ Version = \".*\"/ Version = \"${REF:11}\"/" system/const.go
          git add system/const.go
          git commit -m "bump version for release"
          git push

      - name: Create Release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: ${{ steps.extract_changelog.outputs.version_name }}
          body_path: ./RELEASE_CHANGELOG
          draft: true
          prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}

      - name: Upload binary
        id: upload-release-binary
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: build/wings_linux_amd64
          asset_name: wings_linux_amd64
          asset_content_type: application/octet-stream

      - name: Upload checksum
        id: upload-release-checksum
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./checksum.txt
          asset_name: checksum.txt
          asset_content_type: text/plain
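In the Extract changelog step, REF holds github.ref (for example refs/tags/v1.0.0-beta.3), so ${REF:10} strips the ten-character refs/tags/ prefix and ${REF:11} also drops the leading v for the substitution into system/const.go. The sed range then prints everything between the matching "## <tag>" heading and the next "## " heading. A hedged, illustrative Go equivalent of that extraction, not part of the workflow; the helper name and sample input are invented:

```go
// Illustrative only: the same "grab one release section" logic that the
// workflow's sed one-liner performs, written as a small Go helper.
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// extractSection returns the CHANGELOG.md body between "## <tag>" and the
// next "## " heading, mirroring:
//   sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md
func extractSection(changelog, tag string) string {
	var out []string
	in := false
	s := bufio.NewScanner(strings.NewReader(changelog))
	for s.Scan() {
		line := s.Text()
		if strings.HasPrefix(line, "## ") {
			// Heading lines are never emitted; they only toggle the range.
			in = strings.HasPrefix(line, "## "+tag)
			continue
		}
		if in {
			out = append(out, line)
		}
	}
	return strings.Join(out, "\n")
}

func main() {
	changelog := "# Changelog\n\n## v1.0.0-beta.3\n### Fixed\n* Example fix.\n\n## v1.0.0-alpha.2\n"
	fmt.Println(extractSection(changelog, "v1.0.0-beta.3"))
}
```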
1  .gitignore  (vendored)
@@ -46,3 +46,4 @@ test_*/
 !.gitkeep
 debug
 data/.states.json
+.DS_Store
11  CHANGELOG.md
@@ -1,5 +1,16 @@
 # Changelog

+## v1.0.0-beta.3
+### Fixed
+* Daemon will no longer crash if someone requests a websocket for a deleted server.
+* Temporary directories are now created properly if missing during the server installation process.
+
+### Added
+* Added support for using Amazon S3 as a backup location for archives.
+
+### Changed
+* Memory overhead for containers is now 5/10/15% higher than the passed limit to account for JVM heap and prevent crashing.
+
 ## v1.0.0-alpha.2
 ### Added
 * Ability to run an installation process for a server and notify the panel when completed.
14  Dockerfile  (new file)
@@ -0,0 +1,14 @@
# ----------------------------------
# Pterodactyl Panel Dockerfile
# ----------------------------------

FROM golang:1.14-alpine
COPY . /go/wings/
WORKDIR /go/wings/
RUN apk add --no-cache upx \
    && go build -ldflags="-s -w" \
    && upx --brute wings

FROM alpine:latest
COPY --from=0 /go/wings/wings /usr/bin/
CMD ["wings","--config", "/var/lib/pterodactyl/config.yml"]
32  Makefile
@@ -1,28 +1,12 @@
-BINARY = "build/wings"
-OSARCHLIST = "darwin/386 darwin/amd64 linux/386 linux/amd64 linux/arm linux/arm64 windows/386 windows/amd64"
+build:
+	GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=/Users/dane/Sites/development/code" -o build/wings_linux_amd64 -v wings.go

-all: $(BINARY)
+compress:
+	upx --brute build/wings_*

-$(BINARY):
-	go build -o $(BINARY)
+cross-build: clean build compress

-cross-build:
-	gox -osarch $(OSARCHLIST) -output "build/{{.Dir}}_{{.OS}}_{{.Arch}}"
+clean:
+	rm -rf build/wings_*

-.PHONY: install
-install:
-	go install
-
-test:
-	go test `go list ./... | grep -v "/vendor/"`
-
-coverage:
-	goverage -coverprofile=coverage.out ./...
-	go tool cover -html=coverage.out
-
-dependencies:
-	glide install
-
-install-tools:
-	go get -u github.com/mitchellh/gox
-	go get -u github.com/haya14busa/goverage
+.PHONY: all build compress clean
38  api/api.go
@@ -4,9 +4,9 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"github.com/apex/log"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/config"
-	"go.uber.org/zap"
 	"io/ioutil"
 	"net/http"
 	"strings"
@@ -45,6 +45,26 @@ func (r *PanelRequest) GetEndpoint(endpoint string) string {
 	)
 }

+// Logs the request into the debug log with all of the important request bits.
+// The authorization key will be cleaned up before being output.
+func (r *PanelRequest) logDebug(req *http.Request) {
+	headers := make(map[string][]string)
+	for k, v := range req.Header {
+		if k != "Authorization" || len(v) == 0 {
+			headers[k] = v
+			continue
+		}
+
+		headers[k] = []string{v[0][0:15] + "(redacted)"}
+	}
+
+	log.WithFields(log.Fields{
+		"method":   req.Method,
+		"endpoint": req.URL.String(),
+		"headers":  headers,
+	}).Debug("making request to external HTTP endpoint")
+}
+
 func (r *PanelRequest) Get(url string) (*http.Response, error) {
 	c := r.GetClient()

@@ -55,7 +75,7 @@ func (r *PanelRequest) Get(url string) (*http.Response, error) {
 		return nil, err
 	}

-	zap.S().Debugw("GET request to endpoint", zap.String("endpoint", r.GetEndpoint(url)), zap.Any("headers", req.Header))
+	r.logDebug(req)

 	return c.Do(req)
 }
@@ -70,7 +90,7 @@ func (r *PanelRequest) Post(url string, data []byte) (*http.Response, error) {
 		return nil, err
 	}

-	zap.S().Debugw("POST request to endpoint", zap.String("endpoint", r.GetEndpoint(url)), zap.Any("headers", req.Header))
+	r.logDebug(req)

 	return c.Do(req)
 }
@@ -110,6 +130,12 @@ func (r *PanelRequest) HttpResponseCode() int {
 	return r.Response.StatusCode
 }

+func IsRequestError(err error) bool {
+	_, ok := err.(*RequestError)
+
+	return ok
+}
+
 type RequestError struct {
 	Code   string `json:"code"`
 	Status string `json:"status"`
@@ -117,10 +143,14 @@ type RequestError struct {
 }

 // Returns the error response in a string form that can be more easily consumed.
-func (re *RequestError) String() string {
+func (re *RequestError) Error() string {
 	return fmt.Sprintf("%s: %s (HTTP/%s)", re.Code, re.Detail, re.Status)
 }

+func (re *RequestError) String() string {
+	return re.Error()
+}
+
 type RequestErrorBag struct {
 	Errors []RequestError `json:"errors"`
 }
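Since *RequestError now implements Error(), it satisfies Go's error interface, and the new IsRequestError helper lets callers distinguish a structured panel error from a plain transport failure. A small usage sketch, assuming the api package as it stands after this compare; the report helper is hypothetical, not code from the commit:

```go
// Hypothetical usage sketch for the new IsRequestError helper.
package main

import (
	"fmt"

	"github.com/pterodactyl/wings/api"
)

func report(err error) {
	if err == nil {
		return
	}

	// A *RequestError means the panel answered with a structured error body.
	if api.IsRequestError(err) {
		re := err.(*api.RequestError)
		fmt.Printf("panel error: code=%s status=%s\n", re.Code, re.Status)
		return
	}

	// Anything else is a network or encoding problem and is logged as-is.
	fmt.Println("request failed:", err)
}

func main() {
	report(&api.RequestError{Code: "ValidationException", Status: "422"})
}
```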
@@ -4,7 +4,6 @@ import (
 	"encoding/json"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/sftp-server"
-	"go.uber.org/zap"
 )

 func (r *PanelRequest) ValidateSftpCredentials(request sftp_server.AuthenticationRequest) (*sftp_server.AuthenticationResponse, error) {
@@ -23,13 +22,10 @@ func (r *PanelRequest) ValidateSftpCredentials(request sftp_server.Authenticatio

 	if r.HasError() {
 		if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 {
-			zap.S().Debugw("failed to validate server credentials for SFTP", zap.String("error", r.Error().String()))
-
 			return nil, new(sftp_server.InvalidCredentialsError)
 		}

 		rerr := errors.New(r.Error().String())
-		zap.S().Warnw("error validating SFTP credentials", zap.Error(rerr))

 		return nil, rerr
 	}
60  cmd/config_finder.go  (new file)
@@ -0,0 +1,60 @@
package cmd

import (
	"github.com/pterodactyl/wings/config"
	"os"
	"path/filepath"
)

// We've gone through a couple of iterations of where the configuration is stored. This
// helpful little function will look through the three areas it might have ended up, and
// return it.
//
// We only run this if the configuration flag for the instance is not actually passed in
// via the command line. Once found, the configuration is moved into the expected default
// location. Only errors are returned from this function, you can safely assume that after
// running this the configuration can be found in the correct default location.
func RelocateConfiguration() error {
	var match string
	check := []string{
		config.DefaultLocation,
		"/var/lib/pterodactyl/config.yml",
		"/etc/wings/config.yml",
	}

	// Loop over all of the configuration paths, and return which one we found, if
	// any.
	for _, p := range check {
		if s, err := os.Stat(p); err != nil {
			if !os.IsNotExist(err) {
				return err
			}
		} else if !s.IsDir() {
			match = p
			break
		}
	}

	// Just return a generic not exist error at this point if we didn't have a match, this
	// will allow the caller to handle displaying a more friendly error to the user. If we
	// did match in the default location, go ahead and return successfully.
	if match == "" {
		return os.ErrNotExist
	} else if match == config.DefaultLocation {
		return nil
	}

	// The rest of this function simply creates the new default location and moves the
	// old configuration file over to the new location, then sets the permissions on the
	// file correctly so that only the user running this process can read it.
	p, _ := filepath.Split(config.DefaultLocation)
	if err := os.MkdirAll(p, 0755); err != nil {
		return err
	}

	if err := os.Rename(match, config.DefaultLocation); err != nil {
		return err
	}

	return os.Chmod(config.DefaultLocation, 0600)
}
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"github.com/AlecAivazis/survey/v2"
 	"github.com/AlecAivazis/survey/v2/terminal"
-	"github.com/creasty/defaults"
 	"github.com/pterodactyl/wings/config"
 	"github.com/spf13/cobra"
 	"io/ioutil"
@@ -147,8 +146,8 @@ func configureCmdRun(cmd *cobra.Command, args []string) {

 	b, err := ioutil.ReadAll(res.Body)

-	cfg := new(config.Configuration)
-	if err := defaults.Set(cfg); err != nil {
+	cfg, err := config.NewFromPath(configPath)
+	if err != nil {
 		panic(err)
 	}

226  cmd/diagnostics.go  (new file)
@@ -0,0 +1,226 @@
package cmd

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os/exec"
	"path"
	"strings"

	"github.com/AlecAivazis/survey/v2"
	"github.com/AlecAivazis/survey/v2/terminal"
	"github.com/docker/cli/components/engine/pkg/parsers/operatingsystem"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/parsers/kernel"
	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/system"
	"github.com/spf13/cobra"
)

const DefaultHastebinUrl = "https://hastebin.com"

var (
	diagnosticsArgs struct {
		IncludeEndpoints   bool
		IncludeLogs        bool
		ReviewBeforeUpload bool
		HastebinURL        string
	}
)

var diagnosticsCmd = &cobra.Command{
	Use:   "diagnostics",
	Short: "Collect diagnostics information.",
	Run:   diagnosticsCmdRun,
}

func init() {
	diagnosticsCmd.PersistentFlags().StringVar(&diagnosticsArgs.HastebinURL, "hastebin-url", DefaultHastebinUrl, "The url of the hastebin instance to use.")
}

// diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
// We collect:
// - wings and docker versions
// - relevant parts of daemon configuration
// - the docker debug output
// - running docker containers
// - logs
func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
	questions := []*survey.Question{
		{
			Name:   "IncludeEndpoints",
			Prompt: &survey.Confirm{Message: "Do you want to include endpoints (i.e. the FQDN/IP of your panel)?", Default: false},
		},
		{
			Name:   "IncludeLogs",
			Prompt: &survey.Confirm{Message: "Do you want to include the latest logs?", Default: true},
		},
		{
			Name: "ReviewBeforeUpload",
			Prompt: &survey.Confirm{
				Message: "Do you want to review the collected data before uploading to hastebin.com?",
				Help:    "The data, especially the logs, might contain sensitive information, so you should review it. You will be asked again if you want to uplaod.",
				Default: true,
			},
		},
	}
	if err := survey.Ask(questions, &diagnosticsArgs); err != nil {
		if err == terminal.InterruptErr {
			return
		}
		panic(err)
	}

	dockerVersion, dockerInfo, dockerErr := getDockerInfo()
	_ = dockerInfo

	output := &strings.Builder{}
	fmt.Fprintln(output, "Pterodactly Wings - Diagnostics Report")
	printHeader(output, "Versions")
	fmt.Fprintln(output, "wings:", system.Version)
	if dockerErr == nil {
		fmt.Fprintln(output, "Docker", dockerVersion.Version)
	}
	if v, err := kernel.GetKernelVersion(); err == nil {
		fmt.Fprintln(output, "Kernel:", v)
	}
	if os, err := operatingsystem.GetOperatingSystem(); err == nil {
		fmt.Fprintln(output, "OS:", os)
	}

	printHeader(output, "Wings Configuration")
	if cfg, err := config.ReadConfiguration(config.DefaultLocation); cfg != nil {
		fmt.Fprintln(output, "Panel Location:", redact(cfg.PanelLocation))
		fmt.Fprintln(output, "Api Host:", redact(cfg.Api.Host))
		fmt.Fprintln(output, "Api Port:", cfg.Api.Port)
		fmt.Fprintln(output, "Api Ssl Enabled:", cfg.Api.Ssl.Enabled)
		fmt.Fprintln(output, "Api Ssl Certificate:", redact(cfg.Api.Ssl.CertificateFile))
		fmt.Fprintln(output, "Api Ssl Key:", redact(cfg.Api.Ssl.KeyFile))
		fmt.Fprintln(output, "Sftp Address:", redact(cfg.System.Sftp.Address))
		fmt.Fprintln(output, "Sftp Port:", cfg.System.Sftp.Port)
		fmt.Fprintln(output, "Sftp Read Only:", cfg.System.Sftp.ReadOnly)
		fmt.Fprintln(output, "Sftp Diskchecking Disabled:", cfg.System.Sftp.DisableDiskChecking)
		fmt.Fprintln(output, "System Root Directory:", cfg.System.RootDirectory)
		fmt.Fprintln(output, "System Logs Directory:", cfg.System.LogDirectory)
		fmt.Fprintln(output, "System Data Directory:", cfg.System.Data)
		fmt.Fprintln(output, "System Archive Directory:", cfg.System.ArchiveDirectory)
		fmt.Fprintln(output, "System Backup Directory:", cfg.System.BackupDirectory)
		fmt.Fprintln(output, "System Username:", cfg.System.Username)
		fmt.Fprintln(output, "Debug Enabled:", cfg.Debug)
	} else {
		fmt.Println("Failed to load configuration.", err)
	}

	printHeader(output, "Docker: Info")
	fmt.Fprintln(output, "Server Version:", dockerInfo.ServerVersion)
	fmt.Fprintln(output, "Storage Driver:", dockerInfo.Driver)
	if dockerInfo.DriverStatus != nil {
		for _, pair := range dockerInfo.DriverStatus {
			fmt.Fprintf(output, " %s: %s\n", pair[0], pair[1])
		}
	}
	if dockerInfo.SystemStatus != nil {
		for _, pair := range dockerInfo.SystemStatus {
			fmt.Fprintf(output, " %s: %s\n", pair[0], pair[1])
		}
	}
	fmt.Fprintln(output, "LoggingDriver:", dockerInfo.LoggingDriver)
	fmt.Fprintln(output, "CgroupDriver:", dockerInfo.CgroupDriver)
	if len(dockerInfo.Warnings) > 0 {
		for _, w := range dockerInfo.Warnings {
			fmt.Fprintln(output, w)
		}
	}

	printHeader(output, "Docker: Running Containers")
	c := exec.Command("docker", "ps")
	if co, err := c.Output(); err == nil {
		output.Write(co)
	} else {
		fmt.Fprint(output, "Couldn't list containers: ", err)
	}

	printHeader(output, "Latest Wings Logs")
	if diagnosticsArgs.IncludeLogs {
		fmt.Fprintln(output, "No logs found. Probably because nobody implemented logging to files yet :(")
	} else {
		fmt.Fprintln(output, "Logs redacted.")
	}

	fmt.Println("\n--------------- generated report ---------------")
	fmt.Println(output.String())
	fmt.Print("--------------- end of report ---------------\n\n")

	upload := !diagnosticsArgs.ReviewBeforeUpload
	if !upload {
		survey.AskOne(&survey.Confirm{Message: "Upload to " + diagnosticsArgs.HastebinURL + "?", Default: false}, &upload)
	}
	if upload {
		url, err := uploadToHastebin(diagnosticsArgs.HastebinURL, output.String())
		if err == nil {
			fmt.Println("Your report is available here: ", url)
		}
	}
}

func getDockerInfo() (types.Version, types.Info, error) {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return types.Version{}, types.Info{}, err
	}
	dockerVersion, err := cli.ServerVersion(context.Background())
	if err != nil {
		return types.Version{}, types.Info{}, err
	}
	dockerInfo, err := cli.Info(context.Background())
	if err != nil {
		return types.Version{}, types.Info{}, err
	}
	return dockerVersion, dockerInfo, nil
}

func uploadToHastebin(hbUrl, content string) (string, error) {
	r := strings.NewReader(content)
	u, err := url.Parse(hbUrl)
	if err != nil {
		return "", err
	}
	u.Path = path.Join(u.Path, "documents")
	res, err := http.Post(u.String(), "plain/text", r)
	if err != nil || res.StatusCode != 200 {
		fmt.Println("Failed to upload report to ", u.String(), err)
		return "", err
	}
	pres := make(map[string]interface{})
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("Failed to parse response.", err)
		return "", err
	}
	json.Unmarshal(body, &pres)
	if key, ok := pres["key"].(string); ok {
		u, _ := url.Parse(hbUrl)
		u.Path = path.Join(u.Path, key)
		return u.String(), nil
	}
	return "", errors.New("Couldn't find key in response")
}

func redact(s string) string {
	if !diagnosticsArgs.IncludeEndpoints {
		return "{redacted}"
	}
	return s
}

func printHeader(w io.Writer, title string) {
	fmt.Fprintln(w, "\n|\n|", title)
	fmt.Fprintln(w, "| ------------------------------")
}
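uploadToHastebin above decodes the paste service's reply into a map[string]interface{} and reads the key field. A typed decode is one common alternative; the sketch below is only a stylistic illustration built on the same {"key": "..."} shape the existing code expects, not a change proposed in this compare, and the struct name is invented:

```go
// Sketch of a typed alternative to the map-based decode used in
// uploadToHastebin; hastebinResponse is a hypothetical name.
package main

import (
	"encoding/json"
	"fmt"
)

type hastebinResponse struct {
	Key string `json:"key"`
}

func main() {
	body := []byte(`{"key":"abc123"}`)

	var res hastebinResponse
	if err := json.Unmarshal(body, &res); err != nil || res.Key == "" {
		fmt.Println("couldn't find key in response")
		return
	}

	// The struct makes the expected response shape explicit and avoids the
	// interface{} type assertion.
	fmt.Println("report available at: https://hastebin.com/" + res.Key)
}
```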
163  cmd/root.go
@@ -8,6 +8,11 @@ import (
 	"path"
 	"strings"

+	"github.com/apex/log"
+	"github.com/mitchellh/colorstring"
+	"github.com/pterodactyl/wings/loggers/cli"
+	"golang.org/x/crypto/acme/autocert"
+
 	"github.com/pkg/errors"
 	"github.com/pkg/profile"
 	"github.com/pterodactyl/wings/config"
@@ -24,20 +29,33 @@ import (
 var configPath = config.DefaultLocation
 var debug = false
 var shouldRunProfiler = false
+var useAutomaticTls = false
+var tlsHostname = ""
+var showVersion = false

 var root = &cobra.Command{
 	Use:   "wings",
 	Short: "The wings of the pterodactyl game management panel",
 	Long:  ``,
+	PreRun: func(cmd *cobra.Command, args []string) {
+		if useAutomaticTls && len(tlsHostname) == 0 {
+			fmt.Println("A TLS hostname must be provided when running wings with automatic TLS, e.g.:\n\n ./wings --auto-tls --tls-hostname my.example.com")
+			os.Exit(1)
+		}
+	},
 	Run: rootCmdRun,
 }

 func init() {
+	root.PersistentFlags().BoolVar(&showVersion, "version", false, "show the version and exit")
 	root.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
 	root.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
 	root.PersistentFlags().BoolVar(&shouldRunProfiler, "profile", false, "pass in order to profile wings")
+	root.PersistentFlags().BoolVar(&useAutomaticTls, "auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
+	root.PersistentFlags().StringVar(&tlsHostname, "tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")

 	root.AddCommand(configureCmd)
+	root.AddCommand(diagnosticsCmd)
 }

 // Get the configuration path based on the arguments provided.
@@ -62,11 +80,27 @@ func readConfiguration() (*config.Configuration, error) {
 }

 func rootCmdRun(*cobra.Command, []string) {
-	// Profile wings in production!!!!
+	if showVersion {
+		fmt.Println(system.Version)
+		os.Exit(0)
+	}
+
 	if shouldRunProfiler {
 		defer profile.Start().Stop()
 	}

+	// Only attempt configuration file relocation if a custom location has not
+	// been specified in the command startup.
+	if configPath == config.DefaultLocation {
+		if err := RelocateConfiguration(); err != nil {
+			if os.IsNotExist(err) {
+				exitWithConfigurationNotice()
+			}
+
+			panic(err)
+		}
+	}
+
 	c, err := readConfiguration()
 	if err != nil {
 		panic(err)
@@ -81,10 +115,10 @@ func rootCmdRun(*cobra.Command, []string) {
 		panic(err)
 	}

-	zap.S().Infof("using configuration from path: %s", c.GetPath())
+	log.WithField("path", c.GetPath()).Info("loading configuration from path")
 	if c.Debug {
-		zap.S().Debugw("running in debug mode")
-		zap.S().Infow("certificate checking is disabled")
+		log.Debug("running in debug mode")
+		log.Info("certificate checking is disabled")

 		http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
 			InsecureSkipVerify: true,
@@ -95,42 +129,47 @@ func rootCmdRun(*cobra.Command, []string) {
 	config.SetDebugViaFlag(debug)

 	if err := c.System.ConfigureDirectories(); err != nil {
-		zap.S().Panicw("failed to configure system directories for pterodactyl", zap.Error(err))
-		return
+		log.Fatal("failed to configure system directories for pterodactyl")
+		panic(err)
 	}

-	zap.S().Infof("checking for pterodactyl system user \"%s\"", c.System.Username)
+	log.WithField("username", c.System.Username).Info("checking for pterodactyl system user")
 	if su, err := c.EnsurePterodactylUser(); err != nil {
-		zap.S().Panicw("failed to create pterodactyl system user", zap.Error(err))
+		log.Error("failed to create pterodactyl system user")
+		panic(err)
 		return
 	} else {
-		zap.S().Infow("configured system user", zap.String("username", su.Username), zap.String("uid", su.Uid), zap.String("gid", su.Gid))
+		log.WithFields(log.Fields{
+			"username": su.Username,
+			"uid":      su.Uid,
+			"gid":      su.Gid,
+		}).Info("configured system user successfully")
 	}

-	zap.S().Infow("beginning file permission setting on server data directories")
+	log.Info("beginning file permission setting on server data directories")
 	if err := c.EnsureFilePermissions(); err != nil {
-		zap.S().Errorw("failed to properly chown data directories", zap.Error(err))
+		log.WithField("error", err).Error("failed to properly chown data directories")
 	} else {
-		zap.S().Infow("finished ensuring file permissions")
+		log.Info("finished ensuring file permissions")
 	}

 	if err := server.LoadDirectory(); err != nil {
-		zap.S().Fatalw("failed to load server configurations", zap.Error(errors.WithStack(err)))
+		log.WithField("error", err).Fatal("failed to load server configurations")
 		return
 	}

 	if err := environment.ConfigureDocker(&c.Docker); err != nil {
-		zap.S().Fatalw("failed to configure docker environment", zap.Error(errors.WithStack(err)))
+		log.WithField("error", err).Fatal("failed to configure docker environment")
 		os.Exit(1)
 	}

 	if err := c.WriteToDisk(); err != nil {
-		zap.S().Errorw("failed to save configuration to disk", zap.Error(errors.WithStack(err)))
+		log.WithField("error", err).Error("failed to save configuration to disk")
 	}

 	// Just for some nice log output.
 	for _, s := range server.GetServers().All() {
-		zap.S().Infow("loaded configuration for server", zap.String("server", s.Uuid))
+		log.WithField("server", s.Id()).Info("loaded configuration for server")
 	}

 	// Create a new WaitGroup that limits us to 4 servers being bootstrapped at a time
@@ -144,16 +183,17 @@ func rootCmdRun(*cobra.Command, []string) {
 		go func(s *server.Server) {
 			defer wg.Done()

+			s.Log().Info("ensuring server environment exists")
+
 			// Create a server environment if none exists currently. This allows us to recover from Docker
 			// being reinstalled on the host system for example.
-			zap.S().Infow("ensuring environment exists", zap.String("server", s.Uuid))
 			if err := s.Environment.Create(); err != nil {
-				zap.S().Errorw("failed to create an environment for server", zap.String("server", s.Uuid), zap.Error(err))
+				s.Log().WithField("error", err).Error("failed to process environment")
 			}

 			r, err := s.Environment.IsRunning()
 			if err != nil {
-				zap.S().Errorw("error checking server environment status", zap.String("server", s.Uuid), zap.Error(err))
+				s.Log().WithField("error", err).Error("error checking server environment status")
 			}

 			// If the server is currently running on Docker, mark the process as being in that state.
@@ -163,13 +203,9 @@ func rootCmdRun(*cobra.Command, []string) {
 			// This will also validate that a server process is running if the last tracked state we have
 			// is that it was running, but we see that the container process is not currently running.
 			if r || (!r && s.IsRunning()) {
-				zap.S().Infow("detected server is running, re-attaching to process", zap.String("server", s.Uuid))
+				s.Log().Info("detected server is running, re-attaching to process...")
 				if err := s.Environment.Start(); err != nil {
-					zap.S().Warnw(
-						"failed to properly start server detected as already running",
-						zap.String("server", s.Uuid),
-						zap.Error(errors.WithStack(err)),
-					)
+					s.Log().WithField("error", errors.WithStack(err)).Warn("failed to properly start server detected as already running")
 				}

 				return
@@ -184,46 +220,60 @@ func rootCmdRun(*cobra.Command, []string) {
 	// Wait until all of the servers are ready to go before we fire up the HTTP server.
 	wg.Wait()

-	// If the SFTP subsystem should be started, do so now.
-	if c.System.Sftp.UseInternalSystem {
+	// Initalize SFTP.
 	sftp.Initialize(c)
-	}

 	// Ensure the archive directory exists.
 	if err := os.MkdirAll(c.System.ArchiveDirectory, 0755); err != nil {
-		zap.S().Errorw("failed to create archive directory", zap.Error(err))
+		log.WithField("error", err).Error("failed to create archive directory")
 	}

 	// Ensure the backup directory exists.
 	if err := os.MkdirAll(c.System.BackupDirectory, 0755); err != nil {
-		zap.S().Errorw("failed to create backup directory", zap.Error(err))
+		log.WithField("error", err).Error("failed to create backup directory")
 	}

-	zap.S().Infow("configuring webserver", zap.Bool("ssl", c.Api.Ssl.Enabled), zap.String("host", c.Api.Host), zap.Int("port", c.Api.Port))
+	log.WithFields(log.Fields{
+		"use_ssl":      c.Api.Ssl.Enabled,
+		"use_auto_tls": useAutomaticTls && len(tlsHostname) > 0,
+		"host_address": c.Api.Host,
+		"host_port":    c.Api.Port,
+	}).Info("configuring internal webserver")

 	r := router.Configure()
 	addr := fmt.Sprintf("%s:%d", c.Api.Host, c.Api.Port)

-	if c.Api.Ssl.Enabled {
+	if useAutomaticTls && len(tlsHostname) > 0 {
+		m := autocert.Manager{
+			Prompt:     autocert.AcceptTOS,
+			Cache:      autocert.DirCache(path.Join(c.System.RootDirectory, "/.tls-cache")),
+			HostPolicy: autocert.HostWhitelist(tlsHostname),
+		}
+
+		log.WithField("hostname", tlsHostname).
+			Info("webserver is now listening with auto-TLS enabled; certifcates will be automatically generated by Let's Encrypt")
+
+		// We don't use the autotls runner here since we need to specify a port other than 443
+		// to be using for SSL connections for Wings.
+		s := &http.Server{Addr: addr, TLSConfig: m.TLSConfig(), Handler: r}
+
+		go http.ListenAndServe(":http", m.HTTPHandler(nil))
+		if err := s.ListenAndServeTLS("", ""); err != nil {
+			log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlsHostname, "error": err}).
+				Fatal("failed to configure HTTP server using auto-tls")
+			os.Exit(1)
+		}
+	} else if c.Api.Ssl.Enabled {
 		if err := r.RunTLS(addr, c.Api.Ssl.CertificateFile, c.Api.Ssl.KeyFile); err != nil {
-			zap.S().Fatalw("failed to configure HTTPS server", zap.Error(err))
+			log.WithFields(log.Fields{"auto_tls": false, "error": err}).Fatal("failed to configure HTTPS server")
+			os.Exit(1)
 		}
 	} else {
 		if err := r.Run(addr); err != nil {
-			zap.S().Fatalw("failed to configure HTTP server", zap.Error(err))
+			log.WithField("error", err).Fatal("failed to configure HTTP server")
+			os.Exit(1)
 		}
 	}

-	// r := &Router{
-	// 	token: c.AuthenticationToken,
-	// 	upgrader: websocket.Upgrader{
-	// 		// Ensure that the websocket request is originating from the Panel itself,
-	// 		// and not some other location.
-	// 		CheckOrigin: func(r *http.Request) bool {
-	// 			return r.Header.Get("Origin") == c.PanelLocation
-	// 		},
-	// 	},
-	// }
 }

 // Execute calls cobra to handle cli commands
@@ -251,6 +301,9 @@ func configureLogging(debug bool) error {

 	zap.ReplaceGlobals(logger)

+	log.SetHandler(cli.Default)
+	log.SetLevel(log.DebugLevel)
+
 	return nil
 }

@@ -270,3 +323,23 @@ func printLogo() {
 	fmt.Println(`Copyright © 2018 - 2020 Dane Everitt & Contributors`)
 	fmt.Println()
 }
+
+func exitWithConfigurationNotice() {
+	fmt.Print(colorstring.Color(`
+[_red_][white][bold]Error: Configuration File Not Found[reset]
+
+Wings was not able to locate your configuration file, and therefore is not
+able to complete its boot process.
+
+Please ensure you have copied your instance configuration file into
+the default location, or have provided the --config flag to use a
+custom location.
+
+Default Location: /etc/pterodactyl/config.yml
+
+[yellow]This is not a bug with this software. Please do not make a bug report
+for this issue, it will be closed.[reset]
+
+`))
+	os.Exit(1)
+}
@@ -3,10 +3,10 @@ package config
 import (
 	"errors"
 	"fmt"
+	"github.com/apex/log"
 	"github.com/cobaugh/osrelease"
 	"github.com/creasty/defaults"
 	"github.com/gbrlsnchs/jwt/v3"
-	"go.uber.org/zap"
 	"gopkg.in/yaml.v2"
 	"io/ioutil"
 	"os"
@@ -19,7 +19,7 @@ import (
 	"sync"
 )

-const DefaultLocation = "/var/lib/pterodactyl/config.yml"
+const DefaultLocation = "/etc/pterodactyl/config.yml"

 type Configuration struct {
 	sync.RWMutex `json:"-" yaml:"-"`
@@ -46,9 +46,9 @@ type Configuration struct {
 	// validate against it.
 	AuthenticationToken string `json:"token" yaml:"token"`

-	Api    ApiConfiguration
-	System SystemConfiguration
-	Docker DockerConfiguration
+	Api    ApiConfiguration    `json:"api" yaml:"api"`
+	System SystemConfiguration `json:"system" yaml:"system"`
+	Docker DockerConfiguration `json:"docker" yaml:"docker"`

 	// The amount of time in seconds that should elapse between disk usage checks
 	// run by the daemon. Setting a higher number can result in better IO performance
@@ -80,13 +80,13 @@ type Configuration struct {
 	// The location where the panel is running that this daemon should connect to
 	// to collect data and send events.
 	PanelLocation string `json:"remote" yaml:"remote"`
+
+	// AllowedMounts .
+	AllowedMounts []string `json:"allowed_mounts" yaml:"allowed_mounts"`
 }

 // Defines the configuration of the internal SFTP server.
 type SftpConfiguration struct {
-	// If set to false, the internal SFTP server will not be booted and you will need
-	// to run the SFTP server independent of this program.
-	UseInternalSystem bool `default:"true" json:"use_internal" yaml:"use_internal"`
 	// If set to true disk checking will not be performed. This will prevent the SFTP
 	// server from checking the total size of a directory when uploading files.
 	DisableDiskChecking bool `default:"false" yaml:"disable_disk_checking"`
@@ -135,7 +135,7 @@ func ReadConfiguration(path string) (*Configuration, error) {
 	}

 	// Track the location where we created this configuration.
-	c.path = path
+	c.unsafeSetPath(path)

 	// Replace environment variables within the configuration file with their
 	// values from the host system.
@@ -189,8 +189,32 @@ func GetJwtAlgorithm() *jwt.HMACSHA {
 	return _jwtAlgo
 }

+// Create a new struct and set the path where it should be stored.
+func NewFromPath(path string) (*Configuration, error) {
+	c := new(Configuration)
+	if err := defaults.Set(c); err != nil {
+		return c, err
+	}
+
+	c.unsafeSetPath(path)
+
+	return c, nil
+}
+
+// Sets the path where the configuration file is located on the server. This function should
+// not be called except by processes that are generating the configuration such as the configration
+// command shipped with this software.
+func (c *Configuration) unsafeSetPath(path string) {
+	c.Lock()
+	c.path = path
+	c.Unlock()
+}
+
 // Returns the path for this configuration file.
 func (c *Configuration) GetPath() string {
+	c.RLock()
+	defer c.RUnlock()
+
 	return c.path
 }

@@ -248,11 +272,10 @@ func (c *Configuration) setSystemUser(u *user.User) error {
 	gid, _ := strconv.Atoi(u.Gid)

 	c.Lock()
-	defer c.Unlock()
-
 	c.System.Username = u.Username
 	c.System.User.Uid = uid
 	c.System.User.Gid = gid
+	c.Unlock()

 	return c.WriteToDisk()
 }
@@ -299,7 +322,7 @@ func (c *Configuration) EnsureFilePermissions() error {
 		gid, _ := strconv.Atoi(su.Gid)

 		if err := os.Chown(path.Join(c.System.Data, f.Name()), uid, gid); err != nil {
-			zap.S().Warnw("failed to chown server directory", zap.String("directory", f.Name()), zap.Error(err))
+			log.WithField("error", err).WithField("directory", f.Name()).Warn("failed to chown server directory")
 		}
 	}(file)
 }
@@ -313,6 +336,10 @@ func (c *Configuration) EnsureFilePermissions() error {
 // lock on the file. This prevents something else from writing at the exact same time and
 // leading to bad data conditions.
 func (c *Configuration) WriteToDisk() error {
+	// Obtain an exclusive write against the configuration file.
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
 	ccopy := *c
 	// If debugging is set with the flag, don't save that to the configuration file, otherwise
 	// you'll always end up in debug mode.
@@ -329,10 +356,6 @@ func (c *Configuration) WriteToDisk() error {
 		return err
 	}

-	// Obtain an exclusive write against the configuration file.
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-
 	if err := ioutil.WriteFile(c.GetPath(), b, 0644); err != nil {
 		return err
 	}
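Two locking changes land in this config file: path access now goes through GetPath/unsafeSetPath under the embedded RWMutex, and WriteToDisk acquires its writeLock before marshalling rather than just before the file write, so the serialize-plus-write pair behaves as one critical section. A minimal, self-contained sketch of that pattern with placeholder names, not wings types:

```go
// Minimal sketch of the serialize-then-write pattern guarded by a single
// mutex for the whole operation; Store is a placeholder type, not wings code.
package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

type Store struct {
	mu   sync.Mutex
	Name string `json:"name"`
}

// Save holds the lock across both the marshal and the (simulated) write so a
// concurrent Save cannot interleave between the two steps.
func (s *Store) Save() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	b, err := json.Marshal(s)
	if err != nil {
		return err
	}

	fmt.Println("writing:", string(b)) // stand-in for ioutil.WriteFile
	return nil
}

func main() {
	s := &Store{Name: "wings"}
	if err := s.Save(); err != nil {
		fmt.Println(err)
	}
}
```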
@@ -15,7 +15,7 @@ type dockerNetworkInterfaces struct {
 type DockerNetworkConfiguration struct {
 	// The interface that should be used to create the network. Must not conflict
 	// with any other interfaces in use by Docker or on the system.
-	Interface string `default:"172.18.0.1"`
+	Interface string `default:"172.18.0.1" json:"interface" yaml:"interface"`

 	// The DNS settings for containers.
 	Dns []string `default:"[\"1.1.1.1\", \"1.0.0.1\"]"`

@@ -26,6 +26,7 @@ type DockerNetworkConfiguration struct {
 	Name string `default:"pterodactyl_nw"`
 	ISPN bool `default:"false" yaml:"ispn"`
 	Driver string `default:"bridge"`
+	Mode string `default:"pterodactyl_nw" yaml:"network_mode"`
 	IsInternal bool `default:"false" yaml:"is_internal"`
 	EnableICC bool `default:"true" yaml:"enable_icc"`
 	Interfaces dockerNetworkInterfaces `yaml:"interfaces"`

@@ -38,13 +39,16 @@ type DockerConfiguration struct {
 	// for containers run through the daemon.
 	Network DockerNetworkConfiguration `json:"network" yaml:"network"`

+	// Domainname is the Docker domainname for all containers.
+	Domainname string `default:"" json:"domainname" yaml:"domainname"`
+
 	// If true, container images will be updated when a server starts if there
 	// is an update available. If false the daemon will not attempt updates and will
 	// defer to the host system to manage image updates.
 	UpdateImages bool `default:"true" json:"update_images" yaml:"update_images"`

 	// The location of the Docker socket.
-	Socket string `default:"/var/run/docker.sock"`
+	Socket string `default:"/var/run/docker.sock" json:"socket" yaml:"socket"`

 	// Defines the location of the timezone file on the host system that should
 	// be mounted into the created containers so that they all use the same time.
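Not part of the diff, but for context on the tags being added above: the default:"..." tags are the kind consumed by github.com/creasty/defaults and the yaml tags drive gopkg.in/yaml.v2 decoding, both of which appear in the go.mod changes further down this page. A rough, hedged sketch of how such a struct gets populated (the struct here is a trimmed-down illustration, not the full type):

package main

import (
	"fmt"

	"github.com/creasty/defaults"
	"gopkg.in/yaml.v2"
)

// Trimmed-down copy of the struct above, just to show the two tag types.
type DockerNetworkConfiguration struct {
	Interface string `default:"172.18.0.1" yaml:"interface"`
	Mode      string `default:"pterodactyl_nw" yaml:"network_mode"`
}

func main() {
	var c DockerNetworkConfiguration

	// 1. Fill zero-valued fields from their default tags.
	if err := defaults.Set(&c); err != nil {
		panic(err)
	}

	// 2. Overlay the operator's config file; the yaml tags decide which keys
	//    map onto which fields (network_mode -> Mode, and so on).
	raw := []byte("network_mode: host\n")
	if err := yaml.Unmarshal(raw, &c); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", c) // {Interface:172.18.0.1 Mode:host}
}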
@@ -1,7 +1,7 @@
 package config

 import (
-	"go.uber.org/zap"
+	"github.com/apex/log"
 	"os"
 	"path"
 )

@@ -51,33 +51,33 @@ type SystemConfiguration struct {
 	// the user did not press the stop button, but the process stopped cleanly.
 	DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"`

-	Sftp *SftpConfiguration `yaml:"sftp"`
+	Sftp SftpConfiguration `yaml:"sftp"`
 }

 // Ensures that all of the system directories exist on the system. These directories are
 // created so that only the owner can read the data, and no other users.
 func (sc *SystemConfiguration) ConfigureDirectories() error {
-	zap.S().Debugw("ensuring root data directory exists", zap.String("path", sc.RootDirectory))
+	log.WithField("path", sc.RootDirectory).Debug("ensuring root data directory exists")
 	if err := os.MkdirAll(sc.RootDirectory, 0700); err != nil {
 		return err
 	}

-	zap.S().Debugw("ensuring log directory exists", zap.String("path", sc.LogDirectory))
+	log.WithField("path", sc.LogDirectory).Debug("ensuring log directory exists")
 	if err := os.MkdirAll(path.Join(sc.LogDirectory, "/install"), 0700); err != nil {
 		return err
 	}

-	zap.S().Debugw("ensuring server data directory exists", zap.String("path", sc.Data))
+	log.WithField("path", sc.Data).Debug("ensuring server data directory exists")
 	if err := os.MkdirAll(sc.Data, 0700); err != nil {
 		return err
 	}

-	zap.S().Debugw("ensuring archive data directory exists", zap.String("path", sc.ArchiveDirectory))
+	log.WithField("path", sc.ArchiveDirectory).Debug("ensuring archive data directory exists")
 	if err := os.MkdirAll(sc.ArchiveDirectory, 0700); err != nil {
 		return err
 	}

-	zap.S().Debugw("ensuring backup data directory exists", zap.String("path", sc.BackupDirectory))
+	log.WithField("path", sc.BackupDirectory).Debug("ensuring backup data directory exists")
 	if err := os.MkdirAll(sc.BackupDirectory, 0700); err != nil {
 		return err
 	}
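Much of this compare is the same mechanical swap of go.uber.org/zap for github.com/apex/log. A tiny sketch of the two call styles side by side, using only each library's published API and nothing project-specific:

package main

import (
	"github.com/apex/log"
	"go.uber.org/zap"
)

func main() {
	path := "/var/lib/pterodactyl"

	// Before: zap's global sugared logger with a structured field attached.
	zap.S().Debugw("ensuring root data directory exists", zap.String("path", path))

	// After: apex/log builds an entry carrying fields, then emits at a level.
	log.WithField("path", path).Debug("ensuring root data directory exists")
}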
26 docker-compose.example.yml Normal file
@@ -0,0 +1,26 @@
version: '3'
services:
  daemon:
    build: .
    restart: always
    hostname: daemon
    ports:
      - "8080:8080"
      - "2022:2022"
    tty: true
    environment:
      - "DEBUG=false"
    volumes:
      - "/var/run/docker.sock:/var/run/docker.sock"
      - "/var/lib/docker/containers/:/var/lib/docker/containers/"
      - "/var/lib/pterodactyl/:/var/lib/pterodactyl/"
      - "/srv/daemon-data/:/srv/daemon-data/"
      - "/tmp/pterodactyl/:/tmp/pterodactyl/"
      - "/etc/timezone:/etc/timezone:ro"
      ## Required for SSL if you use Let's Encrypt. Uncomment to use.
      ## - "/etc/letsencrypt/:/etc/letsencrypt/"
networks:
  default:
    ipam:
      config:
        - subnet: 172.21.0.0/16
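A usage note that is not part of the committed file: with an example Compose file like this, the usual workflow is to copy it to docker-compose.yml, adjust the mounts and the subnet for the host, and then bring the daemon up with docker-compose up -d --build (Compose file format version 3, as declared at the top of the file).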
@@ -2,12 +2,12 @@ package environment

 import (
 	"context"
+	"github.com/apex/log"

 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
 	"github.com/pterodactyl/wings/config"
-	"go.uber.org/zap"
 )

 // Configures the required network for the docker environment.

@@ -20,10 +20,10 @@ func ConfigureDocker(c *config.DockerConfiguration) error {

 	resource, err := cli.NetworkInspect(context.Background(), c.Network.Name, types.NetworkInspectOptions{})
 	if err != nil && client.IsErrNotFound(err) {
-		zap.S().Infow("creating missing pterodactyl0 interface, this could take a few seconds...")
+		log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
 		return createDockerNetwork(cli, c)
 	} else if err != nil {
-		zap.S().Fatalw("failed to create required docker network for containers", zap.Error(err))
+		log.WithField("error", err).Fatal("failed to create required docker network for containers")
 	}

 	switch resource.Driver {
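The hunk above keeps the existing inspect-then-create flow and only changes how it logs. For reference, a bare-bones version of that flow against the Docker SDK pinned in the go.mod changes that follow; the network name and driver are placeholders and the real daemon builds the create options from DockerNetworkConfiguration instead:

package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func ensureNetwork(ctx context.Context, name string) error {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return err
	}

	// Ask the daemon whether the network already exists.
	if _, err := cli.NetworkInspect(ctx, name, types.NetworkInspectOptions{}); err == nil {
		return nil // already there, nothing to do
	} else if !client.IsErrNotFound(err) {
		return err // some other API failure
	}

	// Missing: create it with a placeholder driver.
	_, err = cli.NetworkCreate(ctx, name, types.NetworkCreate{Driver: "bridge"})
	return err
}

func main() {
	_ = ensureNetwork(context.Background(), "pterodactyl_nw")
}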
40 go.mod
@@ -1,6 +1,6 @@
 module github.com/pterodactyl/wings

-go 1.12
+go 1.13

 // Uncomment this in development environments to make changes to the core SFTP
 // server software. This assumes you're using the official Pterodactyl Environment

@@ -15,58 +15,62 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
 	github.com/Jeffail/gabs/v2 v2.2.0
 	github.com/Microsoft/go-winio v0.4.7 // indirect
-	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
+	github.com/apex/log v1.3.0
 	github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a
 	github.com/beevik/etree v1.1.0
 	github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929
 	github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
+	github.com/containerd/containerd v1.3.6 // indirect
 	github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
 	github.com/creasty/defaults v1.3.0
+	github.com/docker/cli v17.12.1-ce-rc2+incompatible
 	github.com/docker/distribution v2.7.1+incompatible // indirect
-	github.com/docker/docker v0.0.0-20180422163414-57142e89befe
+	github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible
 	github.com/docker/go-connections v0.4.0
+	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/docker/go-units v0.3.3 // indirect
+	github.com/fatih/color v1.9.0
 	github.com/gabriel-vasile/mimetype v0.1.4
+	github.com/gammazero/workerpool v0.0.0-20200608033439-1a5ca90a5753
 	github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.0
-	github.com/ghodss/yaml v1.0.0
-	github.com/gin-gonic/gin v1.6.2
+	github.com/gin-gonic/gin v1.6.3
 	github.com/golang/protobuf v1.3.5 // indirect
 	github.com/google/uuid v1.1.1
+	github.com/gorilla/mux v1.7.4 // indirect
 	github.com/gorilla/websocket v1.4.0
-	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
 	github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334
+	github.com/icza/dyno v0.0.0-20200205103839-49cb13720835
 	github.com/imdario/mergo v0.3.8
 	github.com/klauspost/pgzip v1.2.3
 	github.com/magiconair/properties v1.8.1
+	github.com/mattn/go-colorable v0.1.4
 	github.com/mattn/go-shellwords v1.0.10 // indirect
 	github.com/mholt/archiver/v3 v3.3.0
 	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
-	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
 	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
 	github.com/opencontainers/image-spec v1.0.1 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/pkg/errors v0.9.1
 	github.com/pkg/profile v1.4.0
-	github.com/pkg/sftp v1.10.1 // indirect
+	github.com/pkg/sftp v1.11.0 // indirect
-	github.com/pterodactyl/sftp-server v1.1.1
+	github.com/pterodactyl/sftp-server v1.1.4
 	github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce
 	github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94
 	github.com/smartystreets/goconvey v1.6.4 // indirect
 	github.com/spf13/cobra v0.0.7
 	github.com/stretchr/testify v1.5.1 // indirect
-	go.uber.org/atomic v1.5.1 // indirect
-	go.uber.org/multierr v1.4.0 // indirect
-	go.uber.org/zap v1.13.0
-	golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 // indirect
-	golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
-	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e // indirect
+	go.uber.org/zap v1.15.0
+	golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79
+	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
+	golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 // indirect
 	golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
-	golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 // indirect
+	golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f // indirect
-	golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290 // indirect
+	golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5 // indirect
 	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
 	gopkg.in/ini.v1 v1.51.0
 	gopkg.in/yaml.v2 v2.2.8
 	gotest.tools v2.2.0+incompatible // indirect
+	honnef.co/go/tools v0.0.1-2020.1.3 // indirect
 )
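A side note on maintenance rather than on the change itself: version bumps like the ones above are normally applied with the go tool (for example go get github.com/apex/log@v1.3.0 followed by go mod tidy on Go 1.13), and the matching go.sum checksum entries below are regenerated automatically in the process; go.sum is not meant to be edited by hand.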
139 go.sum
@@ -11,26 +11,36 @@ github.com/Microsoft/go-winio v0.4.7 h1:vOvDiY/F1avSWlCWiKJjdYKz2jVjTK3pWPHndeG4
 github.com/Microsoft/go-winio v0.4.7/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=
 github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
-github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
-github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6 h1:bZ28Hqta7TFAK3Q08CMvv8y3/8ATaEqv2nGoc6yff6c=
 github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U=
+github.com/apex/log v1.3.0 h1:1fyfbPvUwD10nMoh3hY6MXzvZShJQn9/ck7ATgAt5pA=
+github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs=
+github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
+github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
+github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
 github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
 github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929 h1:MW/JDk68Rny52yI0M0N+P8lySNgB+NhpI/uAmhgOhUM=
 github.com/buger/jsonparser v0.0.0-20191204142016-1a29609e0929/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249 h1:R0IDH8daQ3lODvu8YtxnIqqth5qMGCJyADoUQvmLx4o=
 github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249/go.mod h1:EHKW9yNEYSBpTKzuu7Y9oOrft/UlzH57rMIB03oev6M=
+github.com/containerd/containerd v1.3.6 h1:SMfcKoQyWhaRsYq7290ioC6XFcHDNcHvcEMjF6ORpac=
+github.com/containerd/containerd v1.3.6/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=

@@ -46,31 +56,44 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/docker/cli v17.12.1-ce-rc2+incompatible h1:ESUycEAqvFuLglAHkUW66rCc2djYtd3i1x231svLq9o=
+github.com/docker/cli v17.12.1-ce-rc2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v0.0.0-20180422163414-57142e89befe h1:VW8TnWi0CZgg7oCv0wH6evNwkzcJg/emnw4HrVIWws4=
-github.com/docker/docker v0.0.0-20180422163414-57142e89befe/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:iWPIG7pWIsCwT6ZtHnTUpoVMnete7O/pzd9HFE3+tn8=
+github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
 github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
 github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/gabriel-vasile/mimetype v0.1.4 h1:5mcsq3+DXypREUkW+1juhjeKmE/XnWgs+paHMJn7lf8=
 github.com/gabriel-vasile/mimetype v0.1.4/go.mod h1:kMJbg3SlWZCsj4R73F1WDzbT9AyGCOVmUtIxxwO5pmI=
+github.com/gammazero/deque v0.0.0-20200227231300-1e9af0e52b46 h1:iX4+rD9Fjdx8SkmSO/O5WAIX/j79ll3kuqv5VdYt9J8=
+github.com/gammazero/deque v0.0.0-20200227231300-1e9af0e52b46/go.mod h1:D90+MBHVc9Sk1lJAbEVgws0eYEurY4mv2TDso3Nxh3w=
+github.com/gammazero/workerpool v0.0.0-20200608033439-1a5ca90a5753 h1:oSQ61LxZkz3Z4La0O5cbyVDvLWEfbNgiD43cSPdjPQQ=
+github.com/gammazero/workerpool v0.0.0-20200608033439-1a5ca90a5753/go.mod h1:/XWO2YAUUpPi3smDlFBl0vpX0JHwUomDM/oRMwRmnSs=
 github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.0 h1:7KeiSrO5puFH1+vdAdbpiie2TrNnkvFc/eOQzT60Z2k=
 github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.0/go.mod h1:D1+3UtCYAJ1os1PI+zhTVEj6Tb+IHJvXjXKz83OstmM=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
 github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
-github.com/gin-gonic/gin v1.6.2 h1:88crIK23zO6TqlQBt+f9FrPJNKm9ZEr7qjp9vl/d5TM=
-github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
+github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
+github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=

@@ -92,6 +115,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
 github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
 github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=

@@ -108,23 +132,30 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
-github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ=
 github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8=
 github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
+github.com/icza/dyno v0.0.0-20200205103839-49cb13720835 h1:f1irK5f03uGGj+FjgQfZ5VhdKNVQVJ4skHsedzVohQ4=
+github.com/icza/dyno v0.0.0-20200205103839-49cb13720835/go.mod h1:c1tRKs5Tx7E2+uHGSyyncziFjvGpgv4H2HrqXeUQ/Uk=
 github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=

@@ -150,6 +181,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.4 h1:5Myjjh3JY/NaAi4IsUbHADytDyl1VE1Y9PXDlL+P/VQ=
 github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=

@@ -162,13 +194,19 @@ github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQ
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
 github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
 github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
 github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
 github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=

@@ -184,12 +222,16 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nwaples/rardecode v1.0.0 h1:r7vGuS5akxOnR4JQSkko62RJ1ReCMXxQRPtxsiFMBOs=
 github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=

@@ -209,35 +251,51 @@ github.com/pkg/profile v1.4.0 h1:uCmaf4vVbWAOZz36k1hrQD7ijGRzLwaME8Am/7a4jZI=
 github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
 github.com/pkg/sftp v1.8.3 h1:9jSe2SxTM8/3bXZjtqnkgTBW+lA8db0knZJyns7gpBA=
 github.com/pkg/sftp v1.8.3/go.mod h1:NxmoDg/QLVWluQDUYG7XBZTLUpKeFa8e3aMf1BfjyHk=
-github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI=
+github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/pterodactyl/sftp-server v1.1.1 h1:IjuOy21BNZxfejKnXG1RgLxXAYylDqBVpbKZ6+fG5FQ=
-github.com/pterodactyl/sftp-server v1.1.1/go.mod h1:b1VVWYv0RF9rxSZQqaD/rYXriiRMNPsbV//CKMXR4ag=
+github.com/pterodactyl/sftp-server v1.1.4 h1:JESuEuZ+d2tajMjuQblPOlGISM9Uc2xOzk7irVF9PQ0=
+github.com/pterodactyl/sftp-server v1.1.4/go.mod h1:KjSONrenRr1oCh94QIVAU6yEzMe+Hd7r/JHrh5/oQHs=
 github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce h1:aP+C+YbHZfOQlutA4p4soHi7rVUqHQdWEVMSkHfDTqY=
 github.com/remeh/sizedwaitgroup v0.0.0-20180822144253-5e7302b12cce/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 h1:G04eS0JkAIVZfaJLjla9dNxkJCPiKIGZlw9AfOhzOD0=
 github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
+github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=

@@ -257,6 +315,12 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
+github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
+github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
+github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds=
+github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/uber-go/zap v1.9.1/go.mod h1:GY+83l3yxBcBw2kmHu/sAWwItnTn+ynxHCRo+WiIQOY=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=

@@ -275,51 +339,53 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM=
-go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E=
-go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
 go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
+go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8=
-golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79 h1:IaQbIIB2X/Mp/DKctl6ROxz1KyMlKp4uyvL6+kQ7C88=
+golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
||||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ=
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
|
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
|
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 h1:Jcxah/M+oLZ/R4/z5RzfPzGbPXnVDPkEDtf2JnuxN+U=
|
||||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@@ -332,16 +398,19 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03i
|
|||||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4 h1:opSr2sbRXk5X5/givKrrKj9HXxFpW2sdCiP8MJSKLQY=
|
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI=
|
||||||
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||||
@@ -359,9 +428,10 @@ golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtn
|
|||||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
|
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
|
||||||
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290 h1:NXNmtp0ToD36cui5IqWy95LC4Y6vT/4y3RnPxlQPinU=
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.0.0-20200417140056-c07e33ef3290/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5 h1:MeC2gMlMdkd67dn17MEby3rGXRxZtWeiRXOnISfTQ74=
|
||||||
|
golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools/gopls v0.1.3/go.mod h1:vrCQzOKxvuiZLjCKSmbbov04oeBQQOb4VQqwYK2PWIY=
|
golang.org/x/tools/gopls v0.1.3/go.mod h1:vrCQzOKxvuiZLjCKSmbbov04oeBQQOb4VQqwYK2PWIY=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@@ -369,19 +439,24 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
|
|||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
|
google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
|
||||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
|
||||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||||
@@ -393,3 +468,5 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81
|
|||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
|
||||||
|
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||||
|
|||||||
@@ -2,13 +2,13 @@ package installer
 
 import (
 	"encoding/json"
+	"github.com/apex/log"
 	"github.com/asaskevich/govalidator"
 	"github.com/buger/jsonparser"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/api"
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/server"
-	"go.uber.org/zap"
 	"os"
 	"path"
 )
@@ -29,12 +29,10 @@ func New(data []byte) (*Installer, error) {
 		return nil, NewValidationError("service egg provided was not in a valid format")
 	}
 
-	s := &server.Server{
+	cfg := &server.Configuration{
 		Uuid:       getString(data, "uuid"),
 		Suspended:  false,
-		State:      server.ProcessOfflineState,
 		Invocation: getString(data, "invocation"),
-		EnvVars:    make(map[string]string),
 		Build: server.BuildSettings{
 			MemoryLimit: getInt(data, "build", "memory"),
 			Swap:        getInt(data, "build", "swap"),
@@ -43,20 +41,18 @@ func New(data []byte) (*Installer, error) {
 			DiskSpace:   getInt(data, "build", "disk"),
 			Threads:     getString(data, "build", "threads"),
 		},
-		Allocations: server.Allocations{
-			Mappings: make(map[string][]int),
-		},
+		CrashDetectionEnabled: true,
 	}
 
-	s.Allocations.DefaultMapping.Ip = getString(data, "allocations", "default", "ip")
-	s.Allocations.DefaultMapping.Port = int(getInt(data, "allocations", "default", "port"))
+	cfg.Allocations.DefaultMapping.Ip = getString(data, "allocations", "default", "ip")
+	cfg.Allocations.DefaultMapping.Port = int(getInt(data, "allocations", "default", "port"))
 
 	// Unmarshal the environment variables from the request into the server struct.
 	if b, _, _, err := jsonparser.Get(data, "environment"); err != nil {
 		return nil, errors.WithStack(err)
 	} else {
-		s.EnvVars = make(map[string]string)
-		if err := json.Unmarshal(b, &s.EnvVars); err != nil {
+		cfg.EnvVars = make(server.EnvironmentVariables)
+		if err := json.Unmarshal(b, &cfg.EnvVars); err != nil {
 			return nil, errors.WithStack(err)
 		}
 	}
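The `getString` and `getInt` helpers used above are defined elsewhere in this package and are not part of this hunk. Below is a minimal sketch, under the assumption that they are thin wrappers around `github.com/buger/jsonparser`; the payload shape and helper bodies here are illustrative, not the project's exact implementation.

```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

// getString walks the provided keys and returns an empty string when the path is missing.
func getString(data []byte, keys ...string) string {
	value, _ := jsonparser.GetString(data, keys...)
	return value
}

// getInt behaves the same way for numeric fields, returning 0 on a missing key.
func getInt(data []byte, keys ...string) int64 {
	value, _ := jsonparser.GetInt(data, keys...)
	return value
}

func main() {
	// A hypothetical install payload with the same nesting as the hunk above.
	data := []byte(`{"uuid":"abc","build":{"memory":1024,"swap":0},"allocations":{"default":{"ip":"127.0.0.1","port":25565}}}`)

	fmt.Println(getString(data, "uuid"))                         // abc
	fmt.Println(getInt(data, "build", "memory"))                 // 1024
	fmt.Println(getString(data, "allocations", "default", "ip")) // 127.0.0.1
	fmt.Println(getInt(data, "allocations", "default", "port"))  // 25565
}
```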
@@ -65,15 +61,15 @@ func New(data []byte) (*Installer, error) {
 	if b, _, _, err := jsonparser.Get(data, "allocations", "mappings"); err != nil {
 		return nil, errors.WithStack(err)
 	} else {
-		s.Allocations.Mappings = make(map[string][]int)
-		if err := json.Unmarshal(b, &s.Allocations.Mappings); err != nil {
+		cfg.Allocations.Mappings = make(map[string][]int)
+		if err := json.Unmarshal(b, &cfg.Allocations.Mappings); err != nil {
 			return nil, errors.WithStack(err)
 		}
 	}
 
-	s.Container.Image = getString(data, "container", "image")
+	cfg.Container.Image = getString(data, "container", "image")
 
-	c, rerr, err := api.NewRequester().GetServerConfiguration(s.Uuid)
+	c, rerr, err := api.NewRequester().GetServerConfiguration(cfg.Uuid)
 	if err != nil || rerr != nil {
 		if err != nil {
 			return nil, errors.WithStack(err)
@@ -82,21 +78,18 @@ func New(data []byte) (*Installer, error) {
 		return nil, errors.New(rerr.String())
 	}
 
-	// Destroy the temporary server instance.
-	s = nil
-
 	// Create a new server instance using the configuration we wrote to the disk
 	// so that everything gets instantiated correctly on the struct.
-	s2, err := server.FromConfiguration(c)
+	s, err := server.FromConfiguration(c)
 
 	return &Installer{
-		server: s2,
+		server: s,
 	}, err
 }
 
 // Returns the UUID associated with this installer instance.
 func (i *Installer) Uuid() string {
-	return i.server.Uuid
+	return i.server.Id()
 }
 
 // Return the server instance.
@@ -108,24 +101,27 @@ func (i *Installer) Server() *server.Server {
 // associated installation process based on the parameters passed through for
 // the server instance.
 func (i *Installer) Execute() {
-	zap.S().Debugw("creating required server data directory", zap.String("server", i.Uuid()))
-	if err := os.MkdirAll(path.Join(config.Get().System.Data, i.Uuid()), 0755); err != nil {
-		zap.S().Errorw("failed to create server data directory", zap.String("server", i.Uuid()), zap.Error(errors.WithStack(err)))
+	p := path.Join(config.Get().System.Data, i.Uuid())
+	l := log.WithFields(log.Fields{"server": i.Uuid(), "process": "installer"})
+
+	l.WithField("path", p).Debug("creating required server data directory")
+	if err := os.MkdirAll(p, 0755); err != nil {
+		l.WithFields(log.Fields{"path": p, "error": errors.WithStack(err)}).Error("failed to create server data directory")
 		return
 	}
 
-	if err := os.Chown(path.Join(config.Get().System.Data, i.Uuid()), config.Get().System.User.Uid, config.Get().System.User.Gid); err != nil {
-		zap.S().Errorw("failed to chown server data directory", zap.String("server", i.Uuid()), zap.Error(errors.WithStack(err)))
+	if err := os.Chown(p, config.Get().System.User.Uid, config.Get().System.User.Gid); err != nil {
+		l.WithField("error", errors.WithStack(err)).Error("failed to chown server data directory")
 		return
 	}
 
-	zap.S().Debugw("creating required environment for server instance", zap.String("server", i.Uuid()))
+	l.Debug("creating required environment for server instance")
 	if err := i.server.Environment.Create(); err != nil {
-		zap.S().Errorw("failed to create environment for server", zap.String("server", i.Uuid()), zap.Error(err))
+		l.WithField("error", err).Error("failed to create environment for server")
 		return
 	}
 
-	zap.S().Debugw("created environment for server during install process", zap.String("server", i.Uuid()))
+	l.Info("successfully created environment for server during install process")
 }
 
 // Returns a string value from the JSON data provided.
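The `Execute` rewrite above is one instance of the broader pattern in this range: zap's sugared calls are replaced by apex/log entries that carry a fixed set of fields. A minimal sketch of that pattern follows; the field values are placeholders chosen for illustration only.

```go
package main

import (
	"errors"

	"github.com/apex/log"
)

func main() {
	// Build an entry once with the fields shared by every message, then reuse it,
	// much like the `l` variable in the Execute() hunk above.
	l := log.WithFields(log.Fields{
		"server":  "d9c1b1f2", // hypothetical server identifier
		"process": "installer",
	})

	l.Debug("creating required server data directory")

	// Attach call-specific fields at the call site.
	err := errors.New("permission denied")
	l.WithField("error", err).Error("failed to create server data directory")

	l.Info("successfully created environment for server during install process")
}
```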
112
loggers/cli/cli.go
Normal file
@@ -0,0 +1,112 @@
package cli

import (
	"fmt"
	"github.com/apex/log"
	"github.com/apex/log/handlers/cli"
	color2 "github.com/fatih/color"
	"github.com/mattn/go-colorable"
	"github.com/pkg/errors"
	"io"
	"os"
	"sync"
	"time"
)

var Default = New(os.Stderr)

var bold = color2.New(color2.Bold)

var Strings = [...]string{
	log.DebugLevel: "DEBUG",
	log.InfoLevel:  " INFO",
	log.WarnLevel:  " WARN",
	log.ErrorLevel: "ERROR",
	log.FatalLevel: "FATAL",
}

type Handler struct {
	mu      sync.Mutex
	Writer  io.Writer
	Padding int
}

func New(w io.Writer) *Handler {
	if f, ok := w.(*os.File); ok {
		return &Handler{Writer: colorable.NewColorable(f), Padding: 2}
	}

	return &Handler{Writer: w, Padding: 2}
}

type tracer interface {
	StackTrace() errors.StackTrace
}

// HandleLog implements log.Handler.
func (h *Handler) HandleLog(e *log.Entry) error {
	color := cli.Colors[e.Level]
	level := Strings[e.Level]
	names := e.Fields.Names()

	h.mu.Lock()
	defer h.mu.Unlock()

	color.Fprintf(h.Writer, "%s: [%s] %-25s", bold.Sprintf("%*s", h.Padding+1, level), time.Now().Format(time.StampMilli), e.Message)

	for _, name := range names {
		if name == "source" {
			continue
		}

		fmt.Fprintf(h.Writer, " %s=%v", color.Sprint(name), e.Fields.Get(name))
	}

	fmt.Fprintln(h.Writer)

	for _, name := range names {
		if name != "error" {
			continue
		}

		var br = color2.New(color2.Bold, color2.FgRed)
		if err, ok := e.Fields.Get("error").(error); ok {
			fmt.Fprintf(h.Writer, "\n%s%+v\n\n", br.Sprintf("Stacktrace:"), getErrorStack(err, false))
		} else {
			fmt.Fprintf(h.Writer, "\n%s%+v\n\n", br.Sprintf("Invalid Error:"), err)
		}
	}

	return nil
}

func getErrorStack(err error, i bool) errors.StackTrace {
	e, ok := errors.Cause(err).(tracer)
	if !ok {
		if i {
			// Just abort out of this and return a stacktrace leading up to this point. It isn't perfect
			// but it'll at least include what function lead to this being called which we can then handle.
			return errors.Wrap(err, "failed to generate stacktrace for caught error").(tracer).StackTrace()
		}

		return getErrorStack(errors.New(err.Error()), true)
	}

	st := e.StackTrace()

	l := len(st)
	// If this was an internal stack generation we're going to skip over the top four items in the stack
	// trace since they'll point to the error that was generated by this function.
	f := 0
	if i {
		f = 4
	}

	if i && l > 9 {
		l = 9
	} else if !i && l > 5 {
		l = 5
	}

	return st[f:l]
}
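A short usage sketch for the handler added above: wiring it in as the process-wide apex/log handler. The import path for the handler package is an assumption based on the file's location in this compare, and the field value is a placeholder.

```go
package main

import (
	"github.com/apex/log"

	// Assumed import path for the handler defined in loggers/cli/cli.go above.
	"github.com/pterodactyl/wings/loggers/cli"
)

func main() {
	// Route every log entry through the colorized CLI handler and show debug output.
	log.SetHandler(cli.Default)
	log.SetLevel(log.DebugLevel)

	log.WithField("server", "d9c1b1f2").Info("booting server process")
}
```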
@@ -3,13 +3,12 @@ package parser
 import (
 	"bytes"
 	"github.com/Jeffail/gabs/v2"
+	"github.com/apex/log"
 	"github.com/buger/jsonparser"
 	"github.com/iancoleman/strcase"
 	"github.com/pkg/errors"
-	"go.uber.org/zap"
 	"io/ioutil"
 	"os"
-	"reflect"
 	"regexp"
 	"strconv"
 	"strings"
@@ -48,13 +47,14 @@ func readFileBytes(path string) ([]byte, error) {
 }
 
 // Gets the value of a key based on the value type defined.
-func getKeyValue(value []byte) interface{} {
-	if reflect.ValueOf(value).Kind() == reflect.Bool {
+func (cfr *ConfigurationFileReplacement) getKeyValue(value []byte) interface{} {
+	if cfr.ReplaceWith.Type() == jsonparser.Boolean {
 		v, _ := strconv.ParseBool(string(value))
 		return v
 	}
 
-	// Try to parse into an int, if this fails just ignore the error and
+	// Try to parse into an int, if this fails just ignore the error and continue
+	// through, returning the string.
 	if v, err := strconv.Atoi(string(value)); err == nil {
 		return v
 	}
@@ -70,7 +70,9 @@ func getKeyValue(value []byte) interface{} {
 // configurations per-world (such as Spigot and Bungeecord) where we'll need to make
 // adjustments to the bind address for the user.
 //
-// This does not currently support nested matches. container.*.foo.*.bar will not work.
+// This does not currently support nested wildcard matches. For example, foo.*.bar
+// will work, however foo.*.bar.*.baz will not, since we'll only be splitting at the
+// first wildcard, and not subsequent ones.
 func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error) {
 	parsed, err := gabs.ParseJSON(data)
 	if err != nil {
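A minimal sketch of the single-level wildcard behaviour described in the updated comment above, using Jeffail/gabs/v2 directly. The JSON shape, key names, and replacement value are invented for the example; the project's real implementation wraps this in its own replacement types.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/Jeffail/gabs/v2"
)

func main() {
	parsed, _ := gabs.ParseJSON([]byte(`{"servers":{"lobby":{"bind":"old"},"survival":{"bind":"old"}}}`))

	match := "servers.*.bind"

	// Split at the first wildcard only, which is exactly the limitation the comment
	// describes: everything before the "*" selects the children to loop over, and
	// everything after it is applied to each child.
	parts := strings.SplitN(match, "*", 2)
	for _, child := range parsed.Path(strings.Trim(parts[0], ".")).Children() {
		child.SetP("0.0.0.0:25565", strings.Trim(parts[1], "."))
	}

	fmt.Println(parsed.String())
	// {"servers":{"lobby":{"bind":"0.0.0.0:25565"},"survival":{"bind":"0.0.0.0:25565"}}}
}
```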
@@ -94,12 +96,12 @@ func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error
 			// If the child is a null value, nothing will happen. Seems reasonable as of the
 			// time this code is being written.
 			for _, child := range parsed.Path(strings.Trim(parts[0], ".")).Children() {
-				if err := v.SetAtPathway(child, strings.Trim(parts[1], "."), value); err != nil {
+				if err := v.SetAtPathway(child, strings.Trim(parts[1], "."), []byte(value)); err != nil {
 					return nil, err
 				}
 			}
 		} else {
-			if err = v.SetAtPathway(parsed, v.Match, value); err != nil {
+			if err = v.SetAtPathway(parsed, v.Match, []byte(value)); err != nil {
 				return nil, err
 			}
 		}
@@ -118,11 +120,9 @@ func (cfr *ConfigurationFileReplacement) SetAtPathway(c *gabs.Container, path st
 		// We're doing some regex here.
 		r, err := regexp.Compile(strings.TrimPrefix(cfr.IfValue, "regex:"))
 		if err != nil {
-			zap.S().Warnw(
-				"configuration if_value using invalid regexp, cannot do replacement",
-				zap.String("if_value", strings.TrimPrefix(cfr.IfValue, "regex:")),
-				zap.Error(err),
-			)
+			log.WithFields(log.Fields{"if_value": strings.TrimPrefix(cfr.IfValue, "regex:"), "error": err}).
+				Warn("configuration if_value using invalid regexp, cannot perform replacement")
+
 			return nil
 		}
 
@@ -143,18 +143,18 @@ func (cfr *ConfigurationFileReplacement) SetAtPathway(c *gabs.Container, path st
 		}
 	}
 
-	_, err := c.SetP(getKeyValue(value), path)
+	_, err := c.SetP(cfr.getKeyValue(value), path)
 
 	return err
 }
 
 // Looks up a configuration value on the Daemon given a dot-notated syntax.
-func (f *ConfigurationFile) LookupConfigurationValue(cfr ConfigurationFileReplacement) ([]byte, error) {
+func (f *ConfigurationFile) LookupConfigurationValue(cfr ConfigurationFileReplacement) (string, error) {
 	// If this is not something that we can do a regex lookup on then just continue
 	// on our merry way. If the value isn't a string, we're not going to be doing anything
 	// with it anyways.
 	if cfr.ReplaceWith.Type() != jsonparser.String || !configMatchRegex.Match(cfr.ReplaceWith.Value()) {
-		return cfr.ReplaceWith.Value(), nil
+		return cfr.ReplaceWith.String(), nil
 	}
 
 	// If there is a match, lookup the value in the configuration for the Daemon. If no key
@@ -165,11 +165,8 @@ func (f *ConfigurationFile) LookupConfigurationValue(cfr ConfigurationFileReplac
 	)
 
 	var path []string
-	// The camel casing is important here, the configuration for the Daemon does not use
-	// JSON, and as such all of the keys will be generated in CamelCase format, rather than
-	// the expected snake_case from the old Daemon.
 	for _, value := range strings.Split(huntPath, ".") {
-		path = append(path, strcase.ToCamel(value))
+		path = append(path, strcase.ToSnake(value))
 	}
 
 	// Look for the key in the configuration file, and if found return that value to the
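The switch from `strcase.ToCamel` to `strcase.ToSnake` above changes how each segment of a dotted lookup path is mapped onto the daemon's own configuration keys. A small sketch of just that conversion, with an invented lookup string:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/iancoleman/strcase"
)

func main() {
	// Hypothetical dotted lookup pulled out of a replacement value.
	huntPath := "sftp.bindPort"

	var path []string
	for _, value := range strings.Split(huntPath, ".") {
		// Normalise each segment to snake_case so it matches the daemon's JSON keys.
		path = append(path, strcase.ToSnake(value))
	}

	fmt.Println(path) // [sftp bind_port]
}
```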
@@ -177,21 +174,15 @@ func (f *ConfigurationFile) LookupConfigurationValue(cfr ConfigurationFileReplac
 	match, _, _, err := jsonparser.Get(f.configuration, path...)
 	if err != nil {
 		if err != jsonparser.KeyPathNotFoundError {
-			return match, errors.WithStack(err)
+			return string(match), errors.WithStack(err)
 		}
 
-		zap.S().Debugw(
-			"attempted to load a configuration value that does not exist",
-			zap.Strings("path", path),
-			zap.String("filename", f.FileName),
-		)
+		log.WithFields(log.Fields{"path": path, "filename": f.FileName}).Debug("attempted to load a configuration value that does not exist")
 
 		// If there is no key, keep the original value intact, that way it is obvious there
 		// is a replace issue at play.
-		return match, nil
+		return string(match), nil
 	} else {
-		replaced := []byte(configMatchRegex.ReplaceAllString(cfr.ReplaceWith.String(), string(match)))
-
-		return replaced, nil
+		return configMatchRegex.ReplaceAllString(cfr.ReplaceWith.String(), string(match)), nil
 	}
 }
110
parser/parser.go
@@ -3,16 +3,18 @@ package parser
 import (
 	"bufio"
 	"encoding/json"
+	"github.com/apex/log"
 	"github.com/beevik/etree"
 	"github.com/buger/jsonparser"
-	"github.com/ghodss/yaml"
+	"github.com/icza/dyno"
 	"github.com/magiconair/properties"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/config"
-	"go.uber.org/zap"
 	"gopkg.in/ini.v1"
+	"gopkg.in/yaml.v2"
 	"io/ioutil"
 	"os"
+	"regexp"
 	"strings"
 )
@@ -28,6 +30,10 @@ const (
 
 type ConfigurationParser string
 
+func (cp ConfigurationParser) String() string {
+	return string(cp)
+}
+
 // Defines a configuration file for the server startup. These will be looped over
 // and modified before the server finishes booting.
 type ConfigurationFile struct {
@@ -40,6 +46,40 @@ type ConfigurationFile struct {
 	configuration []byte
 }
 
+// Custom unmarshaler for configuration files. If there is an error while parsing out the
+// replacements, don't fail the entire operation, just log a global warning so someone can
+// find the issue, and return an empty array of replacements.
+//
+// I imagine people will notice configuration replacement isn't working correctly and then
+// the logs should help better expose that issue.
+func (f *ConfigurationFile) UnmarshalJSON(data []byte) error {
+	var m map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &m); err != nil {
+		return err
+	}
+
+	if err := json.Unmarshal(*m["file"], &f.FileName); err != nil {
+		return err
+	}
+
+	if err := json.Unmarshal(*m["parser"], &f.Parser); err != nil {
+		return err
+	}
+
+	if err := json.Unmarshal(*m["replace"], &f.Replace); err != nil {
+		log.WithField("file", f.FileName).WithField("error", err).Warn("failed to unmarshal configuration file replacement")
+
+		f.Replace = []ConfigurationFileReplacement{}
+	}
+
+	return nil
+}
+
+// Regex to match paths such as foo[1].bar[2] and convert them into a format that
+// gabs can work with, such as foo.1.bar.2 in this case. This is applied when creating
+// the struct for the configuration file replacements.
+var cfrMatchReplacement = regexp.MustCompile(`\[(\d+)]`)
+
 // Defines a single find/replace instance for a given server configuration file.
 type ConfigurationFileReplacement struct {
 	Match string `json:"match"`
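A quick sketch of what the `cfrMatchReplacement` expression added above does to a match path; only the sample path is invented.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same expression as the hunk above: capture a bracketed index and rewrite it
	// as a dotted segment that gabs understands.
	cfrMatchReplacement := regexp.MustCompile(`\[(\d+)]`)

	fmt.Println(cfrMatchReplacement.ReplaceAllString("foo[1].bar[2]", ".$1"))
	// foo.1.bar.2
}
```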
@@ -52,22 +92,34 @@ type ConfigurationFileReplacement struct {
 func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
 	m, err := jsonparser.GetString(data, "match")
 	if err != nil {
-		return errors.WithStack(err)
+		return err
 	}
-	cfr.Match = m
+
+	// See comment on the replacement regex to understand what exactly this is doing.
+	cfr.Match = cfrMatchReplacement.ReplaceAllString(m, ".$1")
+
 	iv, err := jsonparser.GetString(data, "if_value")
 	// We only check keypath here since match & replace_with should be present on all of
 	// them, however if_value is optional.
 	if err != nil && err != jsonparser.KeyPathNotFoundError {
-		return errors.WithStack(err)
+		return err
 	}
 	cfr.IfValue = iv
 
 	rw, dt, _, err := jsonparser.Get(data, "replace_with")
 	if err != nil {
-		return errors.WithStack(err)
+		if err != jsonparser.KeyPathNotFoundError {
+			return err
+		}
+
+		// Okay, likely dealing with someone who forgot to upgrade their eggs, so in
+		// that case, fallback to using the old key which was "value".
+		rw, dt, _, err = jsonparser.Get(data, "value")
+		if err != nil {
+			return err
+		}
 	}
 
 	cfr.ReplaceWith = ReplaceValue{
 		value:     rw,
 		valueType: dt,
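The fallback added above means both the current `replace_with` key and the legacy `value` key are accepted. A minimal standalone sketch of that lookup order with buger/jsonparser; the sample payloads and helper name are invented.

```go
package main

import (
	"fmt"

	"github.com/buger/jsonparser"
)

// replaceWith returns the replacement value, preferring the modern "replace_with"
// key and falling back to the legacy "value" key, mirroring the unmarshal logic above.
func replaceWith(data []byte) (string, error) {
	rw, _, _, err := jsonparser.Get(data, "replace_with")
	if err != nil {
		if err != jsonparser.KeyPathNotFoundError {
			return "", err
		}

		rw, _, _, err = jsonparser.Get(data, "value")
		if err != nil {
			return "", err
		}
	}

	return string(rw), nil
}

func main() {
	modern := []byte(`{"match":"server-port","replace_with":"25565"}`)
	legacy := []byte(`{"match":"server-port","value":"25565"}`)

	fmt.Println(replaceWith(modern)) // 25565 <nil>
	fmt.Println(replaceWith(legacy)) // 25565 <nil>
}
```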
@@ -79,10 +131,13 @@ func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
 // Parses a given configuration file and updates all of the values within as defined
 // in the API response from the Panel.
 func (f *ConfigurationFile) Parse(path string, internal bool) error {
-	zap.S().Debugw("parsing configuration file", zap.String("path", path), zap.String("parser", string(f.Parser)))
+	log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
 
-	mb, _ := json.Marshal(config.Get())
+	if mb, err := json.Marshal(config.Get()); err != nil {
+		return err
+	} else {
 		f.configuration = mb
+	}
 
 	var err error
 
@@ -181,13 +236,13 @@ func (f *ConfigurationFile) parseXmlFile(path string) error {
 
 		// Iterate over the elements we found and update their values.
 		for _, element := range doc.FindElements(path) {
-			if xmlValueMatchRegex.Match(value) {
-				k := xmlValueMatchRegex.ReplaceAllString(string(value), "$1")
-				v := xmlValueMatchRegex.ReplaceAllString(string(value), "$2")
+			if xmlValueMatchRegex.MatchString(value) {
+				k := xmlValueMatchRegex.ReplaceAllString(value, "$1")
+				v := xmlValueMatchRegex.ReplaceAllString(value, "$2")
 
 				element.CreateAttr(k, v)
 			} else {
-				element.SetText(string(value))
+				element.SetText(value)
 			}
 		}
 	}
@@ -218,12 +273,13 @@ func (f *ConfigurationFile) parseXmlFile(path string) error {
 // Parses an ini file.
 func (f *ConfigurationFile) parseIniFile(path string) error {
 	// Ini package can't handle a non-existent file, so handle that automatically here
-	// by creating it if not exists.
+	// by creating it if not exists. Then, immediately close the file since we will use
+	// other methods to write the new contents.
 	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0644)
 	if err != nil {
 		return err
 	}
-	defer file.Close()
+	file.Close()
 
 	cfg, err := ini.Load(path)
 	if err != nil {
@@ -258,24 +314,15 @@ func (f *ConfigurationFile) parseIniFile(path string) error {
 			// If the key exists in the file go ahead and set the value, otherwise try to
 			// create it in the section.
 			if s.HasKey(k) {
-				s.Key(k).SetValue(string(value))
+				s.Key(k).SetValue(value)
 			} else {
-				if _, err := s.NewKey(k, string(value)); err != nil {
+				if _, err := s.NewKey(k, value); err != nil {
 					return err
 				}
 			}
 		}
 	}
 
-	// Truncate the file before attempting to write the changes.
-	if err := os.Truncate(path, 0); err != nil {
-		return err
-	}
-
-	if _, err := cfg.WriteTo(file); err != nil {
-		return err
-	}
-
-	return nil
+	return cfg.SaveTo(path)
 }
 
 // Parses a json file updating any matching key/value pairs. If a match is not found, the
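The simplification above leans on the ini package's own writer instead of truncating and rewriting the file by hand. Below is a minimal sketch of that load, mutate, and save round trip; the file name and keys are placeholders, and the error handling is deliberately terse.

```go
package main

import (
	"log"

	"gopkg.in/ini.v1"
)

func main() {
	// Load an existing ini file, set a key in the default section, and let SaveTo
	// handle truncating and writing the file back out.
	cfg, err := ini.Load("server.properties.ini") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	section := cfg.Section("")
	if section.HasKey("server-port") {
		section.Key("server-port").SetValue("25565")
	} else if _, err := section.NewKey("server-port", "25565"); err != nil {
		log.Fatal(err)
	}

	if err := cfg.SaveTo("server.properties.ini"); err != nil {
		log.Fatal(err)
	}
}
```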
@@ -304,10 +351,15 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
 		return err
 	}
 
+	i := make(map[string]interface{})
+	if err := yaml.Unmarshal(b, &i); err != nil {
+		return err
+	}
+
 	// Unmarshal the yaml data into a JSON interface such that we can work with
 	// any arbitrary data structure. If we don't do this, I can't use gabs which
 	// makes working with unknown JSON signficiantly easier.
-	jsonBytes, err := yaml.YAMLToJSON(b)
+	jsonBytes, err := json.Marshal(dyno.ConvertMapI2MapS(i))
 	if err != nil {
 		return err
 	}
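The change above swaps ghodss/yaml for a yaml.v2 plus dyno round trip: yaml.v2 produces `map[interface{}]interface{}` values for nested maps, which encoding/json cannot marshal, so `dyno.ConvertMapI2MapS` rewrites the keys to strings first. A minimal sketch with an invented YAML snippet:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/icza/dyno"
	"gopkg.in/yaml.v2"
)

func main() {
	b := []byte("server:\n  host: 0.0.0.0\n  port: 25565\n")

	// yaml.v2 gives nested maps interface{} keys.
	i := make(map[string]interface{})
	if err := yaml.Unmarshal(b, &i); err != nil {
		panic(err)
	}

	// Convert interface{} keys to string keys so encoding/json can marshal the result.
	jsonBytes, err := json.Marshal(dyno.ConvertMapI2MapS(i))
	if err != nil {
		panic(err)
	}

	fmt.Println(string(jsonBytes)) // {"server":{"host":"0.0.0.0","port":25565}}
}
```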
@@ -320,7 +372,7 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
 	}
 
 	// Remarshal the JSON into YAML format before saving it back to the disk.
-	marshaled, err := yaml.JSONToYAML(data.Bytes())
+	marshaled, err := yaml.Marshal(data.Data())
 	if err != nil {
 		return err
 	}
@@ -392,7 +444,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
 			continue
 		}
 
-		if _, _, err := p.Set(replace.Match, string(data)); err != nil {
+		if _, _, err := p.Set(replace.Match, data); err != nil {
 			return err
 		}
 	}
@@ -14,7 +14,9 @@ func (cv *ReplaceValue) Value() []byte {
 }
 
 func (cv *ReplaceValue) String() string {
-	return string(cv.value)
+	str, _ := jsonparser.ParseString(cv.value)
+
+	return str
 }
 
 func (cv *ReplaceValue) Type() jsonparser.ValueType {
@@ -2,11 +2,11 @@ package router
 
 import (
 	"fmt"
+	"github.com/apex/log"
 	"github.com/gin-gonic/gin"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/server"
-	"go.uber.org/zap"
 	"net/http"
 	"os"
 )
@@ -33,13 +33,21 @@ func TrackedError(err error) *RequestError {
 // generated this server for the purposes of logging.
 func TrackedServerError(err error, s *server.Server) *RequestError {
 	return &RequestError{
-		Err:     err,
+		Err:     errors.WithStack(err),
 		Uuid:    uuid.Must(uuid.NewRandom()).String(),
 		Message: "",
 		server:  s,
 	}
 }
 
+func (e *RequestError) logger() *log.Entry {
+	if e.server != nil {
+		return e.server.Log().WithField("error_id", e.Uuid)
+	}
+
+	return log.WithField("error_id", e.Uuid)
+}
+
 // Sets the output message to display to the user in the error.
 func (e *RequestError) SetMessage(msg string) *RequestError {
 	e.Message = msg
@@ -53,6 +61,8 @@ func (e *RequestError) AbortWithStatus(status int, c *gin.Context) {
 	// If this error is because the resource does not exist, we likely do not need to log
 	// the error anywhere, just return a 404 and move on with our lives.
 	if os.IsNotExist(e.Err) {
+		e.logger().WithField("error", e.Err).Debug("encountered os.IsNotExist error while handling request")
+
 		c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
 			"error": "The requested resource was not found on the system.",
 		})
@@ -61,19 +71,11 @@ func (e *RequestError) AbortWithStatus(status int, c *gin.Context) {
 
 	// Otherwise, log the error to zap, and then report the error back to the user.
 	if status >= 500 {
-		if e.server != nil {
-			zap.S().Errorw("encountered error while handling HTTP request", zap.String("server", e.server.Uuid), zap.String("error_id", e.Uuid), zap.Error(e.Err))
-		} else {
-			zap.S().Errorw("encountered error while handling HTTP request", zap.String("error_id", e.Uuid), zap.Error(e.Err))
-		}
+		e.logger().WithField("error", e.Err).Error("encountered HTTP/500 error while handling request")
 
 		c.Error(errors.WithStack(e))
 	} else {
-		if e.server != nil {
-			zap.S().Debugw("encountered error while handling HTTP request", zap.String("server", e.server.Uuid), zap.String("error_id", e.Uuid), zap.Error(e.Err))
-		} else {
-			zap.S().Debugw("encountered error while handling HTTP request", zap.String("error_id", e.Uuid), zap.Error(e.Err))
-		}
+		e.logger().WithField("error", e.Err).Debug("encountered non-HTTP/500 error while handling request")
 	}
 
 	msg := "An unexpected error was encountered while processing this request."
@@ -48,7 +48,7 @@ func AuthorizationMiddleware(c *gin.Context) {
 // Helper function to fetch a server out of the servers collection stored in memory.
 func GetServer(uuid string) *server.Server {
 	return server.GetServers().Find(func(s *server.Server) bool {
-		return uuid == s.Uuid
+		return uuid == s.Id()
 	})
 }
 
@@ -58,7 +58,7 @@ func ServerExists(c *gin.Context) {
 	u, err := uuid.Parse(c.Param("server"))
 	if err != nil || GetServer(u.String()) == nil {
 		c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
-			"error": "The requested server does not exist.",
+			"error": "The resource you requested does not exist.",
 		})
 		return
 	}
@@ -1,13 +1,32 @@
 package router
 
 import (
+	"github.com/apex/log"
 	"github.com/gin-gonic/gin"
 )
 
 // Configures the routing infrastructure for this daemon instance.
 func Configure() *gin.Engine {
-	router := gin.Default()
+	gin.SetMode("release")
+
+	router := gin.New()
+
+	router.Use(gin.Recovery())
 	router.Use(SetAccessControlHeaders)
+	// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
+	// This should still dump requests in debug mode since it does help with understanding the request
+	// lifecycle and quickly seeing what was called leading to the logs. However, it isn't feasible to mix
+	// this output in production and still get meaningful logs from it since they'll likely just be a huge
+	// spamfest.
+	router.Use(gin.LoggerWithFormatter(func(params gin.LogFormatterParams) string {
+		log.WithFields(log.Fields{
+			"client_ip": params.ClientIP,
+			"status":    params.StatusCode,
+			"latency":   params.Latency,
+		}).Debugf("%s %s", params.MethodColor()+params.Method+params.ResetColor(), params.Path)
+
+		return ""
+	}))
+
 	router.OPTIONS("/api/system", func(c *gin.Context) {
 		c.Status(200)
@@ -20,12 +39,12 @@ func Configure() *gin.Engine {
 	// This route is special it sits above all of the other requests because we are
 	// using a JWT to authorize access to it, therefore it needs to be publicly
 	// accessible.
-	router.GET("/api/servers/:server/ws", getServerWebsocket)
+	router.GET("/api/servers/:server/ws", ServerExists, getServerWebsocket)
 
 	// This request is called by another daemon when a server is going to be transferred out.
 	// This request does not need the AuthorizationMiddleware as the panel should never call it
 	// and requests are authenticated through a JWT the panel issues to the other daemon.
-	router.GET("/api/servers/:server/archive", getServerArchive)
+	router.GET("/api/servers/:server/archive", ServerExists, getServerArchive)
 
 	// All of the routes beyond this mount will use an authorization middleware
 	// and will not be accessible without the correct Authorization header provided.
@@ -59,11 +78,13 @@ func Configure() *gin.Engine {
 	{
 		files.GET("/contents", getServerFileContents)
 		files.GET("/list-directory", getServerListDirectory)
-		files.PUT("/rename", putServerRenameFile)
+		files.PUT("/rename", putServerRenameFiles)
 		files.POST("/copy", postServerCopyFile)
 		files.POST("/write", postServerWriteFile)
 		files.POST("/create-directory", postServerCreateDirectory)
-		files.POST("/delete", postServerDeleteFile)
+		files.POST("/delete", postServerDeleteFiles)
+		files.POST("/compress", postServerCompressFiles)
+		files.POST("/decompress", postServerDecompressFiles)
 	}
 
 	backup := server.Group("/backup")
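The Configure() hunk above swaps gin.Default() for an explicitly assembled engine (release mode, Recovery, CORS headers, and a debug-only request logger routed through apex/log). A minimal sketch of mounting the returned engine, assuming a hypothetical entrypoint and listen address; the real daemon wires this up from its own configuration:

```go
package main

import "github.com/pterodactyl/wings/router"

func main() {
	// Engine comes back with recovery, CORS headers, and the debug logger already attached.
	r := router.Configure()

	// Hypothetical listen address, for illustration only.
	if err := r.Run("0.0.0.0:8080"); err != nil {
		panic(err)
	}
}
```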
@@ -2,10 +2,10 @@ package router
 
 import (
 	"bytes"
+	"github.com/apex/log"
 	"github.com/gin-gonic/gin"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/server"
-	"go.uber.org/zap"
 	"net/http"
 	"os"
 	"strconv"
@@ -46,7 +46,10 @@ func postServerPower(c *gin.Context) {
 	s := GetServer(c.Param("server"))
 
 	var data server.PowerAction
-	c.BindJSON(&data)
+	// BindJSON sends 400 if the request fails, all we need to do is return
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
 
 	if !data.IsValid() {
 		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
@@ -61,7 +64,7 @@ func postServerPower(c *gin.Context) {
 	//
 	// We don't really care about any of the other actions at this point, they'll all result
 	// in the process being stopped, which should have happened anyways if the server is suspended.
-	if (data.Action == "start" || data.Action == "restart") && s.Suspended {
+	if (data.Action == "start" || data.Action == "restart") && s.IsSuspended() {
 		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
 			"error": "Cannot start or restart a server that is suspended.",
 		})
@@ -71,15 +74,12 @@ func postServerPower(c *gin.Context) {
 	// Pass the actual heavy processing off to a seperate thread to handle so that
 	// we can immediately return a response from the server. Some of these actions
 	// can take quite some time, especially stopping or restarting.
-	go func() {
-		if err := s.HandlePowerAction(data); err != nil {
-			zap.S().Errorw(
-				"encountered an error processing a server power action",
-				zap.String("server", s.Uuid),
-				zap.Error(err),
-			)
+	go func(server *server.Server) {
+		if err := server.HandlePowerAction(data); err != nil {
+			server.Log().WithFields(log.Fields{"action": data, "error": err}).
+				Error("encountered error processing a server power action in the background")
 		}
-	}()
+	}(s)
 
 	c.Status(http.StatusAccepted)
 }
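The recurring `// BindJSON sends 400 if the request fails` comment leans on gin's binding behaviour: BindJSON writes the 400 response itself when decoding fails, so the handler only has to return. A minimal sketch of that pattern with a hypothetical payload type:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func postExample(c *gin.Context) {
	// Hypothetical payload used purely for illustration.
	var data struct {
		Action string `json:"action"`
	}

	// BindJSON has already aborted the request with a 400 status on failure,
	// so there is nothing left for the handler to do but return.
	if err := c.BindJSON(&data); err != nil {
		return
	}

	c.Status(http.StatusAccepted)
}

func main() {
	r := gin.Default()
	r.POST("/example", postExample)
	r.Run("127.0.0.1:8080")
}
```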
@@ -98,17 +98,17 @@ func postServerCommands(c *gin.Context) {
 		return
 	}
 
-	var data struct{ Commands []string `json:"commands"` }
-	c.BindJSON(&data)
+	var data struct {
+		Commands []string `json:"commands"`
+	}
+	// BindJSON sends 400 if the request fails, all we need to do is return
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
 
 	for _, command := range data.Commands {
 		if err := s.Environment.SendCommand(command); err != nil {
-			zap.S().Warnw(
-				"failed to send command to server",
-				zap.String("server", s.Uuid),
-				zap.String("command", command),
-				zap.Error(err),
-			)
+			s.Log().WithFields(log.Fields{"command": command, "error": err}).Warn("failed to send command to server instance")
 		}
 	}
 
@@ -135,12 +135,8 @@ func postServerInstall(c *gin.Context) {
 	s := GetServer(c.Param("server"))
 
 	go func(serv *server.Server) {
-		if err := serv.Install(); err != nil {
-			zap.S().Errorw(
-				"failed to execute server installation process",
-				zap.String("server", serv.Uuid),
-				zap.Error(err),
-			)
+		if err := serv.Install(true); err != nil {
+			serv.Log().WithField("error", err).Error("failed to execute server installation process")
 		}
 	}(s)
 
@@ -153,11 +149,7 @@ func postServerReinstall(c *gin.Context) {
 
 	go func(serv *server.Server) {
 		if err := serv.Reinstall(); err != nil {
-			zap.S().Errorw(
-				"failed to complete server reinstall process",
-				zap.String("server", serv.Uuid),
-				zap.Error(err),
-			)
+			serv.Log().WithField("error", err).Error("failed to complete server re-install process")
 		}
 	}(s)
 
@@ -170,14 +162,22 @@ func deleteServer(c *gin.Context) {
 
 	// Immediately suspend the server to prevent a user from attempting
 	// to start it while this process is running.
-	s.Suspended = true
+	s.Config().SetSuspended(true)
 
+	// If the server is currently installing, abort it.
+	if s.IsInstalling() {
+		s.AbortInstallation()
+	}
+
 	// Delete the server's archive if it exists. We intentionally don't return
 	// here, if the archive fails to delete, the server can still be removed.
 	if err := s.Archiver.DeleteIfExists(); err != nil {
-		zap.S().Warnw("failed to delete server archive during deletion process", zap.String("server", s.Uuid), zap.Error(err))
+		s.Log().WithField("error", err).Warn("failed to delete server archive during deletion process")
 	}
 
+	// Unsubscribe all of the event listeners.
+	s.Events().UnsubscribeAll()
+
 	// Destroy the environment; in Docker this will handle a running container and
 	// forcibly terminate it before removing the container, so we do not need to handle
 	// that here.
@@ -193,13 +193,16 @@ func deleteServer(c *gin.Context) {
 	// so we don't want to block the HTTP call while waiting on this.
 	go func(p string) {
 		if err := os.RemoveAll(p); err != nil {
-			zap.S().Warnw("failed to remove server files during deletion process", zap.String("path", p), zap.Error(errors.WithStack(err)))
+			log.WithFields(log.Fields{
+				"path":  p,
+				"error": errors.WithStack(err),
+			}).Warn("failed to remove server files during deletion process")
 		}
 	}(s.Filesystem.Path())
 
-	var uuid = s.Uuid
+	var uuid = s.Id()
 	server.GetServers().Remove(func(s2 *server.Server) bool {
-		return s2.Uuid == uuid
+		return s2.Id() == uuid
 	})
 
 	// Deallocate the reference to this server.
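Most hunks in this comparison replace zap's sugared logger with apex/log field chains hung off Server.Log(). A small sketch of the two styles next to each other, using an arbitrary error value rather than the daemon's own types:

```go
package main

import (
	"errors"
	"os"

	"github.com/apex/log"
	"github.com/apex/log/handlers/text"
	"go.uber.org/zap"
)

func main() {
	// Wire both loggers to stderr so the two calls below actually emit output.
	zlog, _ := zap.NewDevelopment()
	zap.ReplaceGlobals(zlog)
	log.SetHandler(text.New(os.Stderr))

	err := errors.New("example failure")

	// Style on the left-hand side of these hunks: sugared key/value logging.
	zap.S().Warnw("failed to delete server archive during deletion process", zap.Error(err))

	// Style on the right-hand side: fields attached to the entry before the message.
	log.WithFields(log.Fields{"error": err}).Warn("failed to delete server archive during deletion process")
}
```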
@@ -1,10 +1,11 @@
 package router
 
 import (
+	"errors"
+	"fmt"
 	"github.com/gin-gonic/gin"
 	"github.com/pterodactyl/wings/server"
 	"github.com/pterodactyl/wings/server/backup"
-	"go.uber.org/zap"
 	"net/http"
 )
 
@@ -12,14 +13,35 @@ import (
 func postServerBackup(c *gin.Context) {
 	s := GetServer(c.Param("server"))
 
-	data := &backup.LocalBackup{}
-	c.BindJSON(&data)
-
-	go func(b *backup.LocalBackup, serv *server.Server) {
-		if err := serv.BackupLocal(b); err != nil {
-			zap.S().Errorw("failed to generate backup for server", zap.Error(err))
-		}
-	}(data, s)
+	data := &backup.Request{}
+	// BindJSON sends 400 if the request fails, all we need to do is return
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
+
+	var adapter backup.BackupInterface
+	var err error
+
+	switch data.Adapter {
+	case backup.LocalBackupAdapter:
+		adapter, err = data.NewLocalBackup()
+	case backup.S3BackupAdapter:
+		adapter, err = data.NewS3Backup()
+	default:
+		err = errors.New(fmt.Sprintf("unknown backup adapter [%s] provided", data.Adapter))
+		return
+	}
+
+	if err != nil {
+		TrackedServerError(err, s).AbortWithServerError(c)
+		return
+	}
+
+	go func(b backup.BackupInterface, serv *server.Server) {
+		if err := serv.Backup(b); err != nil {
+			serv.Log().WithField("error", err).Error("failed to generate backup for server")
+		}
+	}(adapter, s)
 
 	c.Status(http.StatusAccepted)
 }
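The reworked postServerBackup picks a backup adapter from the request before handing the work to a goroutine. A sketch of the same selection shape with placeholder types; backup.Request's exact fields and the adapter constructors are not shown in this diff, so everything here is a stand-in:

```go
package main

import "fmt"

// Stand-ins for backup.Request and the adapter implementations.
type request struct {
	Adapter string `json:"adapter"`
	Uuid    string `json:"uuid"`
}

type adapter struct{ kind, uuid string }

func selectAdapter(r request) (*adapter, error) {
	switch r.Adapter {
	case "wings": // mirrors backup.LocalBackupAdapter
		return &adapter{kind: "local", uuid: r.Uuid}, nil
	case "s3": // mirrors backup.S3BackupAdapter
		return &adapter{kind: "s3", uuid: r.Uuid}, nil
	default:
		return nil, fmt.Errorf("unknown backup adapter [%s] provided", r.Adapter)
	}
}

func main() {
	a, err := selectAdapter(request{Adapter: "s3", Uuid: "example-uuid"})
	fmt.Println(a, err)
}
```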
@@ -2,16 +2,30 @@ package router
 
 import (
 	"bufio"
+	"context"
 	"github.com/gin-gonic/gin"
+	"github.com/pterodactyl/wings/server"
+	"golang.org/x/sync/errgroup"
 	"net/http"
+	"net/url"
 	"os"
+	"path"
 	"strconv"
+	"strings"
 )
 
 // Returns the contents of a file on the server.
 func getServerFileContents(c *gin.Context) {
 	s := GetServer(c.Param("server"))
-	cleaned, err := s.Filesystem.SafePath(c.Query("file"))
+
+	p, err := url.QueryUnescape(c.Query("file"))
+	if err != nil {
+		TrackedServerError(err, s).AbortWithServerError(c)
+		return
+	}
+	p = "/" + strings.TrimLeft(p, "/")
+
+	cleaned, err := s.Filesystem.SafePath(p)
 	if err != nil {
 		c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
 			"error": "The file requested could not be found.",
@@ -56,33 +70,71 @@ func getServerFileContents(c *gin.Context) {
 func getServerListDirectory(c *gin.Context) {
 	s := GetServer(c.Param("server"))
 
-	stats, err := s.Filesystem.ListDirectory(c.Query("directory"))
+	d, err := url.QueryUnescape(c.Query("directory"))
 	if err != nil {
 		TrackedServerError(err, s).AbortWithServerError(c)
 		return
 	}
 
+	stats, err := s.Filesystem.ListDirectory(d)
+	if err != nil {
+		if err.Error() == "readdirent: not a directory" {
+			c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
+				"error": "The requested directory does not exist.",
+			})
+			return
+		}
+
+		TrackedServerError(err, s).AbortWithServerError(c)
+		return
+	}
+
 	c.JSON(http.StatusOK, stats)
 }
 
-// Renames (or moves) a file for a server.
-func putServerRenameFile(c *gin.Context) {
+type renameFile struct {
+	To   string `json:"to"`
+	From string `json:"from"`
+}
+
+// Renames (or moves) files for a server.
+func putServerRenameFiles(c *gin.Context) {
 	s := GetServer(c.Param("server"))
 
-	var data struct{
-		RenameFrom string `json:"rename_from"`
-		RenameTo string `json:"rename_to"`
+	var data struct {
+		Root  string       `json:"root"`
+		Files []renameFile `json:"files"`
 	}
-	c.BindJSON(&data)
+	// BindJSON sends 400 if the request fails, all we need to do is return
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
 
-	if data.RenameFrom == "" || data.RenameTo == "" {
+	if len(data.Files) == 0 {
 		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
-			"error": "Invalid paths were provided, did you forget to provide both a new and old path?",
+			"error": "No files to move or rename were provided.",
 		})
 		return
 	}
 
-	if err := s.Filesystem.Rename(data.RenameFrom, data.RenameTo); err != nil {
+	g, ctx := errgroup.WithContext(context.Background())
+
+	// Loop over the array of files passed in and perform the move or rename action against each.
+	for _, p := range data.Files {
+		pf := path.Join(data.Root, p.From)
+		pt := path.Join(data.Root, p.To)
+
+		g.Go(func() error {
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+				return s.Filesystem.Rename(pf, pt)
+			}
+		})
+	}
+
+	if err := g.Wait(); err != nil {
 		TrackedServerError(err, s).AbortWithServerError(c)
 		return
 	}
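Both the rename handler above and the delete handler further down fan per-file work out through golang.org/x/sync/errgroup, so the first failure cancels whatever has not started yet. The core pattern in isolation, with a placeholder in place of the filesystem call:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	files := []string{"a.txt", "b.txt", "c.txt"}

	g, ctx := errgroup.WithContext(context.Background())
	for _, p := range files {
		p := p // capture the loop variable for the goroutine below
		g.Go(func() error {
			select {
			case <-ctx.Done():
				// Another file already failed; skip the remaining work.
				return ctx.Err()
			default:
				// Placeholder for Filesystem.Rename or Filesystem.Delete.
				fmt.Println("processing", p)
				return nil
			}
		})
	}

	// Wait returns the first non-nil error reported by any goroutine.
	if err := g.Wait(); err != nil {
		fmt.Println("aborted:", err)
	}
}
```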
@@ -97,7 +149,10 @@ func postServerCopyFile(c *gin.Context) {
 	var data struct {
 		Location string `json:"location"`
 	}
-	c.BindJSON(&data)
+	// BindJSON sends 400 if the request fails, all we need to do is return
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
 
 	if err := s.Filesystem.Copy(data.Location); err != nil {
 		TrackedServerError(err, s).AbortWithServerError(c)
@@ -107,16 +162,44 @@ func postServerCopyFile(c *gin.Context) {
 	c.Status(http.StatusNoContent)
 }
 
-// Deletes a server file.
-func postServerDeleteFile(c *gin.Context) {
+// Deletes files from a server.
+func postServerDeleteFiles(c *gin.Context) {
 	s := GetServer(c.Param("server"))
 
 	var data struct {
-		Location string `json:"location"`
+		Root  string   `json:"root"`
+		Files []string `json:"files"`
 	}
-	c.BindJSON(&data)
 
-	if err := s.Filesystem.Delete(data.Location); err != nil {
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
+
+	if len(data.Files) == 0 {
+		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
+			"error": "No files were specififed for deletion.",
+		})
+		return
+	}
+
+	g, ctx := errgroup.WithContext(context.Background())
+
+	// Loop over the array of files passed in and delete them. If any of the file deletions
+	// fail just abort the process entirely.
+	for _, p := range data.Files {
+		pi := path.Join(data.Root, p)
+
+		g.Go(func() error {
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			default:
+				return s.Filesystem.Delete(pi)
+			}
+		})
+	}
+
+	if err := g.Wait(); err != nil {
 		TrackedServerError(err, s).AbortWithServerError(c)
 		return
 	}
@@ -128,7 +211,14 @@ func postServerDeleteFile(c *gin.Context) {
 func postServerWriteFile(c *gin.Context) {
 	s := GetServer(c.Param("server"))
 
-	if err := s.Filesystem.Writefile(c.Query("file"), c.Request.Body); err != nil {
+	f, err := url.QueryUnescape(c.Query("file"))
+	if err != nil {
+		TrackedServerError(err, s).AbortWithServerError(c)
+		return
+	}
+	f = "/" + strings.TrimLeft(f, "/")
+
+	if err := s.Filesystem.Writefile(f, c.Request.Body); err != nil {
 		TrackedServerError(err, s).AbortWithServerError(c)
 		return
 	}
@@ -144,7 +234,10 @@ func postServerCreateDirectory(c *gin.Context) {
 		Name string `json:"name"`
 		Path string `json:"path"`
 	}
-	c.BindJSON(&data)
+	// BindJSON sends 400 if the request fails, all we need to do is return
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
 
 	if err := s.Filesystem.CreateDirectory(data.Name, data.Path); err != nil {
 		TrackedServerError(err, s).AbortWithServerError(c)
@@ -153,3 +246,74 @@ func postServerCreateDirectory(c *gin.Context) {
 
 	c.Status(http.StatusNoContent)
 }
+
+func postServerCompressFiles(c *gin.Context) {
+	s := GetServer(c.Param("server"))
+
+	var data struct {
+		RootPath string   `json:"root"`
+		Files    []string `json:"files"`
+	}
+
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
+
+	if len(data.Files) == 0 {
+		c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
+			"error": "No files were passed through to be compressed.",
+		})
+		return
+	}
+
+	if !s.Filesystem.HasSpaceAvailable() {
+		c.AbortWithStatusJSON(http.StatusConflict, gin.H{
+			"error": "This server does not have enough available disk space to generate a compressed archive.",
+		})
+		return
+	}
+
+	f, err := s.Filesystem.CompressFiles(data.RootPath, data.Files)
+	if err != nil {
+		TrackedServerError(err, s).AbortWithServerError(c)
+		return
+	}
+
+	c.JSON(http.StatusOK, &server.Stat{
+		Info:     f,
+		Mimetype: "application/tar+gzip",
+	})
+}
+
+func postServerDecompressFiles(c *gin.Context) {
+	s := GetServer(c.Param("server"))
+
+	var data struct {
+		RootPath string `json:"root"`
+		File     string `json:"file"`
+	}
+
+	if err := c.BindJSON(&data); err != nil {
+		return
+	}
+
+	hasSpace, err := s.Filesystem.SpaceAvailableForDecompression(data.RootPath, data.File)
+	if err != nil {
+		TrackedServerError(err, s).AbortWithServerError(c)
+		return
+	}
+
+	if !hasSpace {
+		c.AbortWithStatusJSON(http.StatusConflict, gin.H{
+			"error": "This server does not have enough available disk space to decompress this archive.",
+		})
+		return
+	}
+
+	if err := s.Filesystem.DecompressFile(data.RootPath, data.File); err != nil {
+		TrackedServerError(err, s).AbortWithServerError(c)
+		return
+	}
+
+	c.Status(http.StatusNoContent)
+}
@@ -6,7 +6,6 @@ import (
 	"github.com/gin-gonic/gin"
 	ws "github.com/gorilla/websocket"
 	"github.com/pterodactyl/wings/router/websocket"
-	"go.uber.org/zap"
 )
 
 // Upgrades a connection to a websocket and passes events along between.
@@ -40,7 +39,7 @@ func getServerWebsocket(c *gin.Context) {
 				ws.CloseServiceRestart,
 				ws.CloseAbnormalClosure,
 			) {
-				zap.S().Warnw("error handling websocket message", zap.Error(err))
+				s.Log().WithField("error", err).Warn("error handling websocket message for server")
 			}
 			break
 		}
@@ -52,9 +51,10 @@ func getServerWebsocket(c *gin.Context) {
 			continue
 		}
 
-		if err := handler.HandleInbound(j); err != nil {
-			handler.SendErrorJson(err)
-		}
+		go func(msg websocket.Message) {
+			if err := handler.HandleInbound(msg); err != nil {
+				handler.SendErrorJson(msg, err)
+			}
+		}(j)
 	}
 }
 
@@ -2,13 +2,14 @@ package router
 
 import (
 	"bytes"
+	"github.com/apex/log"
 	"github.com/gin-gonic/gin"
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/installer"
 	"github.com/pterodactyl/wings/server"
 	"github.com/pterodactyl/wings/system"
-	"go.uber.org/zap"
 	"net/http"
+	"strings"
 )
 
 // Returns information about the system that wings is running on.
@@ -58,12 +59,8 @@ func postCreateServer(c *gin.Context) {
 	go func(i *installer.Installer) {
 		i.Execute()
 
-		if err := i.Server().Install(); err != nil {
-			zap.S().Errorw(
-				"failed to run install process for server",
-				zap.String("server", i.Uuid()),
-				zap.Error(err),
-			)
+		if err := i.Server().Install(false); err != nil {
+			log.WithFields(log.Fields{"server": i.Uuid(), "error": err}).Error("failed to run install process for server")
 		}
 	}(install)
 
@@ -77,7 +74,20 @@ func postUpdateConfiguration(c *gin.Context) {
 	// A copy of the configuration we're using to bind the data recevied into.
 	cfg := *config.Get()
 
-	c.BindJSON(&cfg)
+	// BindJSON sends 400 if the request fails, all we need to do is return
+	if err := c.BindJSON(&cfg); err != nil {
+		return
+	}
+
+	// Keep the SSL certificates the same since the Panel will send through Lets Encrypt
+	// default locations. However, if we picked a different location manually we don't
+	// want to override that.
+	//
+	// If you pass through manual locations in the API call this logic will be skipped.
+	if strings.HasPrefix(cfg.Api.Ssl.KeyFile, "/etc/letsencrypt/live/") {
+		cfg.Api.Ssl.KeyFile = ccopy.Api.Ssl.KeyFile
+		cfg.Api.Ssl.CertificateFile = ccopy.Api.Ssl.CertificateFile
+	}
+
 	config.Set(&cfg)
 	if err := config.Get().WriteToDisk(); err != nil {
@@ -98,33 +98,33 @@ func postServerArchive(c *gin.Context) {
 		start := time.Now()
 
 		if err := server.Archiver.Archive(); err != nil {
-			zap.S().Errorw("failed to get archive for server", zap.String("server", s.Uuid), zap.Error(err))
+			zap.S().Errorw("failed to get archive for server", zap.String("server", server.Id()), zap.Error(err))
 			return
 		}
 
 		zap.S().Debugw(
 			"successfully created archive for server",
-			zap.String("server", server.Uuid),
+			zap.String("server", server.Id()),
 			zap.Duration("time", time.Now().Sub(start).Round(time.Microsecond)),
 		)
 
 		r := api.NewRequester()
-		rerr, err := r.SendArchiveStatus(server.Uuid, true)
+		rerr, err := r.SendArchiveStatus(server.Id(), true)
 		if rerr != nil || err != nil {
 			if err != nil {
-				zap.S().Errorw("failed to notify panel with archive status", zap.String("server", server.Uuid), zap.Error(err))
+				zap.S().Errorw("failed to notify panel with archive status", zap.String("server", server.Id()), zap.Error(err))
 				return
 			}
 
 			zap.S().Errorw(
 				"panel returned an error when sending the archive status",
-				zap.String("server", server.Uuid),
+				zap.String("server", server.Id()),
 				zap.Error(errors.New(rerr.String())),
 			)
 			return
 		}
 
-		zap.S().Debugw("successfully notified panel about archive status", zap.String("server", server.Uuid))
+		zap.S().Debugw("successfully notified panel about archive status", zap.String("server", server.Id()))
 	}(s)
 
 	c.Status(http.StatusAccepted)
@@ -4,10 +4,13 @@ import (
 	"encoding/json"
 	"github.com/gbrlsnchs/jwt/v3"
 	"strings"
+	"sync"
 )
 
 type WebsocketPayload struct {
 	jwt.Payload
+	sync.RWMutex
 
 	UserID      json.Number `json:"user_id"`
 	ServerUUID  string      `json:"server_uuid"`
 	Permissions []string    `json:"permissions"`
@@ -15,11 +18,24 @@ type WebsocketPayload struct {
 
 // Returns the JWT payload.
 func (p *WebsocketPayload) GetPayload() *jwt.Payload {
+	p.RLock()
+	defer p.RUnlock()
+
 	return &p.Payload
 }
 
+func (p *WebsocketPayload) GetServerUuid() string {
+	p.RLock()
+	defer p.RUnlock()
+
+	return p.ServerUUID
+}
+
 // Checks if the given token payload has a permission string.
 func (p *WebsocketPayload) HasPermission(permission string) bool {
+	p.RLock()
+	defer p.RUnlock()
+
 	for _, k := range p.Permissions {
 		if k == permission || (!strings.HasPrefix(permission, "admin") && k == "*") {
 			return true
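WebsocketPayload now embeds sync.RWMutex and funnels reads through locked getters because the payload can be replaced while other goroutines are still reading it (token refreshes race with inbound message handling). A stripped-down sketch of that guard:

```go
package main

import (
	"fmt"
	"sync"
)

// Stripped-down stand-in for tokens.WebsocketPayload.
type payload struct {
	sync.RWMutex
	ServerUUID string
}

func (p *payload) GetServerUuid() string {
	p.RLock()
	defer p.RUnlock()

	return p.ServerUUID
}

func (p *payload) SetServerUuid(uuid string) {
	p.Lock()
	defer p.Unlock()

	p.ServerUUID = uuid
}

func main() {
	p := &payload{}
	p.SetServerUuid("example-uuid")
	fmt.Println(p.GetServerUuid())
}
```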
@@ -1,7 +1,9 @@
 package websocket
 
 import (
+	"encoding/json"
 	"fmt"
+	"github.com/apex/log"
 	"github.com/gbrlsnchs/jwt/v3"
 	"github.com/google/uuid"
 	"github.com/gorilla/websocket"
@@ -9,7 +11,6 @@ import (
 	"github.com/pterodactyl/wings/config"
 	"github.com/pterodactyl/wings/router/tokens"
 	"github.com/pterodactyl/wings/server"
-	"go.uber.org/zap"
 	"net/http"
 	"os"
 	"strings"
@@ -84,7 +85,6 @@ func (h *Handler) SendJson(v *Message) error {
 	// If we're sending installation output but the user does not have the required
 	// permissions to see the output, don't send it down the line.
 	if v.Event == server.InstallOutputEvent {
-		zap.S().Debugf("%+v", v.Args)
 		if !j.HasPermission(PermissionReceiveInstall) {
 			return nil
 		}
@@ -127,7 +127,7 @@ func (h *Handler) TokenValid() error {
 		return errors.New("jwt does not have connect permission")
 	}
 
-	if h.server.Uuid != j.ServerUUID {
+	if h.server.Id() != j.GetServerUuid() {
 		return errors.New("jwt server uuid mismatch")
 	}
 
@@ -137,10 +137,7 @@ func (h *Handler) TokenValid() error {
 // Sends an error back to the connected websocket instance by checking the permissions
 // of the token. If the user has the "receive-errors" grant we will send back the actual
 // error message, otherwise we just send back a standard error message.
-func (h *Handler) SendErrorJson(err error) error {
-	h.Lock()
-	defer h.Unlock()
-
+func (h *Handler) SendErrorJson(msg Message, err error) error {
 	j := h.GetJwt()
 
 	message := "an unexpected error was encountered while handling this request"
@@ -154,15 +151,11 @@ func (h *Handler) SendErrorJson(err error) error {
 	wsm.Args = []string{m}
 
 	if !server.IsSuspendedError(err) {
-		zap.S().Errorw(
-			"an error was encountered in the websocket process",
-			zap.String("server", h.server.Uuid),
-			zap.String("error_identifier", u.String()),
-			zap.Error(err),
-		)
+		h.server.Log().WithFields(log.Fields{"event": msg.Event, "error_identifier": u.String(), "error": err}).
+			Error("failed to handle websocket process; an error was encountered processing an event")
 	}
 
-	return h.Connection.WriteJSON(wsm)
+	return h.unsafeSendJson(wsm)
 }
 
 // Converts an error message into a more readable representation and returns a UUID
@@ -193,7 +186,7 @@ func (h *Handler) GetJwt() *tokens.WebsocketPayload {
 func (h *Handler) HandleInbound(m Message) error {
 	if m.Event != AuthenticationEvent {
 		if err := h.TokenValid(); err != nil {
-			zap.S().Debugw("jwt token is no longer valid", zap.String("message", err.Error()))
+			log.WithField("message", err.Error()).Debug("jwt for server websocket is no longer valid")
 
 			h.unsafeSendJson(Message{
 				Event: ErrorEvent,
@@ -219,19 +212,48 @@ func (h *Handler) HandleInbound(m Message) error {
 			return err
 		}
 
-		if token.HasPermission(PermissionConnect) {
-			h.setJwt(token)
-		}
+		// Check if the user has previously authenticated successfully.
+		newConnection := h.GetJwt() == nil
+
+		// Previously there was a HasPermission(PermissionConnect) check around this,
+		// however NewTokenPayload will return an error if it doesn't have the connect
+		// permission meaning that it was a redundant function call.
+		h.setJwt(token)
 
-		// On every authentication event, send the current server status back
-		// to the client. :)
-		h.server.Events().Publish(server.StatusEvent, h.server.GetState())
-
+		// Tell the client they authenticated successfully.
 		h.unsafeSendJson(Message{
 			Event: AuthenticationSuccessEvent,
 			Args:  []string{},
 		})
 
+		// Check if the client was refreshing their authentication token
+		// instead of authenticating for the first time.
+		if !newConnection {
+			// This prevents duplicate status messages as outlined in
+			// https://github.com/pterodactyl/panel/issues/2077
+			return nil
+		}
+
+		// On every authentication event, send the current server status back
+		// to the client. :)
+		state := h.server.GetState()
+		h.SendJson(&Message{
+			Event: server.StatusEvent,
+			Args:  []string{state},
+		})
+
+		// Only send the current disk usage if the server is offline, if docker container is running,
+		// Environment#EnableResourcePolling() will send this data to all clients.
+		if state == server.ProcessOfflineState {
+			_ = h.server.Filesystem.HasSpaceAvailable()
+
+			b, _ := json.Marshal(h.server.Proc())
+			h.SendJson(&Message{
+				Event: server.StatsEvent,
+				Args:  []string{string(b)},
+			})
+		}
+
 		return nil
 	}
 	case SetStateEvent:
@@ -249,11 +271,14 @@ func (h *Handler) HandleInbound(m Message) error {
 			break
 		case "restart":
 			if h.GetJwt().HasPermission(PermissionSendPowerRestart) {
-				if err := h.server.Environment.WaitForStop(60, false); err != nil {
-					return err
+				// If the server is alreay restarting don't do anything. Perhaps we send back an event
+				// in the future for this? For now no reason to knowingly trigger an error by trying to
+				// restart a process already restarting.
+				if h.server.Environment.IsRestarting() {
+					return nil
 				}
 
-				return h.server.Environment.Start()
+				return h.server.Environment.Restart()
 			}
 			break
 		case "kill":
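The read loop in getServerWebsocket now hands each inbound message to its own goroutine so a slow handler cannot stall the socket, and SendErrorJson carries the originating message so errors can be tied back to the event. The shape of that dispatch with placeholder message and handler types:

```go
package main

import "fmt"

type message struct{ Event string }

type handler struct{}

func (h *handler) HandleInbound(m message) error {
	// Placeholder for the real event handling.
	return fmt.Errorf("unhandled event %q", m.Event)
}

func (h *handler) SendErrorJson(m message, err error) {
	fmt.Println("error for", m.Event, "->", err)
}

func main() {
	h := &handler{}
	inbound := []message{{Event: "send logs"}, {Event: "set state"}}

	done := make(chan struct{})
	for _, j := range inbound {
		go func(msg message) {
			if err := h.HandleInbound(msg); err != nil {
				h.SendErrorJson(msg, err)
			}
			done <- struct{}{}
		}(j)
	}

	for range inbound {
		<-done
	}
}
```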
17
server/allocations.go
Normal file
@@ -0,0 +1,17 @@
package server

// Defines the allocations available for a given server. When using the Docker environment
// driver these correspond to mappings for the container that allow external connections.
type Allocations struct {
	// Defines the default allocation that should be used for this server. This is
	// what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
	// files or the startup arguments for a server.
	DefaultMapping struct {
		Ip   string `json:"ip"`
		Port int    `json:"port"`
	} `json:"default"`

	// Mappings contains all of the ports that should be assigned to a given server
	// attached to the IP they correspond to.
	Mappings map[string][]int `json:"mappings"`
}
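A sketch of the JSON shape that would unmarshal into the new Allocations struct, using made-up addresses and ports; real values come from the Panel:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of the Allocations struct added above, reproduced so this snippet is self-contained.
type Allocations struct {
	DefaultMapping struct {
		Ip   string `json:"ip"`
		Port int    `json:"port"`
	} `json:"default"`
	Mappings map[string][]int `json:"mappings"`
}

func main() {
	// Made-up example payload.
	raw := `{"default":{"ip":"192.0.2.10","port":25565},"mappings":{"192.0.2.10":[25565,25566]}}`

	var a Allocations
	if err := json.Unmarshal([]byte(raw), &a); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", a)
}
```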
@@ -23,7 +23,7 @@ func (a *Archiver) ArchivePath() string {
 
 // ArchiveName returns the name of the server's archive.
 func (a *Archiver) ArchiveName() string {
-	return a.Server.Uuid + ".tar.gz"
+	return a.Server.Id() + ".tar.gz"
 }
 
 // Exists returns a boolean based off if the archive exists.
@@ -52,7 +52,12 @@ func (a *Archiver) Archive() error {
 	}
 
 	for _, file := range fileInfo {
-		files = append(files, filepath.Join(path, file.Name()))
+		f, err := a.Server.Filesystem.SafeJoin(path, file)
+		if err != nil {
+			return err
+		}
+
+		files = append(files, f)
 	}
 
 	stat, err := a.Stat()
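The archiver now resolves each entry through Filesystem.SafeJoin rather than joining names blindly; the point is to keep dot-dot or symlinked entries from escaping the server root. A generic sketch of that kind of guard, not the actual wings implementation:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeJoin joins name onto root and rejects results that escape root.
// Generic illustration only; the real SafeJoin also handles symlinks.
func safeJoin(root, name string) (string, error) {
	p := filepath.Join(root, name)
	if p != root && !strings.HasPrefix(p, root+string(filepath.Separator)) {
		return "", fmt.Errorf("path %q escapes root %q", name, root)
	}
	return p, nil
}

func main() {
	fmt.Println(safeJoin("/srv/server", "world/level.dat"))
	fmt.Println(safeJoin("/srv/server", "../../etc/passwd"))
}
```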
@@ -2,10 +2,10 @@ package server
 
 import (
 	"bufio"
+	"github.com/apex/log"
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/api"
 	"github.com/pterodactyl/wings/server/backup"
-	"go.uber.org/zap"
 	"os"
 	"path"
 )
@@ -17,16 +17,15 @@ func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, suc
 	rerr, err := r.SendBackupStatus(uuid, ad.ToRequest(successful))
 	if rerr != nil || err != nil {
 		if err != nil {
-			zap.S().Errorw(
-				"failed to notify panel of backup status due to internal code error",
-				zap.String("backup", s.Uuid),
-				zap.Error(err),
-			)
+			s.Log().WithFields(log.Fields{
+				"backup": uuid,
+				"error":  err,
+			}).Error("failed to notify panel of backup status due to internal code error")
 
 			return err
 		}
 
-		zap.S().Warnw(rerr.String(), zap.String("backup", uuid))
+		s.Log().WithField("backup", uuid).Warn(rerr.String())
 
 		return errors.New(rerr.String())
 	}
@@ -34,42 +33,65 @@ func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, suc
 	return nil
 }
 
-// Performs a server backup and then emits the event over the server websocket. We
-// let the actual backup system handle notifying the panel of the status, but that
-// won't emit a websocket event.
-func (s *Server) BackupLocal(b *backup.LocalBackup) error {
-	// If no ignored files are present in the request, check for a .pteroignore file in the root
-	// of the server files directory, and use that to generate the backup.
-	if len(b.IgnoredFiles) == 0 {
+// Get all of the ignored files for a server based on its .pteroignore file in the root.
+func (s *Server) getServerwideIgnoredFiles() ([]string, error) {
+	var ignored []string
+
 	f, err := os.Open(path.Join(s.Filesystem.Path(), ".pteroignore"))
 	if err != nil {
 		if !os.IsNotExist(err) {
-			zap.S().Warnw("failed to open .pteroignore file in server directory", zap.String("server", s.Uuid), zap.Error(errors.WithStack(err)))
+			return nil, err
 		}
 	} else {
 		scanner := bufio.NewScanner(f)
 		for scanner.Scan() {
 			// Only include non-empty lines, for the sake of clarity...
 			if t := scanner.Text(); t != "" {
-				b.IgnoredFiles = append(b.IgnoredFiles, t)
+				ignored = append(ignored, t)
			}
 		}
 
 		if err := scanner.Err(); err != nil {
-			zap.S().Warnw("failed to scan .pteroignore file for lines", zap.String("server", s.Uuid), zap.Error(errors.WithStack(err)))
+			return nil, err
 		}
 	}
 
+	return ignored, nil
+}
+
+// Get the backup files to include when generating it.
+func (s *Server) GetIncludedBackupFiles(ignored []string) (*backup.IncludedFiles, error) {
+	// If no ignored files are present in the request, check for a .pteroignore file in the root
+	// of the server files directory, and use that to generate the backup.
+	if len(ignored) == 0 {
+		if i, err := s.getServerwideIgnoredFiles(); err != nil {
+			s.Log().WithField("error", err).Warn("failed to retrieve ignored files listing for server")
+		} else {
+			ignored = i
+		}
+	}
+
 	// Get the included files based on the root path and the ignored files provided.
-	inc, err := s.Filesystem.GetIncludedFiles(s.Filesystem.Path(), b.IgnoredFiles)
+	return s.Filesystem.GetIncludedFiles(s.Filesystem.Path(), ignored)
+}
+
+// Performs a server backup and then emits the event over the server websocket. We
+// let the actual backup system handle notifying the panel of the status, but that
+// won't emit a websocket event.
+func (s *Server) Backup(b backup.BackupInterface) error {
+	// Get the included files based on the root path and the ignored files provided.
+	inc, err := s.GetIncludedBackupFiles(b.Ignored())
 	if err != nil {
 		return errors.WithStack(err)
 	}
 
-	if err := b.Backup(inc, s.Filesystem.Path()); err != nil {
+	ad, err := b.Generate(inc, s.Filesystem.Path())
+	if err != nil {
 		if notifyError := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); notifyError != nil {
-			zap.S().Warnw("failed to notify panel of failed backup state", zap.String("backup", b.Uuid), zap.Error(err))
+			s.Log().WithFields(log.Fields{
+				"backup": b.Identifier(),
+				"error":  err,
+			}).Warn("failed to notify panel of failed backup state")
 		}
 
 		return errors.WithStack(err)
@@ -77,7 +99,6 @@ func (s *Server) BackupLocal(b *backup.LocalBackup) error {
 
 	// Try to notify the panel about the status of this backup. If for some reason this request
 	// fails, delete the archive from the daemon and return that error up the chain to the caller.
-	ad := b.Details()
 	if notifyError := s.notifyPanelOfBackup(b.Identifier(), ad, true); notifyError != nil {
 		b.Remove()
 
@@ -86,8 +107,8 @@ func (s *Server) BackupLocal(b *backup.LocalBackup) error {
 
 	// Emit an event over the socket so we can update the backup in realtime on
 	// the frontend for the server.
-	s.Events().PublishJson(BackupCompletedEvent+":"+b.Uuid, map[string]interface{}{
-		"uuid":        b.Uuid,
+	s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
+		"uuid":        b.Identifier(),
 		"sha256_hash": ad.Checksum,
 		"file_size":   ad.Size,
 	})
@@ -3,9 +3,9 @@ package backup
 import (
 	"archive/tar"
 	"context"
+	"github.com/apex/log"
 	gzip "github.com/klauspost/pgzip"
 	"github.com/remeh/sizedwaitgroup"
-	"go.uber.org/zap"
 	"golang.org/x/sync/errgroup"
 	"io"
 	"os"
@@ -20,11 +20,11 @@ type Archive struct {
 	Files *IncludedFiles
 }
 
-// Creates an archive at dest with all of the files definied in the included files struct.
-func (a *Archive) Create(dest string, ctx context.Context) error {
-	f, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+// Creates an archive at dst with all of the files defined in the included files struct.
+func (a *Archive) Create(dst string, ctx context.Context) (os.FileInfo, error) {
+	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer f.Close()
 
@@ -66,14 +66,19 @@ func (a *Archive) Create(dest string, ctx context.Context) error {
 
 		// Attempt to remove the archive if there is an error, report that error to
 		// the logger if it fails.
-		if rerr := os.Remove(dest); rerr != nil && !os.IsNotExist(rerr) {
-			zap.S().Warnw("failed to delete corrupted backup archive", zap.String("location", dest))
+		if rerr := os.Remove(dst); rerr != nil && !os.IsNotExist(rerr) {
+			log.WithField("location", dst).Warn("failed to delete corrupted backup archive")
 		}
 
-		return err
+		return nil, err
 	}
 
-	return nil
+	st, err := f.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	return st, nil
 }
 
 // Adds a single file to the existing tar archive writer.
@@ -99,7 +104,7 @@ func (a *Archive) addToArchive(p string, s *os.FileInfo, w *tar.Writer) error {
 	a.Lock()
 	defer a.Unlock()
 
-	if err = w.WriteHeader(header); err != nil {
+	if err := w.WriteHeader(header); err != nil {
 		return err
 	}
 
@@ -1,31 +1,22 @@
 package backup
 
 import (
+    "crypto/sha256"
+    "encoding/hex"
+    "github.com/apex/log"
+    "github.com/pkg/errors"
     "github.com/pterodactyl/wings/api"
+    "github.com/pterodactyl/wings/config"
+    "io"
+    "os"
+    "path"
+    "sync"
 )
 
-type Backup interface {
-    // Returns the UUID of this backup as tracked by the panel instance.
-    Identifier() string
-
-    // Generates a backup in whatever the configured source for the specific
-    // implementation is.
-    Backup(*IncludedFiles, string) error
-
-    // Returns a SHA256 checksum for the generated backup.
-    Checksum() ([]byte, error)
-
-    // Returns the size of the generated backup.
-    Size() (int64, error)
-
-    // Returns the path to the backup on the machine. This is not always the final
-    // storage location of the backup, simply the location we're using to store
-    // it until it is moved to the final spot.
-    Path() string
-
-    // Returns details about the archive.
-    Details() *ArchiveDetails
-}
+const (
+    LocalBackupAdapter = "wings"
+    S3BackupAdapter    = "s3"
+)
 
 type ArchiveDetails struct {
     Checksum string `json:"checksum"`
@@ -40,3 +31,124 @@ func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
         Successful: successful,
     }
 }
+
+type Backup struct {
+    // The UUID of this backup object. This must line up with a backup from
+    // the panel instance.
+    Uuid string `json:"uuid"`
+
+    // An array of files to ignore when generating this backup. This should be
+    // compatible with a standard .gitignore structure.
+    IgnoredFiles []string `json:"ignored_files"`
+}
+
+// noinspection GoNameStartsWithPackageName
+type BackupInterface interface {
+    // Returns the UUID of this backup as tracked by the panel instance.
+    Identifier() string
+
+    // Generates a backup in whatever the configured source for the specific
+    // implementation is.
+    Generate(*IncludedFiles, string) (*ArchiveDetails, error)
+
+    // Returns the ignored files for this backup instance.
+    Ignored() []string
+
+    // Returns a SHA256 checksum for the generated backup.
+    Checksum() ([]byte, error)
+
+    // Returns the size of the generated backup.
+    Size() (int64, error)
+
+    // Returns the path to the backup on the machine. This is not always the final
+    // storage location of the backup, simply the location we're using to store
+    // it until it is moved to the final spot.
+    Path() string
+
+    // Returns details about the archive.
+    Details() *ArchiveDetails
+
+    // Removes a backup file.
+    Remove() error
+}
+
+func (b *Backup) Identifier() string {
+    return b.Uuid
+}
+
+// Returns the path for this specific backup.
+func (b *Backup) Path() string {
+    return path.Join(config.Get().System.BackupDirectory, b.Identifier()+".tar.gz")
+}
+
+// Return the size of the generated backup.
+func (b *Backup) Size() (int64, error) {
+    st, err := os.Stat(b.Path())
+    if err != nil {
+        return 0, errors.WithStack(err)
+    }
+
+    return st.Size(), nil
+}
+
+// Returns the SHA256 checksum of a backup.
+func (b *Backup) Checksum() ([]byte, error) {
+    h := sha256.New()
+
+    f, err := os.Open(b.Path())
+    if err != nil {
+        return []byte{}, errors.WithStack(err)
+    }
+    defer f.Close()
+
+    if _, err := io.Copy(h, f); err != nil {
+        return []byte{}, errors.WithStack(err)
+    }
+
+    return h.Sum(nil), nil
+}
+
+// Returns details of the archive by utilizing two go-routines to get the checksum and
+// the size of the archive.
+func (b *Backup) Details() *ArchiveDetails {
+    wg := sync.WaitGroup{}
+    wg.Add(2)
+
+    var checksum string
+    // Calculate the checksum for the file.
+    go func() {
+        defer wg.Done()
+
+        resp, err := b.Checksum()
+        if err != nil {
+            log.WithFields(log.Fields{
+                "backup": b.Identifier(),
+                "error":  err,
+            }).Error("failed to calculate checksum for backup")
+        }
+
+        checksum = hex.EncodeToString(resp)
+    }()
+
+    var sz int64
+    go func() {
+        defer wg.Done()
+
+        if s, err := b.Size(); err != nil {
+            return
+        } else {
+            sz = s
+        }
+    }()
+
+    wg.Wait()
+
+    return &ArchiveDetails{
+        Checksum: checksum,
+        Size:     sz,
+    }
+}
+
+func (b *Backup) Ignored() []string {
+    return b.IgnoredFiles
+}
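The `Details()` method above computes the checksum and the size in parallel and joins on a WaitGroup. A minimal standalone sketch of the same pattern, assuming nothing beyond the standard library (the file path and helper name are placeholders), looks like this:

```go
package example

import (
    "crypto/sha256"
    "encoding/hex"
    "io"
    "os"
    "sync"
)

// details mirrors the WaitGroup pattern used by Backup.Details(): the checksum
// and the size are collected concurrently and then returned together.
func details(path string) (checksum string, size int64, err error) {
    wg := sync.WaitGroup{}
    wg.Add(2)

    var hashErr, statErr error

    go func() {
        defer wg.Done()

        h := sha256.New()
        f, ferr := os.Open(path)
        if ferr != nil {
            hashErr = ferr
            return
        }
        defer f.Close()

        if _, cerr := io.Copy(h, f); cerr != nil {
            hashErr = cerr
            return
        }
        checksum = hex.EncodeToString(h.Sum(nil))
    }()

    go func() {
        defer wg.Done()

        st, serr := os.Stat(path)
        if serr != nil {
            statErr = serr
            return
        }
        size = st.Size()
    }()

    wg.Wait()

    if hashErr != nil {
        return "", 0, hashErr
    }
    return checksum, size, statErr
}
```

The writes to `checksum` and `size` are safe to read after `wg.Wait()` because the WaitGroup establishes the happens-before ordering between the goroutines and the caller.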
@@ -2,35 +2,24 @@ package backup
 
 import (
     "context"
-    "crypto/sha256"
-    "encoding/hex"
     "github.com/pkg/errors"
-    "github.com/pterodactyl/wings/config"
-    "go.uber.org/zap"
-    "io"
     "os"
-    "path"
-    "sync"
 )
 
 type LocalBackup struct {
-    // The UUID of this backup object. This must line up with a backup from
-    // the panel instance.
-    Uuid string `json:"uuid"`
-
-    // An array of files to ignore when generating this backup. This should be
-    // compatible with a standard .gitignore structure.
-    IgnoredFiles []string `json:"ignored_files"`
+    Backup
 }
 
-var _ Backup = (*LocalBackup)(nil)
+var _ BackupInterface = (*LocalBackup)(nil)
 
 // Locates the backup for a server and returns the local path. This will obviously only
 // work if the backup was created as a local backup.
 func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
     b := &LocalBackup{
+        Backup{
             Uuid:         uuid,
             IgnoredFiles: nil,
+        },
     }
 
     st, err := os.Stat(b.Path())
@@ -45,32 +34,6 @@ func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
     return b, st, nil
 }
 
-func (b *LocalBackup) Identifier() string {
-    return b.Uuid
-}
-
-// Returns the path for this specific backup.
-func (b *LocalBackup) Path() string {
-    return path.Join(config.Get().System.BackupDirectory, b.Uuid+".tar.gz")
-}
-
-// Returns the SHA256 checksum of a backup.
-func (b *LocalBackup) Checksum() ([]byte, error) {
-    h := sha256.New()
-
-    f, err := os.Open(b.Path())
-    if err != nil {
-        return []byte{}, errors.WithStack(err)
-    }
-    defer f.Close()
-
-    if _, err := io.Copy(h, f); err != nil {
-        return []byte{}, errors.WithStack(err)
-    }
-
-    return h.Sum(nil), nil
-}
-
 // Removes a backup from the system.
 func (b *LocalBackup) Remove() error {
     return os.Remove(b.Path())
@@ -78,77 +41,15 @@ func (b *LocalBackup) Remove() error {
 
 // Generates a backup of the selected files and pushes it to the defined location
 // for this instance.
-func (b *LocalBackup) Backup(included *IncludedFiles, prefix string) error {
+func (b *LocalBackup) Generate(included *IncludedFiles, prefix string) (*ArchiveDetails, error) {
     a := &Archive{
         TrimPrefix: prefix,
         Files:      included,
     }
 
-    err := a.Create(b.Path(), context.Background())
-
-    return err
-}
-
-// Return the size of the generated backup.
-func (b *LocalBackup) Size() (int64, error) {
-    st, err := os.Stat(b.Path())
-    if err != nil {
-        return 0, errors.WithStack(err)
-    }
-
-    return st.Size(), nil
-}
-
-// Returns details of the archive by utilizing two go-routines to get the checksum and
-// the size of the archive.
-func (b *LocalBackup) Details() *ArchiveDetails {
-    wg := sync.WaitGroup{}
-    wg.Add(2)
-
-    var checksum string
-    // Calculate the checksum for the file.
-    go func() {
-        defer wg.Done()
-
-        resp, err := b.Checksum()
-        if err != nil {
-            zap.S().Errorw("failed to calculate checksum for backup", zap.String("backup", b.Uuid), zap.Error(err))
-        }
-
-        checksum = hex.EncodeToString(resp)
-    }()
-
-    var sz int64
-    go func() {
-        defer wg.Done()
-
-        st, err := os.Stat(b.Path())
-        if err != nil {
-            return
-        }
-
-        sz = st.Size()
-    }()
-
-    wg.Wait()
-
-    return &ArchiveDetails{
-        Checksum: checksum,
-        Size:     sz,
-    }
-}
-
-// Ensures that the local backup destination for files exists.
-func (b *LocalBackup) ensureLocalBackupLocation() error {
-    d := config.Get().System.BackupDirectory
-
-    if _, err := os.Stat(d); err != nil {
-        if !os.IsNotExist(err) {
-            return errors.WithStack(err)
-        }
-
-        return os.MkdirAll(d, 0700)
-    }
-
-    return nil
+    if _, err := a.Create(b.Path(), context.Background()); err != nil {
+        return nil, err
+    }
+
+    return b.Details(), nil
 }
46
server/backup/backup_request.go
Normal file
@@ -0,0 +1,46 @@
+package backup
+
+import (
+    "fmt"
+    "github.com/pkg/errors"
+)
+
+type Request struct {
+    Adapter      string   `json:"adapter"`
+    Uuid         string   `json:"uuid"`
+    IgnoredFiles []string `json:"ignored_files"`
+    PresignedUrl string   `json:"presigned_url"`
+}
+
+// Generates a new local backup struct.
+func (r *Request) NewLocalBackup() (*LocalBackup, error) {
+    if r.Adapter != LocalBackupAdapter {
+        return nil, errors.New(fmt.Sprintf("cannot create local backup using [%s] adapter", r.Adapter))
+    }
+
+    return &LocalBackup{
+        Backup{
+            Uuid:         r.Uuid,
+            IgnoredFiles: r.IgnoredFiles,
+        },
+    }, nil
+}
+
+// Generates a new S3 backup struct.
+func (r *Request) NewS3Backup() (*S3Backup, error) {
+    if r.Adapter != S3BackupAdapter {
+        return nil, errors.New(fmt.Sprintf("cannot create s3 backup using [%s] adapter", r.Adapter))
+    }
+
+    if len(r.PresignedUrl) == 0 {
+        return nil, errors.New("a valid presigned S3 upload URL must be provided to use the [s3] adapter")
+    }
+
+    return &S3Backup{
+        Backup: Backup{
+            Uuid:         r.Uuid,
+            IgnoredFiles: r.IgnoredFiles,
+        },
+        PresignedUrl: r.PresignedUrl,
+    }, nil
+}
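Presumably the HTTP layer decodes a panel payload into this `Request` and then picks a constructor based on the adapter string. The sketch below mirrors that dispatch with a local copy of the struct; the payload shape is taken from the field tags above, while the function name and return values are illustrative assumptions.

```go
package example

import (
    "encoding/json"
    "fmt"
)

// request mirrors the fields of backup.Request shown above; the switch is a
// sketch of how a caller might pick an adapter. The payload is invented for
// illustration only.
type request struct {
    Adapter      string   `json:"adapter"`
    Uuid         string   `json:"uuid"`
    IgnoredFiles []string `json:"ignored_files"`
    PresignedUrl string   `json:"presigned_url"`
}

func adapterFor(payload []byte) (string, error) {
    var r request
    if err := json.Unmarshal(payload, &r); err != nil {
        return "", err
    }

    switch r.Adapter {
    case "wings": // LocalBackupAdapter in the diff above
        return "local", nil
    case "s3": // S3BackupAdapter in the diff above
        return "s3", nil
    default:
        return "", fmt.Errorf("unknown backup adapter [%s]", r.Adapter)
    }
}
```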
@@ -1,37 +1,86 @@
 package backup
 
+import (
+    "context"
+    "fmt"
+    "github.com/apex/log"
+    "io"
+    "net/http"
+    "os"
+    "strconv"
+)
+
 type S3Backup struct {
-    // The UUID of this backup object. This must line up with a backup from
-    // the panel instance.
-    Uuid string
+    Backup
 
-    // An array of files to ignore when generating this backup. This should be
-    // compatible with a standard .gitignore structure.
-    IgnoredFiles []string
+    // The pre-signed upload endpoint for the generated backup. This must be
+    // provided otherwise this request will fail. This allows us to keep all
+    // of the keys off the daemon instances and the panel can handle generating
+    // the credentials for us.
+    PresignedUrl string
 }
 
-var _ Backup = (*S3Backup)(nil)
+var _ BackupInterface = (*S3Backup)(nil)
 
-func (s *S3Backup) Identifier() string {
-    return s.Uuid
+// Generates a new backup on the disk, moves it into the S3 bucket via the provided
+// presigned URL, and then deletes the backup from the disk.
+func (s *S3Backup) Generate(included *IncludedFiles, prefix string) (*ArchiveDetails, error) {
+    defer s.Remove()
+
+    a := &Archive{
+        TrimPrefix: prefix,
+        Files:      included,
+    }
+
+    if _, err := a.Create(s.Path(), context.Background()); err != nil {
+        return nil, err
+    }
+
+    rc, err := os.Open(s.Path())
+    if err != nil {
+        return nil, err
+    }
+    defer rc.Close()
+
+    if resp, err := s.generateRemoteRequest(rc); err != nil {
+        return nil, err
+    } else {
+        resp.Body.Close()
+
+        if resp.StatusCode != http.StatusOK {
+            return nil, fmt.Errorf("failed to put S3 object, %d:%s", resp.StatusCode, resp.Status)
+        }
+    }
+
+    return s.Details(), err
 }
 
-func (s *S3Backup) Backup(included *IncludedFiles, prefix string) error {
-    panic("implement me")
+// Removes a backup from the system.
+func (s *S3Backup) Remove() error {
+    return os.Remove(s.Path())
 }
 
-func (s *S3Backup) Checksum() ([]byte, error) {
-    return []byte(""), nil
-}
-
-func (s *S3Backup) Size() (int64, error) {
-    return 0, nil
-}
-
-func (s *S3Backup) Path() string {
-    return ""
-}
-
-func (s *S3Backup) Details() *ArchiveDetails {
-    return &ArchiveDetails{}
+// Generates the remote S3 request and begins the upload.
+func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) (*http.Response, error) {
+    r, err := http.NewRequest(http.MethodPut, s.PresignedUrl, nil)
+    if err != nil {
+        return nil, err
+    }
+
+    if sz, err := s.Size(); err != nil {
+        return nil, err
+    } else {
+        r.ContentLength = sz
+        r.Header.Add("Content-Length", strconv.Itoa(int(sz)))
+        r.Header.Add("Content-Type", "application/x-gzip")
+    }
+
+    r.Body = rc
+
+    log.WithFields(log.Fields{
+        "endpoint": s.PresignedUrl,
+        "headers":  r.Header,
+    }).Debug("uploading backup to remote S3 endpoint")
+
+    return http.DefaultClient.Do(r)
 }
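`generateRemoteRequest` above is, at its core, a plain HTTP PUT of the archive against a pre-signed URL with an explicit Content-Length. Here is a standalone sketch of that upload using only `net/http`; the URL and file path are placeholders and the error text is my own.

```go
package example

import (
    "fmt"
    "net/http"
    "os"
    "strconv"
)

// uploadToPresignedUrl PUTs a local file to a pre-signed URL, mirroring the
// S3Backup upload above: the body is streamed from disk and Content-Length is
// set explicitly so the request is not sent chunked.
func uploadToPresignedUrl(url, path string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    defer f.Close()

    st, err := f.Stat()
    if err != nil {
        return err
    }

    r, err := http.NewRequest(http.MethodPut, url, f)
    if err != nil {
        return err
    }
    r.ContentLength = st.Size()
    r.Header.Add("Content-Length", strconv.Itoa(int(st.Size())))
    r.Header.Add("Content-Type", "application/x-gzip")

    resp, err := http.DefaultClient.Do(r)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("unexpected response putting object: %d %s", resp.StatusCode, resp.Status)
    }
    return nil
}
```

Passing the open file as the request body lets the archive stream from disk rather than being buffered in memory, and setting `ContentLength` avoids chunked transfer encoding, which pre-signed S3 PUTs generally do not accept.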
72
server/build_settings.go
Normal file
@@ -0,0 +1,72 @@
+package server
+
+import "math"
+
+// The build settings for a given server that impact docker container creation and
+// resource limits for a server instance.
+type BuildSettings struct {
+    // The total amount of memory in megabytes that this server is allowed to
+    // use on the host system.
+    MemoryLimit int64 `json:"memory_limit"`
+
+    // The amount of additional swap space to be provided to a container instance.
+    Swap int64 `json:"swap"`
+
+    // The relative weight for IO operations in a container. This is relative to other
+    // containers on the system and should be a value between 10 and 1000.
+    IoWeight uint16 `json:"io_weight"`
+
+    // The percentage of CPU that this instance is allowed to consume relative to
+    // the host. A value of 200% represents complete utilization of two cores. This
+    // should be a value between 1 and THREAD_COUNT * 100.
+    CpuLimit int64 `json:"cpu_limit"`
+
+    // The amount of disk space in megabytes that a server is allowed to use.
+    DiskSpace int64 `json:"disk_space"`
+
+    // Sets which CPU threads can be used by the docker instance.
+    Threads string `json:"threads"`
+}
+
+func (s *Server) Build() *BuildSettings {
+    return &s.Config().Build
+}
+
+// Converts the CPU limit for a server build into a number that can be better understood
+// by the Docker environment. If there is no limit set, return -1 which will indicate to
+// Docker that it has unlimited CPU quota.
+func (b *BuildSettings) ConvertedCpuLimit() int64 {
+    if b.CpuLimit == 0 {
+        return -1
+    }
+
+    return b.CpuLimit * 1000
+}
+
+// Set the hard limit for memory usage to be 5% more than the amount of memory assigned to
+// the server. If the memory limit for the server is < 4G, use 10%, if less than 2G use
+// 15%. This avoids unexpected crashes from processes like Java which run over the limit.
+func (b *BuildSettings) MemoryOverheadMultiplier() float64 {
+    if b.MemoryLimit <= 2048 {
+        return 1.15
+    } else if b.MemoryLimit <= 4096 {
+        return 1.10
+    }
+
+    return 1.05
+}
+
+func (b *BuildSettings) BoundedMemoryLimit() int64 {
+    return int64(math.Round(float64(b.MemoryLimit) * b.MemoryOverheadMultiplier() * 1_000_000))
+}
+
+// Returns the amount of swap available as a total in bytes. This is returned as the amount
+// of memory available to the server initially, PLUS the amount of additional swap to include
+// which is the format used by Docker.
+func (b *BuildSettings) ConvertedSwap() int64 {
+    if b.Swap < 0 {
+        return -1
+    }
+
+    return (b.Swap * 1_000_000) + b.BoundedMemoryLimit()
+}
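To make the conversions above concrete, here is a small worked example with arbitrary numbers: a server with a 1024 MB memory limit falls in the <= 2048 bracket, so its hard limit is 1024 × 1.15 × 1,000,000 = 1,177,600,000 bytes, and with 512 MB of swap the Docker memory-plus-swap value becomes 512,000,000 + 1,177,600,000 = 1,689,600,000 bytes. The snippet below is a stripped-down copy of that arithmetic, not the real type:

```go
package main

import (
    "fmt"
    "math"
)

// buildSettings is a minimal mirror of the BuildSettings fields used by the
// conversions above; everything else is illustrative.
type buildSettings struct {
    MemoryLimit int64 // megabytes
    Swap        int64 // megabytes
    CpuLimit    int64 // percent, 100 == one core
}

func (b buildSettings) memoryOverheadMultiplier() float64 {
    if b.MemoryLimit <= 2048 {
        return 1.15
    } else if b.MemoryLimit <= 4096 {
        return 1.10
    }
    return 1.05
}

func (b buildSettings) boundedMemoryLimit() int64 {
    return int64(math.Round(float64(b.MemoryLimit) * b.memoryOverheadMultiplier() * 1_000_000))
}

func (b buildSettings) convertedSwap() int64 {
    if b.Swap < 0 {
        return -1
    }
    return (b.Swap * 1_000_000) + b.boundedMemoryLimit()
}

func main() {
    b := buildSettings{MemoryLimit: 1024, Swap: 512, CpuLimit: 150}

    fmt.Println(b.boundedMemoryLimit()) // 1177600000 bytes (1024 MB plus 15% overhead)
    fmt.Println(b.convertedSwap())      // 1689600000 bytes (memory plus swap, as Docker expects)
    fmt.Println(b.CpuLimit * 1000)      // 150000 (the CPU quota value handed to Docker)
}
```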
@@ -2,7 +2,6 @@ package server
 
 import (
     "github.com/pterodactyl/wings/parser"
-    "go.uber.org/zap"
     "sync"
 )
 
@@ -11,21 +10,22 @@ import (
 func (s *Server) UpdateConfigurationFiles() {
     wg := new(sync.WaitGroup)
 
-    for _, v := range s.processConfiguration.ConfigurationFiles {
+    files := s.ProcessConfiguration().ConfigurationFiles
+    for _, v := range files {
         wg.Add(1)
 
         go func(f parser.ConfigurationFile, server *Server) {
             defer wg.Done()
 
-            p, err := s.Filesystem.SafePath(f.FileName)
+            p, err := server.Filesystem.SafePath(f.FileName)
             if err != nil {
-                zap.S().Errorw("failed to generate safe path for configuration file", zap.String("server", server.Uuid), zap.Error(err))
+                server.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
 
                 return
             }
 
             if err := f.Parse(p, false); err != nil {
-                zap.S().Errorw("failed to parse and update server configuration file", zap.String("server", server.Uuid), zap.Error(err))
+                server.Log().WithField("error", err).Error("failed to parse and update server configuration file")
             }
         }(v, s)
     }
91
server/configuration.go
Normal file
@@ -0,0 +1,91 @@
+package server
+
+import (
+    "fmt"
+    "strconv"
+    "sync"
+)
+
+type EnvironmentVariables map[string]interface{}
+
+// Ugly hacky function to handle environment variables that get passed through as not-a-string
+// from the Panel. Ideally we'd just say only pass strings, but that is a fragile idea and if a
+// string wasn't passed through you'd cause a crash or the server to become unavailable. For now
+// try to handle the most likely values from the JSON and hope for the best.
+func (ev EnvironmentVariables) Get(key string) string {
+    val, ok := ev[key]
+    if !ok {
+        return ""
+    }
+
+    switch val.(type) {
+    case int:
+        return strconv.Itoa(val.(int))
+    case int32:
+        return strconv.FormatInt(val.(int64), 10)
+    case int64:
+        return strconv.FormatInt(val.(int64), 10)
+    case float32:
+        return fmt.Sprintf("%f", val.(float32))
+    case float64:
+        return fmt.Sprintf("%f", val.(float64))
+    case bool:
+        return strconv.FormatBool(val.(bool))
+    }
+
+    return val.(string)
+}
+
+type Configuration struct {
+    mu sync.RWMutex
+
+    // The unique identifier for the server that should be used when referencing
+    // it against the Panel API (and internally). This will be used when naming
+    // docker containers as well as in log output.
+    Uuid string `json:"uuid"`
+
+    // Whether or not the server is in a suspended state. Suspended servers cannot
+    // be started or modified except in certain scenarios by an admin user.
+    Suspended bool `json:"suspended"`
+
+    // The command that should be used when booting up the server instance.
+    Invocation string `json:"invocation"`
+
+    // An array of environment variables that should be passed along to the running
+    // server process.
+    EnvVars EnvironmentVariables `json:"environment"`
+
+    Allocations           Allocations   `json:"allocations"`
+    Build                 BuildSettings `json:"build"`
+    CrashDetectionEnabled bool          `default:"true" json:"enabled" yaml:"enabled"`
+    Mounts                []Mount       `json:"mounts"`
+    Resources             ResourceUsage `json:"resources"`
+
+    Container struct {
+        // Defines the Docker image that will be used for this server
+        Image string `json:"image,omitempty"`
+        // If set to true, OOM killer will be disabled on the server's Docker container.
+        // If not present (nil) we will default to disabling it.
+        OomDisabled bool `default:"true" json:"oom_disabled"`
+    } `json:"container,omitempty"`
+}
+
+func (s *Server) Config() *Configuration {
+    s.cfg.mu.RLock()
+    defer s.cfg.mu.RUnlock()
+
+    return &s.cfg
+}
+
+func (c *Configuration) GetUuid() string {
+    c.mu.RLock()
+    defer c.mu.RUnlock()
+
+    return c.Uuid
+}
+
+func (c *Configuration) SetSuspended(s bool) {
+    c.mu.Lock()
+    c.Suspended = s
+    c.mu.Unlock()
+}
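The type switch in `EnvironmentVariables.Get` exists because values decoded from the panel's JSON into `interface{}` arrive as `float64`, `bool`, or `string`, never as the integer types a caller might expect. A small standalone demonstration (the payload is invented for illustration):

```go
package example

import (
    "encoding/json"
    "fmt"
    "strconv"
)

// demo shows what actually lands in a map[string]interface{} after JSON
// decoding: numbers become float64 and booleans stay bool, which is why the
// Get() helper above has to normalize everything back to a string.
func demo() {
    var env map[string]interface{}
    _ = json.Unmarshal([]byte(`{"SERVER_MEMORY": 1024, "ENABLE_FOO": true, "NAME": "wings"}`), &env)

    for k, v := range env {
        switch val := v.(type) {
        case float64:
            fmt.Println(k, "=", strconv.FormatFloat(val, 'f', -1, 64))
        case bool:
            fmt.Println(k, "=", strconv.FormatBool(val))
        case string:
            fmt.Println(k, "=", val)
        }
    }
}
```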
@@ -4,19 +4,32 @@ import (
     "fmt"
     "github.com/pkg/errors"
     "github.com/pterodactyl/wings/config"
-    "go.uber.org/zap"
+    "sync"
     "time"
 )
 
-type CrashDetection struct {
-    // If set to false, the system will not listen for crash detection events that
-    // can indicate that the server stopped unexpectedly.
-    Enabled bool `default:"true" json:"enabled" yaml:"enabled"`
+type CrashHandler struct {
+    mu sync.RWMutex
 
     // Tracks the time of the last server crash event.
     lastCrash time.Time
 }
 
+// Returns the time of the last crash for this server instance.
+func (cd *CrashHandler) LastCrashTime() time.Time {
+    cd.mu.RLock()
+    defer cd.mu.RUnlock()
+
+    return cd.lastCrash
+}
+
+// Sets the last crash time for a server.
+func (cd *CrashHandler) SetLastCrash(t time.Time) {
+    cd.mu.Lock()
+    cd.lastCrash = t
+    cd.mu.Unlock()
+}
+
 // Looks at the environment exit state to determine if the process exited cleanly or
 // if it was the result of an event that we should try to recover from.
 //
@@ -27,15 +40,13 @@ type CrashDetection struct {
 //
 // If the server is determined to have crashed, the process will be restarted and the
 // counter for the server will be incremented.
-//
-// @todo output event to server console
 func (s *Server) handleServerCrash() error {
     // No point in doing anything here if the server isn't currently offline, there
     // is no reason to do a crash detection event. If the server crash detection is
     // disabled we want to skip anything after this as well.
-    if s.GetState() != ProcessOfflineState || !s.CrashDetection.Enabled {
-        if !s.CrashDetection.Enabled {
-            zap.S().Debugw("server triggered crash detection but handler is disabled for server process", zap.String("server", s.Uuid))
+    if s.GetState() != ProcessOfflineState || !s.Config().CrashDetectionEnabled {
+        if !s.Config().CrashDetectionEnabled {
+            s.Log().Debug("server triggered crash detection but handler is disabled for server process")
 
             s.PublishConsoleOutputFromDaemon("Server detected as crashed; crash detection is disabled for this instance.")
         }
@@ -51,7 +62,7 @@ func (s *Server) handleServerCrash() error {
     // If the system is not configured to detect a clean exit code as a crash, and the
     // crash is not the result of the program running out of memory, do nothing.
     if exitCode == 0 && !oomKilled && !config.Get().System.DetectCleanExitAsCrash {
-        zap.S().Debugw("server exited with successful code; system configured to not detect as crash", zap.String("server", s.Uuid))
+        s.Log().Debug("server exited with successful exit code; system is configured to not detect this as a crash")
 
         return nil
     }
@@ -60,7 +71,7 @@ func (s *Server) handleServerCrash() error {
     s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Exit code: %d", exitCode))
     s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Out of memory: %t", oomKilled))
 
-    c := s.CrashDetection.lastCrash
+    c := s.crasher.LastCrashTime()
    // If the last crash time was within the last 60 seconds we do not want to perform
    // an automatic reboot of the process. Return an error that can be handled.
    if !c.IsZero() && c.Add(time.Second * 60).After(time.Now()) {
@@ -69,7 +80,7 @@ func (s *Server) handleServerCrash() error {
        return &crashTooFrequent{}
    }
 
-    s.CrashDetection.lastCrash = time.Now()
+    s.crasher.SetLastCrash(time.Now())
 
     return s.Environment.Start()
 }
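The crash throttle in `handleServerCrash` boils down to comparing the stored last-crash timestamp against a 60-second window. The sketch below isolates that check with a mutex-guarded struct mirroring `CrashHandler`; any names not in the diff are illustrative.

```go
package example

import (
    "sync"
    "time"
)

// crashHandler mirrors the CrashHandler introduced above: a mutex-guarded
// timestamp of the last crash event.
type crashHandler struct {
    mu        sync.RWMutex
    lastCrash time.Time
}

func (c *crashHandler) setLastCrash(t time.Time) {
    c.mu.Lock()
    c.lastCrash = t
    c.mu.Unlock()
}

func (c *crashHandler) lastCrashTime() time.Time {
    c.mu.RLock()
    defer c.mu.RUnlock()
    return c.lastCrash
}

// tooFrequent reports whether a crash happened within the last 60 seconds,
// which is the same condition the diff uses to abort an automatic restart.
func (c *crashHandler) tooFrequent() bool {
    last := c.lastCrashTime()
    return !last.IsZero() && last.Add(time.Second*60).After(time.Now())
}
```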
@@ -31,6 +31,13 @@ type Environment interface {
     // not be returned.
     Stop() error
 
+    // Restart a server instance. If already stopped the process will be started. This function
+    // will return an error if the server is already performing a restart process as to avoid
+    // unnecessary double/triple/quad looping issues if multiple people press restart or spam the
+    // button to restart.
+    Restart() error
+    IsRestarting() bool
+
     // Waits for a server instance to stop gracefully. If the server is still detected
     // as running after seconds, an error will be returned, or the server will be terminated
     // depending on the value of the second argument.
@@ -6,6 +6,7 @@ import (
     "context"
     "encoding/json"
     "fmt"
+    "github.com/apex/log"
     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/container"
     "github.com/docker/docker/api/types/mount"
@@ -15,17 +16,20 @@ import (
     "github.com/pkg/errors"
     "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
-    "go.uber.org/zap"
+    "golang.org/x/sync/semaphore"
     "io"
-    "math"
     "os"
+    "path/filepath"
     "strconv"
     "strings"
+    "sync"
     "time"
 )
 
 // Defines the base environment for Docker instances running through Wings.
 type DockerEnvironment struct {
+    sync.RWMutex
+
     Server *Server
 
     // The Docker client being used for this instance.
@@ -43,6 +47,25 @@ type DockerEnvironment struct {
     // Holds the stats stream used by the polling commands so that we can easily close
     // it out.
     stats io.ReadCloser
+
+    // Locks when we're performing a restart to avoid trying to restart a process that is already
+    // being restarted.
+    restartSem *semaphore.Weighted
+}
+
+// Set if this process is currently attached to the process.
+func (d *DockerEnvironment) SetAttached(a bool) {
+    d.Lock()
+    d.attached = a
+    d.Unlock()
+}
+
+// Determine if the this process is currently attached to the container.
+func (d *DockerEnvironment) IsAttached() bool {
+    d.RLock()
+    defer d.RUnlock()
+
+    return d.attached
 }
 
 // Creates a new base Docker environment. A server must still be attached to it.
@@ -71,7 +94,7 @@ func (d *DockerEnvironment) Type() string {
 
 // Determines if the container exists in this environment.
 func (d *DockerEnvironment) Exists() (bool, error) {
-    _, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid)
+    _, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
 
     if err != nil {
         // If this error is because the container instance wasn't found via Docker we
@@ -95,9 +118,7 @@ func (d *DockerEnvironment) Exists() (bool, error) {
 //
 // @see docker/client/errors.go
 func (d *DockerEnvironment) IsRunning() (bool, error) {
-    ctx := context.Background()
-
-    c, err := d.Client.ContainerInspect(ctx, d.Server.Uuid)
+    c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
     if err != nil {
         return false, err
     }
@@ -109,7 +130,7 @@ func (d *DockerEnvironment) IsRunning() (bool, error) {
 // making any changes to the operational state of the container. This allows memory, cpu,
 // and IO limitations to be adjusted on the fly for individual instances.
 func (d *DockerEnvironment) InSituUpdate() error {
-    if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid); err != nil {
+    if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err != nil {
         // If the container doesn't exist for some reason there really isn't anything
         // we can do to fix that in this process (it doesn't make sense at least). In those
         // cases just return without doing anything since we still want to save the configuration
@@ -127,7 +148,11 @@ func (d *DockerEnvironment) InSituUpdate() error {
         Resources: d.getResourcesForServer(),
     }
 
-    if _, err := d.Client.ContainerUpdate(context.Background(), d.Server.Uuid, u); err != nil {
+    d.Server.Log().WithField("limits", fmt.Sprintf("%+v", u.Resources)).Debug("updating server container on-the-fly with passed limits")
+
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+    defer cancel()
+    if _, err := d.Client.ContainerUpdate(ctx, d.Server.Id(), u); err != nil {
         return errors.WithStack(err)
     }
 
@@ -142,7 +167,7 @@ func (d *DockerEnvironment) InSituUpdate() error {
 // state. This ensures that unexpected container deletion while Wings is running does
 // not result in the server becoming unbootable.
 func (d *DockerEnvironment) OnBeforeStart() error {
-    zap.S().Infow("syncing server configuration with Panel", zap.String("server", d.Server.Uuid))
+    d.Server.Log().Info("syncing server configuration with panel")
     if err := d.Server.Sync(); err != nil {
         return err
     }
@@ -153,7 +178,7 @@ func (d *DockerEnvironment) OnBeforeStart() error {
 
     // Always destroy and re-create the server container to ensure that synced data from
     // the Panel is used.
-    if err := d.Client.ContainerRemove(context.Background(), d.Server.Uuid, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
+    if err := d.Client.ContainerRemove(context.Background(), d.Server.Id(), types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
         if !client.IsErrNotFound(err) {
             return err
         }
@@ -183,6 +208,10 @@ func (d *DockerEnvironment) Start() error {
     // that point.
     defer func() {
         if sawError {
+            // If we don't set it to stopping first, you'll trigger crash detection which
+            // we don't want to do at this point since it'll just immediately try to do the
+            // exact same action that lead to it crashing in the first place...
+            d.Server.SetState(ProcessStoppingState)
             d.Server.SetState(ProcessOfflineState)
         }
     }()
@@ -193,22 +222,37 @@ func (d *DockerEnvironment) Start() error {
     // Theoretically you'd have the Panel handle all of this logic, but we cannot do that
     // because we allow the websocket to control the server power state as well, so we'll
     // need to handle that action in here.
-    if d.Server.Suspended {
+    if d.Server.IsSuspended() {
         return &suspendedError{}
     }
 
-    c, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid)
-    if err != nil && !client.IsErrNotFound(err) {
-        return errors.WithStack(err)
-    }
-
-    // No reason to try starting a container that is already running.
-    if c.State.Running {
-        d.Server.SetState(ProcessRunningState)
-
-        return d.Attach()
-    }
+    if c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err != nil {
+        // Do nothing if the container is not found, we just don't want to continue
+        // to the next block of code here. This check was inlined here to guard againt
+        // a nil-pointer when checking c.State below.
+        //
+        // @see https://github.com/pterodactyl/panel/issues/2000
+        if !client.IsErrNotFound(err) {
+            return errors.WithStack(err)
+        }
+    } else {
+        // If the server is running update our internal state and continue on with the attach.
+        if c.State.Running {
+            d.Server.SetState(ProcessRunningState)
+
+            return d.Attach()
+        }
+
+        // Truncate the log file so we don't end up outputting a bunch of useless log information
+        // to the websocket and whatnot. Check first that the path and file exist before trying
+        // to truncate them.
+        if _, err := os.Stat(c.LogPath); err == nil {
+            if err := os.Truncate(c.LogPath, 0); err != nil {
+                return errors.WithStack(err)
+            }
+        }
+    }
 
     d.Server.SetState(ProcessStartingState)
     // Set this to true for now, we will set it to false once we reach the
     // end of this chain.
@@ -221,15 +265,6 @@ func (d *DockerEnvironment) Start() error {
         return errors.WithStack(err)
     }
 
-    // Truncate the log file so we don't end up outputting a bunch of useless log information
-    // to the websocket and whatnot. Check first that the path and file exist before trying
-    // to truncate them.
-    if _, err := os.Stat(c.LogPath); err == nil {
-        if err := os.Truncate(c.LogPath, 0); err != nil {
-            return errors.WithStack(err)
-        }
-    }
-
     // Update the configuration files defined for the server before beginning the boot process.
     // This process executes a bunch of parallel updates, so we just block until that process
     // is completed. Any errors as a result of this will just be bubbled out in the logger,
@@ -243,8 +278,10 @@ func (d *DockerEnvironment) Start() error {
         return errors.WithStack(err)
     }
 
-    opts := types.ContainerStartOptions{}
-    if err := d.Client.ContainerStart(context.Background(), d.Server.Uuid, opts); err != nil {
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+    defer cancel()
+
+    if err := d.Client.ContainerStart(ctx, d.Server.Id(), types.ContainerStartOptions{}); err != nil {
         return errors.WithStack(err)
     }
 
@@ -257,19 +294,89 @@ func (d *DockerEnvironment) Start() error {
 // Stops the container that the server is running in. This will allow up to 10
 // seconds to pass before a failure occurs.
 func (d *DockerEnvironment) Stop() error {
-    stop := d.Server.processConfiguration.Stop
+    stop := d.Server.ProcessConfiguration().Stop
     if stop.Type == api.ProcessStopSignal {
         return d.Terminate(os.Kill)
     }
 
     d.Server.SetState(ProcessStoppingState)
-    if stop.Type == api.ProcessStopCommand {
+    // Only attempt to send the stop command to the instance if we are actually attached to
+    // the instance. If we are not for some reason, just send the container stop event.
+    if d.IsAttached() && stop.Type == api.ProcessStopCommand {
        return d.SendCommand(stop.Value)
    }
 
    t := time.Second * 10
 
-    return d.Client.ContainerStop(context.Background(), d.Server.Uuid, &t)
+    err := d.Client.ContainerStop(context.Background(), d.Server.Id(), &t)
+    if err != nil {
+        // If the container does not exist just mark the process as stopped and return without
+        // an error.
+        if client.IsErrNotFound(err) {
+            d.SetAttached(false)
+            d.Server.SetState(ProcessOfflineState)
+
+            return nil
+        }
+
+        return err
+    }
+
+    return nil
+}
+
+// Try to acquire a lock to restart the server. If one cannot be obtained within 5 seconds return
+// an error to the caller. You should ideally be checking IsRestarting() before calling this function
+// to avoid unnecessary delays since you can respond immediately from that.
+func (d *DockerEnvironment) acquireRestartLock() error {
+    if d.restartSem == nil {
+        d.restartSem = semaphore.NewWeighted(1)
+    }
+
+    ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
+
+    return d.restartSem.Acquire(ctx, 1)
+}
+
+// Restarts the server process by waiting for the process to gracefully stop and then triggering a
+// start command. This will return an error if there is already a restart process executing for the
+// server. The lock is released when the process is stopped and a start has begun.
+func (d *DockerEnvironment) Restart() error {
+    d.Server.Log().Debug("attempting to acquire restart lock...")
+    if err := d.acquireRestartLock(); err != nil {
+        d.Server.Log().Warn("failed to acquire restart lock; already acquired by a different process")
+        return err
+    }
+
+    d.Server.Log().Debug("acquired restart lock")
+
+    err := d.WaitForStop(60, false)
+    if err != nil {
+        d.restartSem.Release(1)
+        return err
+    }
+
+    // Release the restart lock, it is now safe for someone to attempt restarting the server again.
+    d.restartSem.Release(1)
+
+    // Start the process.
+    return d.Start()
+}
+
+// Check if the server is currently running the restart process by checking if there is a semaphore
+// allocated, and if so, if we can aquire a lock on it.
+func (d *DockerEnvironment) IsRestarting() bool {
+    if d.restartSem == nil {
+        return false
+    }
+
+    if d.restartSem.TryAcquire(1) {
+        d.restartSem.Release(1)
+
+        return false
+    }
+
+    return true
 }
 
 // Attempts to gracefully stop a server using the defined stop command. If the server
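The restart lock above is a `golang.org/x/sync/semaphore.Weighted` of size one: `Restart()` acquires it with a five-second timeout and `IsRestarting()` probes it with `TryAcquire`. The standalone sketch below isolates those two operations; the type and method names are illustrative, only the timeout and semaphore usage come from the diff.

```go
package example

import (
    "context"
    "time"

    "golang.org/x/sync/semaphore"
)

// restartLock shows the two operations the environment performs on its
// weighted semaphore: a timed Acquire before restarting, and a TryAcquire
// probe to report whether a restart is already in flight.
type restartLock struct {
    sem *semaphore.Weighted
}

func newRestartLock() *restartLock {
    return &restartLock{sem: semaphore.NewWeighted(1)}
}

// acquire blocks for up to five seconds, matching the diff's behaviour, and
// returns an error if another restart already holds the lock.
func (r *restartLock) acquire() error {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
    defer cancel()

    return r.sem.Acquire(ctx, 1)
}

// busy reports whether the lock is currently held: if TryAcquire succeeds the
// lock was free, so the permit is released again immediately.
func (r *restartLock) busy() bool {
    if r.sem.TryAcquire(1) {
        r.sem.Release(1)
        return false
    }
    return true
}
```

`TryAcquire` never blocks, so the probe is cheap and the probe itself does not end up holding the lock.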
@@ -290,7 +397,7 @@ func (d *DockerEnvironment) WaitForStop(seconds int, terminate bool) error {
|
|||||||
// Block the return of this function until the container as been marked as no
|
// Block the return of this function until the container as been marked as no
|
||||||
// longer running. If this wait does not end by the time seconds have passed,
|
// longer running. If this wait does not end by the time seconds have passed,
|
||||||
// attempt to terminate the container, or return an error.
|
// attempt to terminate the container, or return an error.
|
||||||
ok, errChan := d.Client.ContainerWait(ctx, d.Server.Uuid, container.WaitConditionNotRunning)
|
ok, errChan := d.Client.ContainerWait(ctx, d.Server.Id(), container.WaitConditionNotRunning)
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
if ctxErr := ctx.Err(); ctxErr != nil {
|
if ctxErr := ctx.Err(); ctxErr != nil {
|
||||||
@@ -312,9 +419,7 @@ func (d *DockerEnvironment) WaitForStop(seconds int, terminate bool) error {
|
|||||||
|
|
||||||
// Forcefully terminates the container using the signal passed through.
|
// Forcefully terminates the container using the signal passed through.
|
||||||
func (d *DockerEnvironment) Terminate(signal os.Signal) error {
|
func (d *DockerEnvironment) Terminate(signal os.Signal) error {
|
||||||
ctx := context.Background()
|
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
||||||
|
|
||||||
c, err := d.Client.ContainerInspect(ctx, d.Server.Uuid)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
@@ -326,30 +431,51 @@ func (d *DockerEnvironment) Terminate(signal os.Signal) error {
|
|||||||
d.Server.SetState(ProcessStoppingState)
|
d.Server.SetState(ProcessStoppingState)
|
||||||
|
|
||||||
return d.Client.ContainerKill(
|
return d.Client.ContainerKill(
|
||||||
ctx, d.Server.Uuid, strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed"),
|
context.Background(), d.Server.Id(), strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed"),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove the Docker container from the machine. If the container is currently running
|
// Remove the Docker container from the machine. If the container is currently running
|
||||||
// it will be forcibly stopped by Docker.
|
// it will be forcibly stopped by Docker.
|
||||||
func (d *DockerEnvironment) Destroy() error {
|
func (d *DockerEnvironment) Destroy() error {
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
// Avoid crash detection firing off.
|
// Avoid crash detection firing off.
|
||||||
d.Server.SetState(ProcessStoppingState)
|
d.Server.SetState(ProcessStoppingState)
|
||||||
|
|
||||||
return d.Client.ContainerRemove(ctx, d.Server.Uuid, types.ContainerRemoveOptions{
|
err := d.Client.ContainerRemove(context.Background(), d.Server.Id(), types.ContainerRemoveOptions{
|
||||||
RemoveVolumes: true,
|
RemoveVolumes: true,
|
||||||
RemoveLinks: false,
|
RemoveLinks: false,
|
||||||
Force: true,
|
Force: true,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// Don't trigger a destroy failure if we try to delete a container that does not
|
||||||
|
// exist on the system. We're just a step ahead of ourselves in that case.
|
||||||
|
//
|
||||||
|
// @see https://github.com/pterodactyl/panel/issues/2001
|
||||||
|
if err != nil && client.IsErrNotFound(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determine the container exit state and return the exit code and wether or not
|
// Determine the container exit state and return the exit code and wether or not
|
||||||
// the container was killed by the OOM killer.
|
// the container was killed by the OOM killer.
|
||||||
func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
|
func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
|
||||||
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Uuid)
|
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// I'm not entirely sure how this can happen to be honest. I tried deleting a
|
||||||
|
// container _while_ a server was running and wings gracefully saw the crash and
|
||||||
|
// created a new container for it.
|
||||||
|
//
|
||||||
|
// However, someone reported an error in Discord about this scenario happening,
|
||||||
|
// so I guess this should prevent it? They didn't tell me how they caused it though
|
||||||
|
// so thats a mystery that will have to go unsolved.
|
||||||
|
//
|
||||||
|
// @see https://github.com/pterodactyl/panel/issues/2003
|
||||||
|
if client.IsErrNotFound(err) {
|
||||||
|
return 1, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
return 0, false, errors.WithStack(err)
|
return 0, false, errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -361,7 +487,7 @@ func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
|
|||||||
// miss important output at the beginning because of the time delay with attaching to the
|
// miss important output at the beginning because of the time delay with attaching to the
|
||||||
// output.
|
// output.
|
||||||
func (d *DockerEnvironment) Attach() error {
|
func (d *DockerEnvironment) Attach() error {
|
||||||
if d.attached {
|
if d.IsAttached() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -369,10 +495,8 @@ func (d *DockerEnvironment) Attach() error {
|
|||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
d.stream, err = d.Client.ContainerAttach(ctx, d.Server.Uuid, types.ContainerAttachOptions{
|
d.stream, err = d.Client.ContainerAttach(context.Background(), d.Server.Id(), types.ContainerAttachOptions{
|
||||||
Stdin: true,
|
Stdin: true,
|
||||||
Stdout: true,
|
Stdout: true,
|
||||||
Stderr: true,
|
Stderr: true,
|
||||||
@@ -387,10 +511,10 @@ func (d *DockerEnvironment) Attach() error {
|
|||||||
Server: d.Server,
|
Server: d.Server,
|
||||||
}
|
}
|
||||||
|
|
||||||
d.attached = true
|
d.SetAttached(true)
|
||||||
go func() {
|
go func() {
|
||||||
if err := d.EnableResourcePolling(); err != nil {
|
if err := d.EnableResourcePolling(); err != nil {
|
||||||
zap.S().Warnw("failed to enabled resource polling on server", zap.String("server", d.Server.Uuid), zap.Error(errors.WithStack(err)))
|
d.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to enable resource polling on server")
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@@ -398,7 +522,7 @@ func (d *DockerEnvironment) Attach() error {
|
|||||||
defer d.stream.Close()
|
defer d.stream.Close()
|
||||||
defer func() {
|
defer func() {
|
||||||
d.Server.SetState(ProcessOfflineState)
|
d.Server.SetState(ProcessOfflineState)
|
||||||
d.attached = false
|
d.SetAttached(false)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
io.Copy(console, d.stream.Reader)
|
io.Copy(console, d.stream.Reader)
|
||||||
@@ -416,10 +540,9 @@ func (d *DockerEnvironment) FollowConsoleOutput() error {
|
|||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.New(fmt.Sprintf("no such container: %s", d.Server.Uuid))
|
return errors.New(fmt.Sprintf("no such container: %s", d.Server.Id()))
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
opts := types.ContainerLogsOptions{
|
opts := types.ContainerLogsOptions{
|
||||||
ShowStderr: true,
|
ShowStderr: true,
|
||||||
ShowStdout: true,
|
ShowStdout: true,
|
||||||
@@ -427,7 +550,7 @@ func (d *DockerEnvironment) FollowConsoleOutput() error {
|
|||||||
Since: time.Now().Format(time.RFC3339),
|
Since: time.Now().Format(time.RFC3339),
|
||||||
}
|
}
|
||||||
|
|
||||||
reader, err := d.Client.ContainerLogs(ctx, d.Server.Uuid, opts)
|
reader, err := d.Client.ContainerLogs(context.Background(), d.Server.Id(), opts)
|
||||||
|
|
||||||
go func(r io.ReadCloser) {
|
go func(r io.ReadCloser) {
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
@@ -438,7 +561,7 @@ func (d *DockerEnvironment) FollowConsoleOutput() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := s.Err(); err != nil {
|
if err := s.Err(); err != nil {
|
||||||
zap.S().Warnw("error processing scanner line in console output", zap.String("server", d.Server.Uuid), zap.Error(err))
|
d.Server.Log().WithField("error", err).Warn("error processing scanner line in console output")
|
||||||
}
|
}
|
||||||
}(reader)
|
}(reader)
|
||||||
|
|
||||||
@@ -453,9 +576,7 @@ func (d *DockerEnvironment) EnableResourcePolling() error {
|
|||||||
return errors.New("cannot enable resource polling on a server that is not running")
|
return errors.New("cannot enable resource polling on a server that is not running")
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := context.Background()
|
stats, err := d.Client.ContainerStats(context.Background(), d.Server.Id(), true)
|
||||||
|
|
||||||
stats, err := d.Client.ContainerStats(ctx, d.Server.Uuid, true)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
@@ -468,7 +589,7 @@ func (d *DockerEnvironment) EnableResourcePolling() error {
|
|||||||
|
|
||||||
if err := dec.Decode(&v); err != nil {
|
if err := dec.Decode(&v); err != nil {
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
zap.S().Warnw("encountered error processing server stats; stopping collection", zap.Error(err))
|
d.Server.Log().WithField("error", err).Warn("encountered error processing server stats, stopping collection")
|
||||||
}
|
}
|
||||||
|
|
||||||
d.DisableResourcePolling()
|
d.DisableResourcePolling()
|
||||||
@@ -482,20 +603,16 @@ func (d *DockerEnvironment) EnableResourcePolling() error {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
s.Resources.CpuAbsolute = s.Resources.CalculateAbsoluteCpu(&v.PreCPUStats, &v.CPUStats)
|
s.Proc().UpdateFromDocker(v)
|
||||||
s.Resources.Memory = v.MemoryStats.Usage
|
for _, nw := range v.Networks {
|
||||||
s.Resources.MemoryLimit = v.MemoryStats.Limit
|
s.Proc().UpdateNetworkBytes(&nw)
|
||||||
|
}
|
||||||
|
|
||||||
// Why you ask? This already has the logic for caching disk space in use and then
|
// Why you ask? This already has the logic for caching disk space in use and then
|
||||||
// also handles pushing that value to the resources object automatically.
|
// also handles pushing that value to the resources object automatically.
|
||||||
s.Filesystem.HasSpaceAvailable()
|
s.Filesystem.HasSpaceAvailable()
|
||||||
|
|
||||||
for _, nw := range v.Networks {
|
b, _ := json.Marshal(s.Proc())
|
||||||
s.Resources.Network.RxBytes += nw.RxBytes
|
|
||||||
s.Resources.Network.TxBytes += nw.TxBytes
|
|
||||||
}
|
|
||||||
|
|
||||||
b, _ := json.Marshal(s.Resources)
|
|
||||||
s.Events().Publish(StatsEvent, string(b))
|
s.Events().Publish(StatsEvent, string(b))
|
||||||
}
|
}
|
||||||
}(d.Server)
|
}(d.Server)
|
||||||
@@ -510,26 +627,64 @@ func (d *DockerEnvironment) DisableResourcePolling() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
err := d.stats.Close()
|
err := d.stats.Close()
|
||||||
|
d.Server.Proc().Empty()
|
||||||
d.Server.Resources.CpuAbsolute = 0
|
|
||||||
d.Server.Resources.Memory = 0
|
|
||||||
d.Server.Resources.Network.TxBytes = 0
|
|
||||||
d.Server.Resources.Network.RxBytes = 0
|
|
||||||
|
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pulls the image from Docker.
|
// Returns the image to be used for the instance.
|
||||||
|
func (d *DockerEnvironment) Image() string {
|
||||||
|
return d.Server.Config().Container.Image
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pulls the image from Docker. If there is an error while pulling the image from the source
|
||||||
|
// but the image already exists locally, we will report that error to the logger but continue
|
||||||
|
// with the process.
|
||||||
|
//
|
||||||
|
// The reasoning behind this is that Quay has had some serious outages as of late, and we don't
|
||||||
|
// need to block all of the servers from booting just because of that. I'd imagine in a lot of
|
||||||
|
// cases an outage shouldn't affect users too badly. It'll at least keep existing servers working
|
||||||
|
// correctly if anything.
|
||||||
//
|
//
|
||||||
// @todo handle authorization & local images
|
// @todo handle authorization & local images
|
||||||
func (d *DockerEnvironment) ensureImageExists(c *client.Client) error {
|
func (d *DockerEnvironment) ensureImageExists() error {
|
||||||
out, err := c.ImagePull(context.Background(), d.Server.Container.Image, types.ImagePullOptions{All: false})
|
// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
|
||||||
|
// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
|
||||||
|
// an image. Let me know when I am inevitably wrong here...
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
out, err := d.Client.ImagePull(ctx, d.Image(), types.ImagePullOptions{All: false})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
images, ierr := d.Client.ImageList(ctx, types.ImageListOptions{})
|
||||||
|
if ierr != nil {
|
||||||
|
// Well damn, something has gone really wrong here, just go ahead and abort; there
|
||||||
|
// isn't much of anything we can do to try and self-recover from this.
|
||||||
|
return ierr
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, img := range images {
|
||||||
|
for _, t := range img.RepoTags {
|
||||||
|
if t != d.Image() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Server.Log().WithFields(log.Fields{
|
||||||
|
"image": d.Image(),
|
||||||
|
"error": errors.New(err.Error()),
|
||||||
|
}).Warn("unable to pull requested image from remote source, however the image exists locally")
|
||||||
|
|
||||||
|
// Okay, we found a matching container image, in that case just go ahead and return
|
||||||
|
// from this function, since there is nothing else we need to do here.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer out.Close()
|
defer out.Close()
|
||||||
|
|
||||||
zap.S().Debugw("pulling docker image... this could take a bit of time", zap.String("image", d.Server.Container.Image))
|
log.WithField("image", d.Image()).Debug("pulling docker image... this could take a bit of time")
|
||||||
|
|
||||||
// I'm not sure what the best approach here is, but this will block execution until the image
|
// I'm not sure what the best approach here is, but this will block execution until the image
|
||||||
// is done being pulled, which is what we need.
|
// is done being pulled, which is what we need.
|
||||||
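The hunk above replaces the bare ImagePull call with a 15-minute, context-bounded pull that falls back to a locally cached copy of the image when the registry cannot be reached. Below is a minimal standalone sketch of that pattern against the Docker SDK; the client setup, image reference ("alpine:latest") and logging are placeholders for illustration, not the daemon's real configuration.

package main

import (
	"context"
	"io"
	"io/ioutil"
	"log"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// ensureImage pulls ref, but tolerates registry outages when a local copy already exists.
func ensureImage(cli *client.Client, ref string) error {
	// Bound the pull so a hung registry cannot block the boot process forever.
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute)
	defer cancel()

	out, err := cli.ImagePull(ctx, ref, types.ImagePullOptions{All: false})
	if err != nil {
		// The pull failed; check whether the image is already present locally.
		images, ierr := cli.ImageList(ctx, types.ImageListOptions{})
		if ierr != nil {
			return ierr
		}
		for _, img := range images {
			for _, tag := range img.RepoTags {
				if tag == ref {
					log.Printf("pull of %s failed (%v), using local copy", ref, err)
					return nil
				}
			}
		}
		return err
	}
	defer out.Close()

	// Block until the pull finishes; the reader streams pull progress as JSON lines.
	_, err = io.Copy(ioutil.Discard, out)
	return err
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	if err := ensureImage(cli, "alpine:latest"); err != nil {
		log.Fatal(err)
	}
}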
@@ -547,15 +702,7 @@ func (d *DockerEnvironment) ensureImageExists(c *client.Client) error {
|
|||||||
|
|
||||||
// Creates a new container for the server using all of the data that is currently
|
// Creates a new container for the server using all of the data that is currently
|
||||||
// available for it. If the container already exists it will be returned.
|
// available for it. If the container already exists it will be returned.
|
||||||
//
|
|
||||||
// @todo pull the image being requested if it doesn't exist currently.
|
|
||||||
func (d *DockerEnvironment) Create() error {
|
func (d *DockerEnvironment) Create() error {
|
||||||
ctx := context.Background()
|
|
||||||
cli, err := client.NewClientWithOpts(client.FromEnv)
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure the data directory exists before getting too far through this process.
|
// Ensure the data directory exists before getting too far through this process.
|
||||||
if err := d.Server.Filesystem.EnsureDataDirectory(); err != nil {
|
if err := d.Server.Filesystem.EnsureDataDirectory(); err != nil {
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
@@ -564,50 +711,87 @@ func (d *DockerEnvironment) Create() error {
|
|||||||
// If the container already exists don't hit the user with an error, just return
|
// If the container already exists don't hit the user with an error, just return
|
||||||
// the current information about it which is what we would do when creating the
|
// the current information about it which is what we would do when creating the
|
||||||
// container anyways.
|
// container anyways.
|
||||||
if _, err := cli.ContainerInspect(ctx, d.Server.Uuid); err == nil {
|
if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err == nil {
|
||||||
return nil
|
return nil
|
||||||
} else if !client.IsErrNotFound(err) {
|
} else if !client.IsErrNotFound(err) {
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to pull the requested image before creating the container.
|
// Try to pull the requested image before creating the container.
|
||||||
if err := d.ensureImageExists(cli); err != nil {
|
if err := d.ensureImageExists(); err != nil {
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
conf := &container.Config{
|
conf := &container.Config{
|
||||||
Hostname: "container",
|
Hostname: d.Server.Id(),
|
||||||
|
Domainname: config.Get().Docker.Domainname,
|
||||||
User: strconv.Itoa(config.Get().System.User.Uid),
|
User: strconv.Itoa(config.Get().System.User.Uid),
|
||||||
AttachStdin: true,
|
AttachStdin: true,
|
||||||
AttachStdout: true,
|
AttachStdout: true,
|
||||||
AttachStderr: true,
|
AttachStderr: true,
|
||||||
OpenStdin: true,
|
OpenStdin: true,
|
||||||
Tty: true,
|
Tty: true,
|
||||||
|
|
||||||
ExposedPorts: d.exposedPorts(),
|
ExposedPorts: d.exposedPorts(),
|
||||||
|
Image: d.Image(),
|
||||||
Image: d.Server.Container.Image,
|
Env: d.Server.GetEnvironmentVariables(),
|
||||||
Env: d.environmentVariables(),
|
|
||||||
|
|
||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"Service": "Pterodactyl",
|
"Service": "Pterodactyl",
|
||||||
"ContainerType": "server_process",
|
"ContainerType": "server_process",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mounts := []mount.Mount{
|
||||||
|
{
|
||||||
|
Target: "/home/container",
|
||||||
|
Source: d.Server.Filesystem.Path(),
|
||||||
|
Type: mount.TypeBind,
|
||||||
|
ReadOnly: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var mounted bool
|
||||||
|
for _, m := range d.Server.Config().Mounts {
|
||||||
|
mounted = false
|
||||||
|
source := filepath.Clean(m.Source)
|
||||||
|
target := filepath.Clean(m.Target)
|
||||||
|
|
||||||
|
for _, allowed := range config.Get().AllowedMounts {
|
||||||
|
if !strings.HasPrefix(source, allowed) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
mounts = append(mounts, mount.Mount{
|
||||||
|
Type: mount.TypeBind,
|
||||||
|
|
||||||
|
Source: source,
|
||||||
|
Target: target,
|
||||||
|
ReadOnly: m.ReadOnly,
|
||||||
|
})
|
||||||
|
|
||||||
|
mounted = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
logger := log.WithFields(log.Fields{
|
||||||
|
"server": d.Server.Id(),
|
||||||
|
"source_path": source,
|
||||||
|
"target_path": target,
|
||||||
|
"read_only": m.ReadOnly,
|
||||||
|
})
|
||||||
|
|
||||||
|
if mounted {
|
||||||
|
logger.Debug("attaching mount to server's container")
|
||||||
|
} else {
|
||||||
|
logger.Warn("skipping mount because it isn't allowed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
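The new mount loop above only attaches user-defined mounts whose cleaned source path falls under one of the configured AllowedMounts prefixes; everything else is logged and skipped. A small self-contained sketch of that filter follows; the allow-list entries and mount values are invented for illustration.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

type Mount struct {
	Source, Target string
	ReadOnly       bool
}

// allowedMount reports whether the mount source, once cleaned of any "../"
// segments, sits under one of the permitted prefixes.
func allowedMount(m Mount, allowed []string) bool {
	source := filepath.Clean(m.Source)
	for _, prefix := range allowed {
		if strings.HasPrefix(source, prefix) {
			return true
		}
	}
	return false
}

func main() {
	allowed := []string{"/srv/shared", "/mnt/media"}
	mounts := []Mount{
		{Source: "/srv/shared/maps", Target: "/home/container/maps"},
		{Source: "/srv/shared/../../etc", Target: "/home/container/etc"}, // cleaned to /etc, rejected
	}
	for _, m := range mounts {
		fmt.Println(m.Source, "allowed:", allowedMount(m, allowed))
	}
}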
hostConf := &container.HostConfig{
|
hostConf := &container.HostConfig{
|
||||||
PortBindings: d.portBindings(),
|
PortBindings: d.portBindings(),
|
||||||
|
|
||||||
// Configure the mounts for this container. First mount the server data directory
|
// Configure the mounts for this container. First mount the server data directory
|
||||||
// into the container as a r/w bind.
|
// into the container as a r/w bind.
|
||||||
Mounts: []mount.Mount{
|
Mounts: mounts,
|
||||||
{
|
|
||||||
Target: "/home/container",
|
|
||||||
Source: d.Server.Filesystem.Path(),
|
|
||||||
Type: mount.TypeBind,
|
|
||||||
ReadOnly: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
|
|
||||||
// Configure the /tmp folder mapping in containers. This is necessary for some
|
// Configure the /tmp folder mapping in containers. This is necessary for some
|
||||||
// games that need to make use of it for downloads and other installation processes.
|
// games that need to make use of it for downloads and other installation processes.
|
||||||
@@ -639,21 +823,10 @@ func (d *DockerEnvironment) Create() error {
|
|||||||
"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
|
"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
|
||||||
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
|
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
|
||||||
},
|
},
|
||||||
NetworkMode: "pterodactyl_nw",
|
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pretty sure TZ=X in the environment variables negates the need for this
|
if _, err := d.Client.ContainerCreate(context.Background(), conf, hostConf, nil, d.Server.Id()); err != nil {
|
||||||
// to happen. Leaving it until I can confirm that works for everything.
|
|
||||||
//
|
|
||||||
// if err := mountTimezoneData(hostConf); err != nil {
|
|
||||||
// if os.IsNotExist(err) {
|
|
||||||
// zap.S().Warnw("the timezone data path configured does not exist on the system", zap.Error(errors.WithStack(err)))
|
|
||||||
// } else {
|
|
||||||
// zap.S().Warnw("failed to mount timezone data into container", zap.Error(errors.WithStack(err)))
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
if _, err := cli.ContainerCreate(ctx, conf, hostConf, nil, d.Server.Uuid); err != nil {
|
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -663,7 +836,7 @@ func (d *DockerEnvironment) Create() error {
|
|||||||
// Sends the specified command to the stdin of the running container instance. There is no
|
// Sends the specified command to the stdin of the running container instance. There is no
|
||||||
// confirmation that this data is sent successfully, only that it gets pushed into the stdin.
|
// confirmation that this data is sent successfully, only that it gets pushed into the stdin.
|
||||||
func (d *DockerEnvironment) SendCommand(c string) error {
|
func (d *DockerEnvironment) SendCommand(c string) error {
|
||||||
if !d.attached {
|
if !d.IsAttached() {
|
||||||
return errors.New("attempting to send command to non-attached instance")
|
return errors.New("attempting to send command to non-attached instance")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -675,9 +848,7 @@ func (d *DockerEnvironment) SendCommand(c string) error {
|
|||||||
// Reads the log file for the server. This does not care if the server is running or not, it will
|
// Reads the log file for the server. This does not care if the server is running or not, it will
|
||||||
// simply try to read the last X bytes of the file and return them.
|
// simply try to read the last X bytes of the file and return them.
|
||||||
func (d *DockerEnvironment) Readlog(len int64) ([]string, error) {
|
func (d *DockerEnvironment) Readlog(len int64) ([]string, error) {
|
||||||
ctx := context.Background()
|
j, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
||||||
|
|
||||||
j, err := d.Client.ContainerInspect(ctx, d.Server.Uuid)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -749,42 +920,12 @@ func (d *DockerEnvironment) parseLogToStrings(b []byte) ([]string, error) {
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the environment variables for a server in KEY="VALUE" form.
|
|
||||||
func (d *DockerEnvironment) environmentVariables() []string {
|
|
||||||
zone, _ := time.Now().In(time.Local).Zone()
|
|
||||||
|
|
||||||
var out = []string{
|
|
||||||
fmt.Sprintf("TZ=%s", zone),
|
|
||||||
fmt.Sprintf("STARTUP=%s", d.Server.Invocation),
|
|
||||||
fmt.Sprintf("SERVER_MEMORY=%d", d.Server.Build.MemoryLimit),
|
|
||||||
fmt.Sprintf("SERVER_IP=%s", d.Server.Allocations.DefaultMapping.Ip),
|
|
||||||
fmt.Sprintf("SERVER_PORT=%d", d.Server.Allocations.DefaultMapping.Port),
|
|
||||||
}
|
|
||||||
|
|
||||||
eloop:
|
|
||||||
for k, v := range d.Server.EnvVars {
|
|
||||||
for _, e := range out {
|
|
||||||
if strings.HasPrefix(e, strings.ToUpper(k)) {
|
|
||||||
continue eloop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
out = append(out, fmt.Sprintf("%s=\"%s\"", strings.ToUpper(k), v))
|
|
||||||
}
|
|
||||||
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DockerEnvironment) volumes() map[string]struct{} {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Converts the server allocation mappings into a format that can be understood
|
// Converts the server allocation mappings into a format that can be understood
|
||||||
// by Docker.
|
// by Docker.
|
||||||
func (d *DockerEnvironment) portBindings() nat.PortMap {
|
func (d *DockerEnvironment) portBindings() nat.PortMap {
|
||||||
var out = nat.PortMap{}
|
var out = nat.PortMap{}
|
||||||
|
|
||||||
for ip, ports := range d.Server.Allocations.Mappings {
|
for ip, ports := range d.Server.Config().Allocations.Mappings {
|
||||||
for _, port := range ports {
|
for _, port := range ports {
|
||||||
// Skip over invalid ports.
|
// Skip over invalid ports.
|
||||||
if port < 0 || port > 65535 {
|
if port < 0 || port > 65535 {
|
||||||
@@ -824,26 +965,24 @@ func (d *DockerEnvironment) exposedPorts() nat.PortSet {
|
|||||||
|
|
||||||
// Formats the resources available to a server instance in such a way that Docker will
|
// Formats the resources available to a server instance in such a way that Docker will
|
||||||
// generate a matching environment in the container.
|
// generate a matching environment in the container.
|
||||||
|
//
|
||||||
|
// This will set the actual memory limit on the container using the multiplier which is the
|
||||||
|
// hard limit for the container (going over which will result in a crash). We then set the
|
||||||
|
// reservation to be the expected memory limit based on simply multiplication.
|
||||||
|
//
|
||||||
|
// The swap value is either -1 to disable it, or set to the value of the hard memory limit
|
||||||
|
// plus the additional swap assigned to the server since Docker expects this value to be
|
||||||
|
// the same or higher than the memory limit.
|
||||||
func (d *DockerEnvironment) getResourcesForServer() container.Resources {
|
func (d *DockerEnvironment) getResourcesForServer() container.Resources {
|
||||||
overhead := 1.05
|
|
||||||
// Set the hard limit for memory usage to be 5% more than the amount of memory assigned to
|
|
||||||
// the server. If the memory limit for the server is < 4G, use 10%, if less than 2G use
|
|
||||||
// 15%. This avoids unexpected crashes from processes like Java which run over the limit.
|
|
||||||
if d.Server.Build.MemoryLimit <= 2048 {
|
|
||||||
overhead = 1.15
|
|
||||||
} else if d.Server.Build.MemoryLimit <= 4096 {
|
|
||||||
overhead = 1.10;
|
|
||||||
}
|
|
||||||
|
|
||||||
return container.Resources{
|
return container.Resources{
|
||||||
Memory: int64(math.Round(float64(d.Server.Build.MemoryLimit) * 1000000.0 * overhead)),
|
Memory: d.Server.Build().BoundedMemoryLimit(),
|
||||||
MemoryReservation: d.Server.Build.MemoryLimit * 1000000,
|
MemoryReservation: d.Server.Build().MemoryLimit * 1_000_000,
|
||||||
MemorySwap: d.Server.Build.ConvertedSwap(),
|
MemorySwap: d.Server.Build().ConvertedSwap(),
|
||||||
CPUQuota: d.Server.Build.ConvertedCpuLimit(),
|
CPUQuota: d.Server.Build().ConvertedCpuLimit(),
|
||||||
CPUPeriod: 100000,
|
CPUPeriod: 100_000,
|
||||||
CPUShares: 1024,
|
CPUShares: 1024,
|
||||||
BlkioWeight: d.Server.Build.IoWeight,
|
BlkioWeight: d.Server.Build().IoWeight,
|
||||||
OomKillDisable: &d.Server.Container.OomDisabled,
|
OomKillDisable: &d.Server.Config().Container.OomDisabled,
|
||||||
CpusetCpus: d.Server.Build.Threads,
|
CpusetCpus: d.Server.Build().Threads,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
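The old code computed the hard memory limit inline with a 5–15% overhead multiplier; the new code moves that into Build().BoundedMemoryLimit() while keeping the soft reservation at the configured limit. A rough sketch of what such a bounded limit could look like is below, assuming the limit is configured in megabytes; the tier cut-offs mirror the removed inline code and are not necessarily what the new helper does.

package main

import (
	"fmt"
	"math"
)

// boundedMemoryLimit converts a configured limit in MB into a hard limit in bytes,
// padded so processes like the JVM that briefly overshoot are not immediately OOM-killed.
// The 15%/10%/5% tiers mirror the multipliers used by the removed inline code.
func boundedMemoryLimit(limitMB int64) int64 {
	overhead := 1.05
	if limitMB <= 2048 {
		overhead = 1.15
	} else if limitMB <= 4096 {
		overhead = 1.10
	}
	return int64(math.Round(float64(limitMB) * 1_000_000 * overhead))
}

func main() {
	for _, mb := range []int64{1024, 3072, 8192} {
		fmt.Printf("%5d MB -> reservation %d bytes, hard limit %d bytes\n",
			mb, mb*1_000_000, boundedMemoryLimit(mb))
	}
}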
|
|||||||
@@ -23,8 +23,9 @@ type Event struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type EventBus struct {
|
type EventBus struct {
|
||||||
|
sync.RWMutex
|
||||||
|
|
||||||
subscribers map[string][]chan Event
|
subscribers map[string][]chan Event
|
||||||
mu sync.Mutex
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the server's emitter instance.
|
// Returns the server's emitter instance.
|
||||||
@@ -40,8 +41,8 @@ func (s *Server) Events() *EventBus {
|
|||||||
|
|
||||||
// Publish data to a given topic.
|
// Publish data to a given topic.
|
||||||
func (e *EventBus) Publish(topic string, data string) {
|
func (e *EventBus) Publish(topic string, data string) {
|
||||||
e.mu.Lock()
|
e.RLock()
|
||||||
defer e.mu.Unlock()
|
defer e.RUnlock()
|
||||||
|
|
||||||
t := topic
|
t := topic
|
||||||
// Some of our topics for the socket support passing a more specific namespace,
|
// Some of our topics for the socket support passing a more specific namespace,
|
||||||
@@ -79,8 +80,8 @@ func (e *EventBus) PublishJson(topic string, data interface{}) error {
|
|||||||
|
|
||||||
// Subscribe to an emitter topic using a channel.
|
// Subscribe to an emitter topic using a channel.
|
||||||
func (e *EventBus) Subscribe(topic string, ch chan Event) {
|
func (e *EventBus) Subscribe(topic string, ch chan Event) {
|
||||||
e.mu.Lock()
|
e.Lock()
|
||||||
defer e.mu.Unlock()
|
defer e.Unlock()
|
||||||
|
|
||||||
if p, ok := e.subscribers[topic]; ok {
|
if p, ok := e.subscribers[topic]; ok {
|
||||||
e.subscribers[topic] = append(p, ch)
|
e.subscribers[topic] = append(p, ch)
|
||||||
@@ -91,8 +92,8 @@ func (e *EventBus) Subscribe(topic string, ch chan Event) {
|
|||||||
|
|
||||||
// Unsubscribe a channel from a topic.
|
// Unsubscribe a channel from a topic.
|
||||||
func (e *EventBus) Unsubscribe(topic string, ch chan Event) {
|
func (e *EventBus) Unsubscribe(topic string, ch chan Event) {
|
||||||
e.mu.Lock()
|
e.Lock()
|
||||||
defer e.mu.Unlock()
|
defer e.Unlock()
|
||||||
|
|
||||||
if _, ok := e.subscribers[topic]; ok {
|
if _, ok := e.subscribers[topic]; ok {
|
||||||
for i := range e.subscribers[topic] {
|
for i := range e.subscribers[topic] {
|
||||||
@@ -102,3 +103,18 @@ func (e *EventBus) Unsubscribe(topic string, ch chan Event) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Removes all of the event listeners for the server. This is used when a server
|
||||||
|
// is being deleted to avoid a bunch of de-reference errors cropping up. Obviously
|
||||||
|
// should also check elsewhere and handle a server reference going nil, but this
|
||||||
|
// won't hurt.
|
||||||
|
func (e *EventBus) UnsubscribeAll() {
|
||||||
|
e.Lock()
|
||||||
|
defer e.Unlock()
|
||||||
|
|
||||||
|
// Loop over all of the subscribers and just remove all of the events
|
||||||
|
// for them.
|
||||||
|
for t := range e.subscribers {
|
||||||
|
e.subscribers[t] = make([]chan Event, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
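The event bus changes swap the private sync.Mutex for an embedded sync.RWMutex so Publish only takes a read lock while Subscribe, Unsubscribe, and the new UnsubscribeAll take the write lock. A condensed sketch of that locking split with a simplified Event type; the buffered channel and topics are illustrative only.

package main

import (
	"fmt"
	"sync"
)

type Event struct {
	Topic string
	Data  string
}

type EventBus struct {
	sync.RWMutex
	subscribers map[string][]chan Event
}

func NewEventBus() *EventBus {
	return &EventBus{subscribers: make(map[string][]chan Event)}
}

// Publish only reads the subscriber map, so a read lock lets many publishers run
// concurrently while still excluding Subscribe/Unsubscribe.
func (e *EventBus) Publish(topic, data string) {
	e.RLock()
	defer e.RUnlock()
	for _, ch := range e.subscribers[topic] {
		ch <- Event{Topic: topic, Data: data}
	}
}

// Subscribe mutates the map and therefore needs the exclusive write lock.
func (e *EventBus) Subscribe(topic string, ch chan Event) {
	e.Lock()
	defer e.Unlock()
	e.subscribers[topic] = append(e.subscribers[topic], ch)
}

// UnsubscribeAll drops every listener, e.g. when a server is being deleted.
func (e *EventBus) UnsubscribeAll() {
	e.Lock()
	defer e.Unlock()
	for t := range e.subscribers {
		e.subscribers[t] = nil
	}
}

func main() {
	bus := NewEventBus()
	ch := make(chan Event, 1)
	bus.Subscribe("status", ch)
	bus.Publish("status", "running")
	fmt.Println(<-ch)
}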
|
|||||||
@@ -11,7 +11,7 @@ import (
|
|||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/server/backup"
|
"github.com/pterodactyl/wings/server/backup"
|
||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
"go.uber.org/zap"
|
"golang.org/x/sync/errgroup"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
@@ -26,18 +26,27 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Error returned when there is a bad path provided to one of the FS calls.
|
// Error returned when there is a bad path provided to one of the FS calls.
|
||||||
var InvalidPathResolution = errors.New("invalid path resolution")
|
type PathResolutionError struct{}
|
||||||
|
|
||||||
|
// Returns the error response in a string form that can be more easily consumed.
|
||||||
|
func (pre PathResolutionError) Error() string {
|
||||||
|
return "invalid path resolution"
|
||||||
|
}
|
||||||
|
|
||||||
|
func IsPathResolutionError(err error) bool {
|
||||||
|
_, ok := err.(PathResolutionError)
|
||||||
|
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
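Replacing the InvalidPathResolution sentinel value with a dedicated PathResolutionError type lets callers branch on the error category with a type assertion instead of comparing against a package-level variable. A minimal usage sketch; the resolve function is a made-up stand-in for the real prefix check.

package main

import "fmt"

// PathResolutionError signals that a requested path escaped the server data directory.
type PathResolutionError struct{}

func (PathResolutionError) Error() string { return "invalid path resolution" }

// IsPathResolutionError reports whether err is a path resolution failure.
func IsPathResolutionError(err error) bool {
	_, ok := err.(PathResolutionError)
	return ok
}

func resolve(p string) (string, error) {
	if p == "../../etc/passwd" { // stand-in for the real prefix check
		return "", PathResolutionError{}
	}
	return "/srv/data/" + p, nil
}

func main() {
	if _, err := resolve("../../etc/passwd"); IsPathResolutionError(err) {
		fmt.Println("blocked:", err)
	}
}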
type Filesystem struct {
|
type Filesystem struct {
|
||||||
// The server object associated with this Filesystem.
|
|
||||||
Server *Server
|
Server *Server
|
||||||
|
cacheDiskMu sync.Mutex
|
||||||
Configuration *config.SystemConfiguration
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the root path that contains all of a server's data.
|
// Returns the root path that contains all of a server's data.
|
||||||
func (fs *Filesystem) Path() string {
|
func (fs *Filesystem) Path() string {
|
||||||
return filepath.Join(fs.Configuration.Data, fs.Server.Uuid)
|
return filepath.Join(config.Get().System.Data, fs.Server.Id())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Normalizes a directory being passed in to ensure the user is not able to escape
|
// Normalizes a directory being passed in to ensure the user is not able to escape
|
||||||
@@ -49,12 +58,8 @@ func (fs *Filesystem) Path() string {
|
|||||||
func (fs *Filesystem) SafePath(p string) (string, error) {
|
func (fs *Filesystem) SafePath(p string) (string, error) {
|
||||||
var nonExistentPathResolution string
|
var nonExistentPathResolution string
|
||||||
|
|
||||||
// Calling filepath.Clean on the joined directory will resolve it to the absolute path,
|
// Start with a cleaned up path before checking the more complex bits.
|
||||||
// removing any ../ type of resolution arguments, and leaving us with a direct path link.
|
r := fs.unsafeFilePath(p)
|
||||||
//
|
|
||||||
// This will also trim the existing root path off the beginning of the path passed to
|
|
||||||
// the function since that can get a bit messy.
|
|
||||||
r := filepath.Clean(filepath.Join(fs.Path(), strings.TrimPrefix(p, fs.Path())))
|
|
||||||
|
|
||||||
// At the same time, evaluate the symlink status and determine where this file or folder
|
// At the same time, evaluate the symlink status and determine where this file or folder
|
||||||
// is truly pointing to.
|
// is truly pointing to.
|
||||||
@@ -72,7 +77,7 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
|
|||||||
for k := range parts {
|
for k := range parts {
|
||||||
try = strings.Join(parts[:(len(parts)-k)], "/")
|
try = strings.Join(parts[:(len(parts)-k)], "/")
|
||||||
|
|
||||||
if !strings.HasPrefix(try, fs.Path()) {
|
if !fs.unsafeIsInDataDirectory(try) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -87,8 +92,8 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
|
|||||||
// If the new path doesn't start with their root directory there is clearly an escape
|
// If the new path doesn't start with their root directory there is clearly an escape
|
||||||
// attempt going on, and we should NOT resolve this path for them.
|
// attempt going on, and we should NOT resolve this path for them.
|
||||||
if nonExistentPathResolution != "" {
|
if nonExistentPathResolution != "" {
|
||||||
if !strings.HasPrefix(nonExistentPathResolution, fs.Path()) {
|
if !fs.unsafeIsInDataDirectory(nonExistentPathResolution) {
|
||||||
return "", InvalidPathResolution
|
return "", PathResolutionError{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the nonExistentPathResolution variable is not empty then the initial path requested
|
// If the nonExistentPathResolution variable is not empty then the initial path requested
|
||||||
@@ -101,11 +106,99 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
|
|||||||
// If the requested directory from EvalSymlinks begins with the server root directory go
|
// If the requested directory from EvalSymlinks begins with the server root directory go
|
||||||
// ahead and return it. If not we'll return an error which will block any further action
|
// ahead and return it. If not we'll return an error which will block any further action
|
||||||
// on the file.
|
// on the file.
|
||||||
if strings.HasPrefix(p, fs.Path()) {
|
if fs.unsafeIsInDataDirectory(p) {
|
||||||
return p, nil
|
return p, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return "", InvalidPathResolution
|
return "", PathResolutionError{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate a path to the file by cleaning it up and appending the root server path to it. This
|
||||||
|
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
|
||||||
|
// the fs.unsafeIsInDataDirectory(p) function to confirm.
|
||||||
|
func (fs *Filesystem) unsafeFilePath(p string) string {
|
||||||
|
// Calling filepath.Clean on the joined directory will resolve it to the absolute path,
|
||||||
|
// removing any ../ type of resolution arguments, and leaving us with a direct path link.
|
||||||
|
//
|
||||||
|
// This will also trim the existing root path off the beginning of the path passed to
|
||||||
|
// the function since that can get a bit messy.
|
||||||
|
return filepath.Clean(filepath.Join(fs.Path(), strings.TrimPrefix(p, fs.Path())))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check that the path string starts with the server data directory path. This function DOES NOT
|
||||||
|
// validate that the rest of the path does not end up resolving out of this directory, or that the
|
||||||
|
// targeted file or folder is not a symlink doing the same thing.
|
||||||
|
func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
|
||||||
|
return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
|
||||||
|
}
|
||||||
|
|
||||||
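unsafeIsInDataDirectory normalizes both sides with a trailing slash before the prefix comparison so a sibling directory such as /srv/data/1234-evil does not pass a naive strings.HasPrefix check against /srv/data/1234. A tiny demonstration of why that normalization matters, using hypothetical paths:

package main

import (
	"fmt"
	"strings"
)

// inDataDirectory mirrors the trailing-slash normalization shown above.
func inDataDirectory(p, root string) bool {
	return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(root, "/")+"/")
}

func main() {
	root := "/srv/data/1234"
	for _, p := range []string{"/srv/data/1234/logs", "/srv/data/12345-evil", "/srv/data/1234"} {
		naive := strings.HasPrefix(p, root)
		fmt.Printf("%-25s naive=%v normalized=%v\n", p, naive, inDataDirectory(p, root))
	}
}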
|
// Helper function to keep some of the codebase a little cleaner. Returns a "safe" version of the path
|
||||||
|
// joined with a file. This is important because you cannot just assume that appending a file to a cleaned
|
||||||
|
// path will result in a cleaned path to that file. For example, imagine you have the following scenario:
|
||||||
|
//
|
||||||
|
// my_bad_file -> symlink:/etc/passwd
|
||||||
|
//
|
||||||
|
// cleaned := SafePath("../../etc") -> "/"
|
||||||
|
// filepath.Join(cleaned, my_bad_file) -> "/my_bad_file"
|
||||||
|
//
|
||||||
|
// You might think that "/my_bad_file" is fine since it isn't pointing to the original "../../etc/my_bad_file".
|
||||||
|
// However, this doesn't account for symlinks where the file might be pointing outside of the directory, so
|
||||||
|
// calling a function such as Chown against it would chown the symlinked location, and not the file within the
|
||||||
|
// Wings daemon.
|
||||||
|
func (fs *Filesystem) SafeJoin(dir string, f os.FileInfo) (string, error) {
|
||||||
|
if f.Mode()&os.ModeSymlink != 0 {
|
||||||
|
return fs.SafePath(filepath.Join(dir, f.Name()))
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Join(dir, f.Name()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Executes the fs.SafePath function in parallel against an array of paths. If any of the calls
|
||||||
|
// fails an error will be returned.
|
||||||
|
func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
|
||||||
|
var cleaned []string
|
||||||
|
|
||||||
|
// Simple locker function to avoid racy appends to the array of cleaned paths.
|
||||||
|
var m = new(sync.Mutex)
|
||||||
|
var push = func(c string) {
|
||||||
|
m.Lock()
|
||||||
|
cleaned = append(cleaned, c)
|
||||||
|
m.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create an error group that we can use to run processes in parallel while retaining
|
||||||
|
// the ability to cancel the entire process immediately should any of it fail.
|
||||||
|
g, ctx := errgroup.WithContext(context.Background())
|
||||||
|
|
||||||
|
// Iterate over all of the paths and generate a cleaned path, if there is an error for any
|
||||||
|
// of the files, abort the process.
|
||||||
|
for _, p := range paths {
|
||||||
|
// Create copy so we can use it within the goroutine correctly.
|
||||||
|
pi := p
|
||||||
|
|
||||||
|
// Recursively call this function to continue digging through the directory tree within
|
||||||
|
// a separate goroutine. If the context is canceled, abort this process.
|
||||||
|
g.Go(func() error {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
default:
|
||||||
|
// If the callback returns true, go ahead and keep walking deeper. This allows
|
||||||
|
// us to programmatically continue deeper into directories, or stop digging
|
||||||
|
// if that pathway knows it needs nothing else.
|
||||||
|
if c, err := fs.SafePath(pi); err != nil {
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
push(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Block until all of the routines finish and have returned a value.
|
||||||
|
return cleaned, g.Wait()
|
||||||
}
|
}
|
||||||
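ParallelSafePath fans the per-path checks out through golang.org/x/sync/errgroup so a single bad path cancels the remaining work. The general shape of that pattern, with a made-up validate function standing in for SafePath, looks roughly like this:

package main

import (
	"context"
	"fmt"
	"strings"
	"sync"

	"golang.org/x/sync/errgroup"
)

// validate is a stand-in for SafePath: reject anything trying to climb out of the root.
func validate(p string) (string, error) {
	if strings.Contains(p, "..") {
		return "", fmt.Errorf("unsafe path: %s", p)
	}
	return "/srv/data/" + p, nil
}

func cleanAll(paths []string) ([]string, error) {
	var (
		mu      sync.Mutex
		cleaned []string
	)
	g, ctx := errgroup.WithContext(context.Background())
	for _, p := range paths {
		p := p // capture the loop variable for the goroutine
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				c, err := validate(p)
				if err != nil {
					return err
				}
				mu.Lock()
				cleaned = append(cleaned, c)
				mu.Unlock()
				return nil
			}
		})
	}
	// Wait returns the first error encountered, if any.
	return cleaned, g.Wait()
}

func main() {
	out, err := cleanAll([]string{"logs/latest.log", "plugins/config.yml"})
	fmt.Println(out, err)
}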
|
|
||||||
// Determines if the directory a file is trying to be added to has enough space available
|
// Determines if the directory a file is trying to be added to has enough space available
|
||||||
@@ -114,18 +207,45 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
|
|||||||
// Because determining the amount of space being used by a server is a taxing operation we
|
// Because determining the amount of space being used by a server is a taxing operation we
|
||||||
// will load it all up into a cache and pull from that as long as the key is not expired.
|
// will load it all up into a cache and pull from that as long as the key is not expired.
|
||||||
func (fs *Filesystem) HasSpaceAvailable() bool {
|
func (fs *Filesystem) HasSpaceAvailable() bool {
|
||||||
var space = fs.Server.Build.DiskSpace
|
space := fs.Server.Build().DiskSpace
|
||||||
|
|
||||||
|
size, err := fs.getCachedDiskUsage()
|
||||||
|
if err != nil {
|
||||||
|
fs.Server.Log().WithField("error", err).Warn("failed to determine root server directory size")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine if their folder size, in bytes, is smaller than the amount of space they've
|
||||||
|
// been allocated.
|
||||||
|
fs.Server.Proc().SetDisk(size)
|
||||||
|
|
||||||
// If space is -1 or 0 just return true, means they're allowed unlimited.
|
// If space is -1 or 0 just return true, means they're allowed unlimited.
|
||||||
|
//
|
||||||
|
// Technically we could skip disk space calculation because we don't need to check if the server exceeds its limit
|
||||||
|
// but because this method caches the disk usage it would be best to calculate the disk usage and always
|
||||||
|
// return true.
|
||||||
if space <= 0 {
|
if space <= 0 {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we have a match in the cache, use that value in the return. No need to perform an expensive
|
return (size / 1000.0 / 1000.0) <= space
|
||||||
// disk operation, even if this is an empty value.
|
}
|
||||||
if x, exists := fs.Server.Cache.Get("disk_used"); exists {
|
|
||||||
fs.Server.Resources.Disk = x.(int64)
|
// Internal helper function to allow other parts of the codebase to check the total used disk space
|
||||||
return (x.(int64) / 1000.0 / 1000.0) <= space
|
// as needed without overly taxing the system. This will prioritize the value from the cache to avoid
|
||||||
|
// excessive IO usage. We will only walk the filesystem and determine the size of the directory if there
|
||||||
|
// is no longer a cached value.
|
||||||
|
func (fs *Filesystem) getCachedDiskUsage() (int64, error) {
|
||||||
|
// Obtain an exclusive lock on this process so that we don't unintentionally run it at the same
|
||||||
|
// time as another running process. Once the lock is available it'll read from the cache for the
|
||||||
|
// second call rather than hitting the disk in parallel.
|
||||||
|
//
|
||||||
|
// This is effectively the same speed as running this call in parallel since this cache will return
|
||||||
|
// instantly on the second call.
|
||||||
|
fs.cacheDiskMu.Lock()
|
||||||
|
defer fs.cacheDiskMu.Unlock()
|
||||||
|
|
||||||
|
if x, exists := fs.Server.cache.Get("disk_used"); exists {
|
||||||
|
return x.(int64), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there is no size it's either because there is no data (in which case running this function
|
// If there is no size it's either because there is no data (in which case running this function
|
||||||
@@ -133,37 +253,30 @@ func (fs *Filesystem) HasSpaceAvailable() bool {
|
|||||||
// grab the size of their data directory. This is a taxing operation, so we want to store it in
|
// grab the size of their data directory. This is a taxing operation, so we want to store it in
|
||||||
// the cache once we've gotten it.
|
// the cache once we've gotten it.
|
||||||
size, err := fs.DirectorySize("/")
|
size, err := fs.DirectorySize("/")
|
||||||
if err != nil {
|
|
||||||
zap.S().Warnw("failed to determine directory size", zap.String("server", fs.Server.Uuid), zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Always cache the size, even if there is an error. We want to always return that value
|
// Always cache the size, even if there is an error. We want to always return that value
|
||||||
// so that we don't cause an endless loop of determining the disk size if there is a temporary
|
// so that we don't cause an endless loop of determining the disk size if there is a temporary
|
||||||
// error encountered.
|
// error encountered.
|
||||||
fs.Server.Cache.Set("disk_used", size, time.Second*60)
|
fs.Server.cache.Set("disk_used", size, time.Second*60)
|
||||||
|
|
||||||
// Determine if their folder size, in bytes, is smaller than the amount of space they've
|
return size, err
|
||||||
// been allocated.
|
|
||||||
fs.Server.Resources.Disk = size
|
|
||||||
|
|
||||||
return (size / 1000.0 / 1000.0) <= space
|
|
||||||
}
|
}
|
||||||
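getCachedDiskUsage serializes callers behind a dedicated mutex and stores the expensive directory-size result for 60 seconds, so two near-simultaneous requests only walk the disk once. A reduced sketch of that pattern follows; the Get/Set signatures in the diff match github.com/patrickmn/go-cache, which this sketch assumes, and expensiveWalk is a stand-in for DirectorySize("/").

package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/patrickmn/go-cache"
)

type diskUsage struct {
	mu    sync.Mutex
	cache *cache.Cache
}

// expensiveWalk is a stand-in for walking the whole data directory.
func expensiveWalk() int64 {
	time.Sleep(100 * time.Millisecond)
	return 42_000_000
}

// used returns the cached size when present, otherwise walks the disk once and caches
// the result; the mutex keeps concurrent callers from walking the disk in parallel.
func (d *diskUsage) used() int64 {
	d.mu.Lock()
	defer d.mu.Unlock()

	if x, ok := d.cache.Get("disk_used"); ok {
		return x.(int64)
	}
	size := expensiveWalk()
	d.cache.Set("disk_used", size, 60*time.Second)
	return size
}

func main() {
	d := &diskUsage{cache: cache.New(time.Minute, 10*time.Minute)}
	fmt.Println(d.used()) // walks the disk
	fmt.Println(d.used()) // served from the cache
}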
|
|
||||||
// Determines the directory size of a given location by running parallel tasks to iterate
|
// Determines the directory size of a given location by running parallel tasks to iterate
|
||||||
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
|
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
|
||||||
// on locations with tons of files, so it is recommended that you cache the output.
|
// on locations with tons of files, so it is recommended that you cache the output.
|
||||||
func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
||||||
w := fs.NewWalker()
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
var size int64
|
var size int64
|
||||||
err := w.Walk(dir, ctx, func(f os.FileInfo, _ string) bool {
|
err := fs.Walk(dir, func(_ string, f os.FileInfo, err error) error {
|
||||||
// Only increment the size when we're dealing with a file specifically, otherwise
|
if err != nil {
|
||||||
// just continue digging deeper until there are no more directories to iterate over.
|
return fs.handleWalkerError(err, f)
|
||||||
|
}
|
||||||
|
|
||||||
if !f.IsDir() {
|
if !f.IsDir() {
|
||||||
atomic.AddInt64(&size, f.Size())
|
atomic.AddInt64(&size, f.Size())
|
||||||
}
|
}
|
||||||
return true
|
|
||||||
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
return size, err
|
return size, err
|
||||||
@@ -245,7 +358,7 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
|||||||
|
|
||||||
// Finally, chown the file to ensure the permissions don't end up out-of-whack
|
// Finally, chown the file to ensure the permissions don't end up out-of-whack
|
||||||
// if we had just created it.
|
// if we had just created it.
|
||||||
return fs.Chown(p)
|
return fs.Chown(cleaned)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Defines the stat struct object.
|
// Defines the stat struct object.
|
||||||
@@ -333,6 +446,20 @@ func (fs *Filesystem) Rename(from string, to string) error {
|
|||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if f, err := os.Stat(cleanedFrom); err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
} else {
|
||||||
|
d := cleanedTo
|
||||||
|
if !f.IsDir() {
|
||||||
|
d = strings.TrimSuffix(d, path.Base(cleanedTo))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that the directory we're moving into exists correctly on the system.
|
||||||
|
if mkerr := os.MkdirAll(d, 0644); mkerr != nil {
|
||||||
|
return errors.WithStack(mkerr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return os.Rename(cleanedFrom, cleanedTo)
|
return os.Rename(cleanedFrom, cleanedTo)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -347,7 +474,7 @@ func (fs *Filesystem) Chown(path string) error {
|
|||||||
if s, err := os.Stat(cleaned); err != nil {
|
if s, err := os.Stat(cleaned); err != nil {
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
} else if !s.IsDir() {
|
} else if !s.IsDir() {
|
||||||
return os.Chown(cleaned, fs.Configuration.User.Uid, fs.Configuration.User.Gid)
|
return os.Chown(cleaned, config.Get().System.User.Uid, config.Get().System.User.Gid)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.chownDirectory(cleaned)
|
return fs.chownDirectory(cleaned)
|
||||||
@@ -373,16 +500,27 @@ func (fs *Filesystem) chownDirectory(path string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, f := range files {
|
for _, f := range files {
|
||||||
|
// Do not attempt to chown a symlink. Go's os.Chown function will affect the symlink
|
||||||
|
// so if it points to a location outside the data directory the user would be able to
|
||||||
|
// (un)intentionally modify that files permissions.
|
||||||
|
if f.Mode()&os.ModeSymlink != 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
p, err := fs.SafeJoin(cleaned, f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if f.IsDir() {
|
if f.IsDir() {
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
|
|
||||||
go func(p string) {
|
go func(p string) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
fs.chownDirectory(p)
|
fs.chownDirectory(p)
|
||||||
}(filepath.Join(cleaned, f.Name()))
|
}(p)
|
||||||
} else {
|
} else {
|
||||||
// Chown the file.
|
os.Chown(p, config.Get().System.User.Uid, config.Get().System.User.Gid)
|
||||||
os.Chown(filepath.Join(cleaned, f.Name()), fs.Configuration.User.Uid, fs.Configuration.User.Gid)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -401,13 +539,12 @@ func (fs *Filesystem) Copy(p string) error {
|
|||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if s, err := os.Stat(cleaned); (err != nil && os.IsNotExist(err)) || s.IsDir() || !s.Mode().IsRegular() {
|
if s, err := os.Stat(cleaned); err != nil {
|
||||||
// For now I think I am okay just returning a nil response if the thing
|
return err
|
||||||
// we're trying to copy doesn't exist. Probably will want to come back and
|
} else if s.IsDir() || !s.Mode().IsRegular() {
|
||||||
// re-evaluate if this is a smart decision (I'm guessing not).
|
// If this is a directory or not a regular file, just throw a not-exist error
|
||||||
return nil
|
// since anything calling this function should understand what that means.
|
||||||
} else if err != nil {
|
return os.ErrNotExist
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
base := filepath.Base(cleaned)
|
base := filepath.Base(cleaned)
|
||||||
@@ -475,17 +612,26 @@ func (fs *Filesystem) Copy(p string) error {
|
|||||||
// Deletes a file or folder from the system. Prevents the user from accidentally
|
// Deletes a file or folder from the system. Prevents the user from accidentally
|
||||||
// (or maliciously) removing their root server data directory.
|
// (or maliciously) removing their root server data directory.
|
||||||
func (fs *Filesystem) Delete(p string) error {
|
func (fs *Filesystem) Delete(p string) error {
|
||||||
cleaned, err := fs.SafePath(p)
|
// This is one of the few (only?) places in the codebase where we're explicitly not using
|
||||||
if err != nil {
|
// the SafePath functionality when working with user provided input. If we did, you would
|
||||||
return errors.WithStack(err)
|
// not be able to delete a file that is a symlink pointing to a location outside of the data
|
||||||
|
// directory.
|
||||||
|
//
|
||||||
|
// We also want to avoid resolving a symlink that points _within_ the data directory and thus
|
||||||
|
// deleting the actual source file for the symlink rather than the symlink itself. For these
|
||||||
|
// purposes just resolve the actual file path using filepath.Join() and confirm that the path
|
||||||
|
// exists within the data directory.
|
||||||
|
resolved := fs.unsafeFilePath(p)
|
||||||
|
if !fs.unsafeIsInDataDirectory(resolved) {
|
||||||
|
return PathResolutionError{}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Block any whoopsies.
|
// Block any whoopsies.
|
||||||
if cleaned == fs.Path() {
|
if resolved == fs.Path() {
|
||||||
return errors.New("cannot delete root server directory")
|
return errors.New("cannot delete root server directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
return os.RemoveAll(cleaned)
|
return os.RemoveAll(resolved)
|
||||||
}
|
}
|
||||||
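Delete deliberately skips the symlink-resolving SafePath and instead joins and prefix-checks the raw path, so removing a symlink deletes the link itself rather than the file it points at. A short illustration of the difference using temporary paths; the directory names here are invented for the example.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	root, _ := ioutil.TempDir("", "data")
	defer os.RemoveAll(root)

	outside, _ := ioutil.TempDir("", "outside")
	defer os.RemoveAll(outside)

	target := filepath.Join(outside, "secret.txt")
	ioutil.WriteFile(target, []byte("keep me"), 0644)

	link := filepath.Join(root, "shortcut")
	if err := os.Symlink(target, link); err != nil {
		fmt.Println("symlinks not supported here:", err)
		return
	}

	// EvalSymlinks resolves to the target outside the data directory...
	resolved, _ := filepath.EvalSymlinks(link)
	fmt.Println("resolved to:", resolved)

	// ...so deleting by the unresolved path removes only the link, not the target.
	os.RemoveAll(link)
	_, err := os.Stat(target)
	fmt.Println("target still exists:", err == nil)
}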
|
|
||||||
// Lists the contents of a given directory and returns stat information about each
|
// Lists the contents of a given directory and returns stat information about each
|
||||||
@@ -518,7 +664,14 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) {
|
|||||||
|
|
||||||
var m = "inode/directory"
|
var m = "inode/directory"
|
||||||
if !f.IsDir() {
|
if !f.IsDir() {
|
||||||
|
cleanedp, _ := fs.SafeJoin(cleaned, f)
|
||||||
|
if cleanedp != "" {
|
||||||
m, _, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
|
m, _, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
|
||||||
|
} else {
|
||||||
|
// Just pass this for an unknown type because the file could not safely be resolved within
|
||||||
|
// the server data path.
|
||||||
|
m = "application/octet-stream"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
out[idx] = &Stat{
|
out[idx] = &Stat{
|
||||||
@@ -575,9 +728,6 @@ func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.In
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
w := fs.NewWalker()
|
|
||||||
ctx := context.Background()
|
|
||||||
|
|
||||||
i, err := ignore.CompileIgnoreLines(ignored...)
|
i, err := ignore.CompileIgnoreLines(ignored...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -586,20 +736,104 @@ func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.In
|
|||||||
// Walk through all of the files and directories on a server. This callback only returns
|
// Walk through all of the files and directories on a server. This callback only returns
|
||||||
// files found, and will keep walking deeper and deeper into directories.
|
// files found, and will keep walking deeper and deeper into directories.
|
||||||
inc := new(backup.IncludedFiles)
|
inc := new(backup.IncludedFiles)
|
||||||
if err := w.Walk(cleaned, ctx, func(f os.FileInfo, p string) bool {
|
|
||||||
|
if err := fs.Walk(cleaned, func(p string, f os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return fs.handleWalkerError(err, f)
|
||||||
|
}
|
||||||
|
|
||||||
// Avoid unnecessary parsing if there are no ignored files, nothing will match anyways
|
// Avoid unnecessary parsing if there are no ignored files, nothing will match anyways
|
||||||
// so no reason to call the function.
|
// so no reason to call the function.
|
||||||
if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(p, fs.Path() + "/")) {
|
if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(p, fs.Path()+"/")) {
|
||||||
inc.Push(&f, p)
|
inc.Push(&f, p)
|
||||||
}
|
}
|
||||||
|
|
||||||
// We can't just abort if the path is technically ignored. It is possible there is a nested
|
// We can't just abort if the path is technically ignored. It is possible there is a nested
|
||||||
// file or folder that should not be excluded, so in this case we need to just keep going
|
// file or folder that should not be excluded, so in this case we need to just keep going
|
||||||
// until we get to a final state.
|
// until we get to a final state.
|
||||||
return true
|
return nil
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return inc, nil
|
return inc, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Compresses all of the files matching the given paths in the specified directory. This function
|
||||||
|
// also supports passing nested paths to only compress certain files and folders when working in
|
||||||
|
// a larger directory. This effectively creates a local backup, but rather than ignoring specific
|
||||||
|
// files and folders, it takes an allow-list of files and folders.
|
||||||
|
//
|
||||||
|
// All paths are relative to the dir that is passed in as the first argument, and the compressed
|
||||||
|
// file will be placed at that location named `archive-{date}.tar.gz`.
|
||||||
|
func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
|
||||||
|
cleanedRootDir, err := fs.SafePath(dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take all of the paths passed in and merge them together with the root directory we've gotten.
|
||||||
|
for i, p := range paths {
|
||||||
|
paths[i] = filepath.Join(cleanedRootDir, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
cleaned, err := fs.ParallelSafePath(paths)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
inc := new(backup.IncludedFiles)
|
||||||
|
// Iterate over all of the cleaned paths and merge them into a large object of final file
|
||||||
|
// paths to pass into the archiver. As directories are encountered this will drop into them
|
||||||
|
// and look for all of the files.
|
||||||
|
for _, p := range cleaned {
|
||||||
|
f, err := os.Stat(p)
|
||||||
|
if err != nil {
|
||||||
|
fs.Server.Log().WithField("error", err).WithField("path", p).Debug("failed to stat file or directory for compression")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.IsDir() {
|
||||||
|
err := fs.Walk(p, func(s string, info os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return fs.handleWalkerError(err, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !info.IsDir() {
|
||||||
|
inc.Push(&info, s)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
inc.Push(&f, p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
a := &backup.Archive{TrimPrefix: fs.Path(), Files: inc}
|
||||||
|
|
||||||
|
d := path.Join(cleanedRootDir, fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")))
|
||||||
|
|
||||||
|
return a.Create(d, context.Background())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle errors encountered when walking through directories.
|
||||||
|
//
|
||||||
|
// If there is a path resolution error just skip the item entirely. Only return this for a
|
||||||
|
// directory, otherwise return nil. Returning this error for a file will stop the walking
|
||||||
|
// for the remainder of the directory. This is assuming an os.FileInfo struct was even returned.
|
||||||
|
func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
|
||||||
|
if !IsPathResolutionError(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if f != nil && f.IsDir() {
|
||||||
|
return filepath.SkipDir
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
130
server/filesystem_unarchive.go
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"archive/zip"
|
||||||
|
"compress/gzip"
|
||||||
|
"fmt"
|
||||||
|
"github.com/mholt/archiver/v3"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Look through a given archive and determine if decompressing it would put the server over
|
||||||
|
// its allocated disk space limit.
|
||||||
|
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (bool, error) {
|
||||||
|
// Don't waste time trying to determine this if we know the server will have the space for
|
||||||
|
// it since there is no limit.
|
||||||
|
if fs.Server.Build().DiskSpace <= 0 {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
source, err := fs.SafePath(filepath.Join(dir, file))
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
wg := new(sync.WaitGroup)
|
||||||
|
|
||||||
|
var dirSize int64
|
||||||
|
var cErr error
|
||||||
|
// Get the cached size in a parallel process so that if it is not cached we are not
|
||||||
|
// waiting an unnecessary amount of time on this call.
|
||||||
|
go func() {
|
||||||
|
wg.Add(1)
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
dirSize, cErr = fs.getCachedDiskUsage()
|
||||||
|
}()
|
||||||
|
|
||||||
|
var size int64
|
||||||
|
// In a separate thread, walk over the archive and figure out just how large the final
|
||||||
|
// output would be from dearchiving it.
|
||||||
|
go func() {
|
||||||
|
wg.Add(1)
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
// Walk all of the files and calculate the total decompressed size of this archive.
|
||||||
|
archiver.Walk(source, func(f archiver.File) error {
|
||||||
|
atomic.AddInt64(&size, f.Size())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}()
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
return ((dirSize + size) / 1000.0 / 1000.0) <= fs.Server.Build().DiskSpace, cErr
|
||||||
|
}
|
||||||
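SpaceAvailableForDecompression sums the cached directory size and the archive's decompressed size in parallel before comparing against the disk limit. The sketch below shows the same check in isolation, using archiver.Walk from github.com/mholt/archiver/v3 as the new file does; it registers with the WaitGroup before launching the goroutine (Add inside the goroutine can race with Wait), and cachedDirSize/limitMB are stand-in values rather than the daemon's real fields.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/mholt/archiver/v3"
)

// spaceAvailable reports whether extracting the archive at source would keep total
// usage under limitMB. cachedDirSize stands in for the daemon's cached disk usage.
func spaceAvailable(source string, cachedDirSize int64, limitMB int64) (bool, error) {
	var (
		wg          sync.WaitGroup
		archiveSize int64
	)
	var walkErr error

	wg.Add(1) // register before launching so Wait cannot return early
	go func() {
		defer wg.Done()
		walkErr = archiver.Walk(source, func(f archiver.File) error {
			atomic.AddInt64(&archiveSize, f.Size())
			return nil
		})
	}()

	wg.Wait()
	return (cachedDirSize+archiveSize)/1000/1000 <= limitMB, walkErr
}

func main() {
	ok, err := spaceAvailable("backup.tar.gz", 50_000_000, 1024)
	fmt.Println(ok, err)
}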
|
|
||||||
|
// Decompress a file in a given directory by using the archiver tool to infer the file
|
||||||
|
// type and go from there. This will walk over all of the files within the given archive
|
||||||
|
// and ensure that there is not a zip-slip attack being attempted by validating that the
|
||||||
|
// final path is within the server data directory.
|
||||||
|
func (fs *Filesystem) DecompressFile(dir string, file string) error {
|
||||||
|
source, err := fs.SafePath(filepath.Join(dir, file))
|
||||||
|
if err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walk over all of the files spinning up an additional go-routine for each file we've encountered
|
||||||
|
// and then extract that file from the archive and write it to the disk. If any part of this process
|
||||||
|
// encounters an error the entire process will be stopped.
|
||||||
|
return archiver.Walk(source, func(f archiver.File) error {
|
||||||
|
// Don't waste time with directories, we don't need to create them if they have no contents, and
|
||||||
|
// we will ensure the directory exists when opening the file for writing anyways.
|
||||||
|
if f.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return fs.extractFileFromArchive(f)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extracts a single file from the archive and writes it to the disk after verifying that it will end
|
||||||
|
// up in the server data directory.
|
||||||
|
func (fs *Filesystem) extractFileFromArchive(f archiver.File) error {
|
||||||
|
var name string
|
||||||
|
|
||||||
|
switch s := f.Sys().(type) {
|
||||||
|
case *tar.Header:
|
||||||
|
name = s.Name
|
||||||
|
case *gzip.Header:
|
||||||
|
name = s.Name
|
||||||
|
case *zip.FileHeader:
|
||||||
|
name = s.Name
|
||||||
|
default:
|
||||||
|
return errors.New(fmt.Sprintf("could not parse underlying data source with type %s", reflect.TypeOf(s).String()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Guard against a zip-slip attack and prevent writing a file to a destination outside of
|
||||||
|
// the server root directory.
|
||||||
|
p, err := fs.SafePath(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the directory structure for this file exists before trying to write the file
|
||||||
|
// to the disk, otherwise we'll have some unexpected fun.
|
||||||
|
if err := os.MkdirAll(strings.TrimSuffix(p, filepath.Base(p)), 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open the file and truncate it if it already exists.
|
||||||
|
o, err := os.OpenFile(p, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer o.Close()
|
||||||
|
|
||||||
|
_, cerr := io.Copy(o, f)
|
||||||
|
|
||||||
|
return cerr
|
||||||
|
}
|
||||||
@@ -2,69 +2,140 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"golang.org/x/sync/errgroup"
|
"github.com/gammazero/workerpool"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
type FileWalker struct {
|
type FileWalker struct {
|
||||||
*Filesystem
|
*Filesystem
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type PooledFileWalker struct {
|
||||||
|
wg sync.WaitGroup
|
||||||
|
pool *workerpool.WorkerPool
|
||||||
|
callback filepath.WalkFunc
|
||||||
|
cancel context.CancelFunc
|
||||||
|
|
||||||
|
err error
|
||||||
|
errOnce sync.Once
|
||||||
|
|
||||||
|
Filesystem *Filesystem
|
||||||
|
}
|
||||||
|
|
||||||
// Returns a new walker instance.
|
// Returns a new walker instance.
|
||||||
func (fs *Filesystem) NewWalker() *FileWalker {
|
func (fs *Filesystem) NewWalker() *FileWalker {
|
||||||
return &FileWalker{fs}
|
return &FileWalker{fs}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Iterate over all of the files and directories within a given directory. When a file is
|
// Creates a new pooled file walker that will concurrently walk over a given directory but limit itself
|
||||||
// found the callback will be called with the file information. If a directory is encountered
|
// to a worker pool as to not completely flood out the system or cause a process crash.
|
||||||
// it will be recursively passed back through to this function.
|
func newPooledWalker(fs *Filesystem) *PooledFileWalker {
|
||||||
func (fw *FileWalker) Walk(dir string, ctx context.Context, callback func (os.FileInfo, string) bool) error {
|
return &PooledFileWalker{
|
||||||
cleaned, err := fw.SafePath(dir)
|
Filesystem: fs,
|
||||||
|
// Create a worker pool that is the same size as the number of processors available on the
|
||||||
|
// system. Going much higher doesn't provide much of a performance boost, and is only more
|
||||||
|
// likely to lead to resource overloading anyways.
|
||||||
|
pool: workerpool.New(runtime.GOMAXPROCS(0)),
|
||||||
|
}
|
||||||
|
}
|
||||||
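The pooled walker replaces the unbounded errgroup recursion with github.com/gammazero/workerpool sized to GOMAXPROCS, so deep directory trees cannot spawn an unlimited number of goroutines. A stripped-down version of that submit-and-wait pattern over the plain filesystem, with error handling and the callback omitted for brevity:

package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
	"runtime"
	"sync"

	"github.com/gammazero/workerpool"
)

type walker struct {
	wg   sync.WaitGroup
	pool *workerpool.WorkerPool
}

// push schedules one directory on the pool; the WaitGroup tracks outstanding work
// because jobs queue further jobs, so StopWait alone is not enough to know when done.
func (w *walker) push(dir string) {
	w.wg.Add(1)
	w.pool.Submit(func() {
		defer w.wg.Done()
		entries, err := ioutil.ReadDir(dir)
		if err != nil {
			return
		}
		for _, e := range entries {
			p := filepath.Join(dir, e.Name())
			if e.IsDir() {
				w.push(p) // recurse by queueing, not by spawning a goroutine directly
			} else {
				fmt.Println(p)
			}
		}
	})
}

func main() {
	w := &walker{pool: workerpool.New(runtime.GOMAXPROCS(0))}
	w.push(".")
	w.wg.Wait()
	w.pool.StopWait()
}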
|
|
||||||
|
// Process a given path by calling the callback function for all of the files and directories within
|
||||||
|
// the path, and then dropping into any directories that we come across.
|
||||||
|
func (w *PooledFileWalker) process(path string) error {
|
||||||
|
p, err := w.Filesystem.SafePath(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get all of the files from this directory.
|
files, err := ioutil.ReadDir(p)
|
||||||
files, err := ioutil.ReadDir(cleaned)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create an error group that we can use to run processes in parallel while retaining
|
// Loop over all of the files and directories in the given directory and call the provided
|
||||||
// the ability to cancel the entire process immediately should any of it fail.
|
// callback function. If we encounter a directory, push that directory onto the worker queue
|
||||||
g, ctx := errgroup.WithContext(ctx)
|
// to be processed.
|
||||||
|
|
||||||
for _, f := range files {
|
for _, f := range files {
|
||||||
if f.IsDir() {
|
sp, err := w.Filesystem.SafeJoin(p, f)
|
||||||
fi := f
|
if err != nil {
|
||||||
p := filepath.Join(cleaned, f.Name())
|
// Let the callback function handle what to do if there is a path resolution error because a
|
||||||
// Recursively call this function to continue digging through the directory tree within
|
// dangerous path was resolved. If there is an error returned, return from this entire process
|
||||||
// a seperate goroutine. If the context is canceled abort this process.
|
// otherwise just skip over this specific file. We don't care if its a file or a directory at
|
||||||
g.Go(func() error {
|
// this point since either way we're skipping it, however, still check for the SkipDir since that
|
||||||
select {
|
// would be thrown otherwise.
|
||||||
case <-ctx.Done():
|
if err = w.callback(sp, f, err); err != nil && err != filepath.SkipDir {
|
||||||
return ctx.Err()
|
return err
|
||||||
default:
|
}
|
||||||
// If the callback returns true, go ahead and keep walking deeper. This allows
|
|
||||||
// us to programatically continue deeper into directories, or stop digging
|
continue
|
||||||
// if that pathway knows it needs nothing else.
|
}
|
||||||
if callback(fi, p) {
|
|
||||||
return fw.Walk(p, ctx, callback)
|
i, err := os.Stat(sp)
|
||||||
|
// You might end up getting an error about a file or folder not existing if the given path
|
||||||
|
// if it is an invalid symlink. We can safely just skip over these files I believe.
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call the user-provided callback for this file or directory. If an error is returned that is
|
||||||
|
// not a SkipDir call, abort the entire process and bubble that error up.
|
||||||
|
if err = w.callback(sp, i, err); err != nil && err != filepath.SkipDir {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If this is a directory, and we didn't get a SkipDir error, continue through by pushing another
|
||||||
|
// job to the pool to handle it. If we requested a skip, don't do anything just continue on to the
|
||||||
|
// next item.
|
||||||
|
if i.IsDir() && err != filepath.SkipDir {
|
||||||
|
w.push(sp)
|
||||||
|
} else if !i.IsDir() && err == filepath.SkipDir {
|
||||||
|
// Per the spec for the callback, if we get a SkipDir error but it is returned for an item
|
||||||
|
// that is _not_ a directory, abort the remaining operations on the directory.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push a new path into the worker pool and increment the waitgroup so that we do not return too
|
||||||
|
// early and cause panic's as internal directories attempt to submit to the pool.
|
||||||
|
func (w *PooledFileWalker) push(path string) {
|
||||||
|
w.wg.Add(1)
|
||||||
|
w.pool.Submit(func() {
|
||||||
|
defer w.wg.Done()
|
||||||
|
if err := w.process(path); err != nil {
|
||||||
|
w.errOnce.Do(func() {
|
||||||
|
w.err = err
|
||||||
|
if w.cancel != nil {
|
||||||
|
w.cancel()
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
} else {
|
|
||||||
// If this isn't a directory, go ahead and pass the file information into the
|
|
||||||
// callback. We don't care about the response since we won't be stepping into
|
|
||||||
// anything from here.
|
|
||||||
callback(f, filepath.Join(cleaned, f.Name()))
|
|
||||||
}
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walks the given directory and executes the callback function for all of the files and directories
|
||||||
|
// that are encountered.
|
||||||
|
func (fs *Filesystem) Walk(dir string, callback filepath.WalkFunc) error {
|
||||||
|
w := newPooledWalker(fs)
|
||||||
|
w.callback = callback
|
||||||
|
|
||||||
|
_, cancel := context.WithCancel(context.Background())
|
||||||
|
w.cancel = cancel
|
||||||
|
|
||||||
|
w.push(dir)
|
||||||
|
|
||||||
|
w.wg.Wait()
|
||||||
|
w.pool.StopWait()
|
||||||
|
|
||||||
|
if w.err != nil {
|
||||||
|
return w.err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Block until all of the routines finish and have returned a value.
|
return nil
|
||||||
return g.Wait()
|
|
||||||
}
|
}
|
||||||
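For context on how the new pooled walker is consumed: Filesystem.Walk takes a standard filepath.WalkFunc, so callers can prune directories by returning filepath.SkipDir, and the callback may be invoked from several pool workers at once. A short usage sketch, assuming an existing *Filesystem value and imports of "os", "path/filepath", and "sync/atomic"; the size-counting logic is illustrative only:

// Illustrative only: sum file sizes under the server root, skipping ".cache" directories.
func totalSize(fs *Filesystem) (int64, error) {
	var total int64

	err := fs.Walk("/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() && info.Name() == ".cache" {
			// filepath.SkipDir tells the walker not to descend into this directory.
			return filepath.SkipDir
		}

		if !info.IsDir() {
			// The callback can run concurrently on pool workers, so update atomically.
			atomic.AddInt64(&total, info.Size())
		}

		return nil
	})

	return total, err
}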
@@ -4,6 +4,7 @@ import (
 	"bufio"
 	"bytes"
 	"context"
+	"github.com/apex/log"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/mount"
@@ -11,27 +12,44 @@ import (
 	"github.com/pkg/errors"
 	"github.com/pterodactyl/wings/api"
 	"github.com/pterodactyl/wings/config"
-	"go.uber.org/zap"
+	"golang.org/x/sync/semaphore"
+	"html/template"
 	"io"
 	"io/ioutil"
 	"os"
+	"path"
 	"path/filepath"
 	"sync"
+	"time"
 )
 
 // Executes the installation stack for a server process. Bubbles any errors up to the calling
 // function which should handle contacting the panel to notify it of the server state.
-func (s *Server) Install() error {
+//
+// Pass true as the first arugment in order to execute a server sync before the process to
+// ensure the latest information is used.
+func (s *Server) Install(sync bool) error {
+	if sync {
+		s.Log().Info("syncing server state with remote source before executing installation process")
+		if err := s.Sync(); err != nil {
+			return err
+		}
+	}
+
 	err := s.internalInstall()
 
-	zap.S().Debugw("notifying panel of server install state", zap.String("server", s.Uuid))
+	s.Log().Debug("notifying panel of server install state")
 	if serr := s.SyncInstallState(err == nil); serr != nil {
-		zap.S().Warnw(
-			"failed to notify panel of server install state",
-			zap.String("server", s.Uuid),
-			zap.Bool("was_successful", err == nil),
-			zap.Error(serr),
-		)
+		l := s.Log().WithField("was_successful", err == nil)
+
+		// If the request was successful but there was an error with this request, attach the
+		// error to this log entry. Otherwise ignore it in this log since whatever is calling
+		// this function should handle the error and will end up logging the same one.
+		if err == nil {
+			l.WithField("error", serr)
+		}
+
+		l.Warn("failed to notify panel of server install state")
 	}
 
 	return err
@@ -41,18 +59,18 @@ func (s *Server) Install() error {
 // does not touch any existing files for the server, other than what the script modifies.
 func (s *Server) Reinstall() error {
 	if s.GetState() != ProcessOfflineState {
-		zap.S().Debugw("waiting for server instance to enter a stopped state", zap.String("server", s.Uuid))
+		s.Log().Debug("waiting for server instance to enter a stopped state")
 		if err := s.Environment.WaitForStop(10, true); err != nil {
 			return err
 		}
 	}
 
-	return s.Install()
+	return s.Install(true)
 }
 
 // Internal installation function used to simplify reporting back to the Panel.
 func (s *Server) internalInstall() error {
-	script, rerr, err := api.NewRequester().GetInstallationScript(s.Uuid)
+	script, rerr, err := api.NewRequester().GetInstallationScript(s.Id())
 	if err != nil || rerr != nil {
 		if err != nil {
 			return err
@@ -66,14 +84,12 @@ func (s *Server) internalInstall() error {
 		return errors.WithStack(err)
 	}
 
-	zap.S().Infow("beginning installation process for server", zap.String("server", s.Uuid))
-
+	s.Log().Info("beginning installation process for server")
 	if err := p.Run(); err != nil {
 		return err
 	}
 
-	zap.S().Infow("completed installation process for server", zap.String("server", s.Uuid))
-
+	s.Log().Info("completed installation process for server")
 	return nil
 }
 
@@ -82,7 +98,7 @@ type InstallationProcess struct {
 	Script *api.InstallationScript
 
 	client *client.Client
-	mutex  *sync.Mutex
+	context context.Context
 }
 
 // Generates a new installation process struct that will be used to create containers,
@@ -91,24 +107,99 @@ func NewInstallationProcess(s *Server, script *api.InstallationScript) (*Install
 	proc := &InstallationProcess{
 		Script: script,
 		Server: s,
-		mutex:  &sync.Mutex{},
 	}
 
+	ctx, cancel := context.WithCancel(context.Background())
+	s.installer.cancel = &cancel
+
 	if c, err := client.NewClientWithOpts(client.FromEnv); err != nil {
 		return nil, errors.WithStack(err)
 	} else {
 		proc.client = c
+		proc.context = ctx
 	}
 
 	return proc, nil
 }
 
+// Try to obtain an exclusive lock on the installation process for the server. Waits up to 10
+// seconds before aborting with a context timeout.
+func (s *Server) acquireInstallationLock() error {
+	if s.installer.sem == nil {
+		s.installer.sem = semaphore.NewWeighted(1)
+	}
+
+	ctx, _ := context.WithTimeout(context.Background(), time.Second*10)
+
+	return s.installer.sem.Acquire(ctx, 1)
+}
+
+// Determines if the server is actively running the installation process by checking the status
+// of the semaphore lock.
+func (s *Server) IsInstalling() bool {
+	if s.installer.sem == nil {
+		return false
+	}
+
+	if s.installer.sem.TryAcquire(1) {
+		// If we made it into this block it means we were able to obtain an exclusive lock
+		// on the semaphore. In that case, go ahead and release that lock immediately, and
+		// return false.
+		s.installer.sem.Release(1)
+
+		return false
+	}
+
+	return true
+}
+
+// Aborts the server installation process by calling the cancel function on the installer
+// context.
+func (s *Server) AbortInstallation() {
+	if !s.IsInstalling() {
+		return
+	}
+
+	if s.installer.cancel != nil {
+		cancel := *s.installer.cancel
+
+		s.Log().Warn("aborting running installation process")
+		cancel()
+	}
+}
+
+// Removes the installer container for the server.
+func (ip *InstallationProcess) RemoveContainer() {
+	err := ip.client.ContainerRemove(ip.context, ip.Server.Id()+"_installer", types.ContainerRemoveOptions{
+		RemoveVolumes: true,
+		Force:         true,
+	})
+
+	if err != nil && !client.IsErrNotFound(err) {
+		ip.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to delete server install container")
+	}
+}
+
 // Runs the installation process, this is done as a backgrounded thread. This will configure
 // the required environment, and then spin up the installation container.
 //
 // Once the container finishes installing the results will be stored in an installation
 // log in the server's configuration directory.
 func (ip *InstallationProcess) Run() error {
+	ip.Server.Log().Debug("acquiring installation process lock")
+	if err := ip.Server.acquireInstallationLock(); err != nil {
+		return err
+	}
+
+	// We now have an exclusive lock on this installation process. Ensure that whenever this
+	// process is finished that the semaphore is released so that other processes and be executed
+	// without encounting a wait timeout.
+	defer func() {
+		ip.Server.Log().Debug("releasing installation process lock")
+		ip.Server.installer.sem.Release(1)
+		ip.Server.installer.cancel = nil
+	}()
+
 	installPath, err := ip.BeforeExecute()
 	if err != nil {
 		return err
@@ -116,13 +207,15 @@ func (ip *InstallationProcess) Run() error {
 
 	cid, err := ip.Execute(installPath)
 	if err != nil {
+		ip.RemoveContainer()
+
 		return err
 	}
 
 	// If this step fails, log a warning but don't exit out of the process. This is completely
 	// internal to the daemon's functionality, and does not affect the status of the server itself.
 	if err := ip.AfterExecute(cid); err != nil {
-		zap.S().Warnw("failed to complete after-execute step of installation process", zap.String("server", ip.Server.Uuid), zap.Error(err))
+		ip.Server.Log().WithField("error", err).Warn("failed to complete after-execute step of installation process")
 	}
 
 	return nil
@@ -131,6 +224,12 @@ func (ip *InstallationProcess) Run() error {
 // Writes the installation script to a temporary file on the host machine so that it
 // can be properly mounted into the installation container and then executed.
 func (ip *InstallationProcess) writeScriptToDisk() (string, error) {
+	// Make sure the temp directory root exists before trying to make a directory within it. The
+	// ioutil.TempDir call expects this base to exist, it won't create it for you.
+	if err := os.MkdirAll(path.Join(os.TempDir(), "pterodactyl/"), 0700); err != nil {
+		return "", errors.WithStack(err)
+	}
+
 	d, err := ioutil.TempDir("", "pterodactyl/")
 	if err != nil {
 		return "", errors.WithStack(err)
@@ -160,7 +259,7 @@ func (ip *InstallationProcess) writeScriptToDisk() (string, error) {
 
 // Pulls the docker image to be used for the installation container.
 func (ip *InstallationProcess) pullInstallationImage() error {
-	r, err := ip.client.ImagePull(context.Background(), ip.Script.ContainerImage, types.ImagePullOptions{})
+	r, err := ip.client.ImagePull(ip.context, ip.Script.ContainerImage, types.ImagePullOptions{})
 	if err != nil {
 		return errors.WithStack(err)
 	}
@@ -168,7 +267,7 @@ func (ip *InstallationProcess) pullInstallationImage() error {
 	// Block continuation until the image has been pulled successfully.
 	scanner := bufio.NewScanner(r)
 	for scanner.Scan() {
-		zap.S().Debugw(scanner.Text())
+		log.Debug(scanner.Text())
 	}
 
 	if err := scanner.Err(); err != nil {
@@ -214,7 +313,7 @@ func (ip *InstallationProcess) BeforeExecute() (string, error) {
 		Force: true,
 	}
 
-	if err := ip.client.ContainerRemove(context.Background(), ip.Server.Uuid+"_installer", opts); err != nil {
+	if err := ip.client.ContainerRemove(ip.context, ip.Server.Id()+"_installer", opts); err != nil {
 		if !client.IsErrNotFound(err) {
 			e = append(e, err)
 		}
@@ -234,17 +333,17 @@ func (ip *InstallationProcess) BeforeExecute() (string, error) {
 
 // Returns the log path for the installation process.
 func (ip *InstallationProcess) GetLogPath() string {
-	return filepath.Join(config.Get().System.GetInstallLogPath(), ip.Server.Uuid+".log")
+	return filepath.Join(config.Get().System.GetInstallLogPath(), ip.Server.Id()+".log")
 }
 
 // Cleans up after the execution of the installation process. This grabs the logs from the
 // process to store in the server configuration directory, and then destroys the associated
 // installation container.
 func (ip *InstallationProcess) AfterExecute(containerId string) error {
-	ctx := context.Background()
+	defer ip.RemoveContainer()
 
-	zap.S().Debugw("pulling installation logs for server", zap.String("server", ip.Server.Uuid), zap.String("container_id", containerId))
-	reader, err := ip.client.ContainerLogs(ctx, containerId, types.ContainerLogsOptions{
+	ip.Server.Log().WithField("container_id", containerId).Debug("pulling installation logs for server")
+	reader, err := ip.client.ContainerLogs(ip.context, containerId, types.ContainerLogsOptions{
 		ShowStdout: true,
 		ShowStderr: true,
 		Follow:     false,
@@ -261,20 +360,39 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
 	defer f.Close()
 
 	// We write the contents of the container output to a more "permanent" file so that they
-	// can be referenced after this container is deleted.
-	if _, err := io.Copy(f, reader); err != nil {
+	// can be referenced after this container is deleted. We'll also include the environment
+	// variables passed into the container to make debugging things a little easier.
+	ip.Server.Log().WithField("path", ip.GetLogPath()).Debug("writing most recent installation logs to disk")
+
+	tmpl, err := template.New("header").Parse(`Pterodactyl Server Installation Log
+
+|
+| Details
+| ------------------------------
+  Server UUID: {{.Server.Id()}}
+  Container Image: {{.Script.ContainerImage}}
+  Container Entrypoint: {{.Script.Entrypoint}}
+
+|
+| Environment Variables
+| ------------------------------
+{{ range $key, $value := .Server.GetEnvironmentVariables }} {{ $value }}
+{{ end }}
+
+|
+| Script Output
+| ------------------------------
+`)
+	if err != nil {
 		return errors.WithStack(err)
 	}
 
-	zap.S().Debugw("removing server installation container", zap.String("server", ip.Server.Uuid), zap.String("container_id", containerId))
-	rErr := ip.client.ContainerRemove(ctx, containerId, types.ContainerRemoveOptions{
-		RemoveVolumes: true,
-		RemoveLinks:   false,
-		Force:         true,
-	})
-
-	if rErr != nil && !client.IsErrNotFound(rErr) {
-		return errors.WithStack(rErr)
+	if err := tmpl.Execute(f, ip); err != nil {
+		return errors.WithStack(err)
+	}
+
+	if _, err := io.Copy(f, reader); err != nil {
+		return errors.WithStack(err)
 	}
 
 	return nil
@@ -282,14 +400,6 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
 
 // Executes the installation process inside a specially created docker container.
 func (ip *InstallationProcess) Execute(installPath string) (string, error) {
-	ctx := context.Background()
-
-	zap.S().Debugw(
-		"creating server installer container",
-		zap.String("server", ip.Server.Uuid),
-		zap.String("script_path", installPath+"/install.sh"),
-	)
-
 	conf := &container.Config{
 		Hostname:     "installer",
 		AttachStdout: true,
@@ -324,7 +434,7 @@ func (ip *InstallationProcess) Execute(installPath string) (string, error) {
 		Tmpfs: map[string]string{
 			"/tmp": "rw,exec,nosuid,size=50M",
 		},
-		DNS: []string{"1.1.1.1", "8.8.8.8"},
+		DNS: config.Get().Docker.Network.Dns,
 		LogConfig: container.LogConfig{
 			Type: "local",
 			Config: map[string]string{
@@ -334,37 +444,29 @@ func (ip *InstallationProcess) Execute(installPath string) (string, error) {
 			},
 		},
 		Privileged:  true,
-		NetworkMode: "pterodactyl_nw",
+		NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
 	}
 
-	zap.S().Infow("creating installer container for server process", zap.String("server", ip.Server.Uuid))
-	r, err := ip.client.ContainerCreate(ctx, conf, hostConf, nil, ip.Server.Uuid+"_installer")
+	ip.Server.Log().WithField("install_script", installPath+"/install.sh").Info("creating install container for server process")
+	r, err := ip.client.ContainerCreate(ip.context, conf, hostConf, nil, ip.Server.Id()+"_installer")
 	if err != nil {
 		return "", errors.WithStack(err)
 	}
 
-	zap.S().Infow(
-		"running installation script for server in container",
-		zap.String("server", ip.Server.Uuid),
-		zap.String("container_id", r.ID),
-	)
-	if err := ip.client.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}); err != nil {
+	ip.Server.Log().WithField("container_id", r.ID).Info("running installation script for server in container")
+	if err := ip.client.ContainerStart(ip.context, r.ID, types.ContainerStartOptions{}); err != nil {
 		return "", err
 	}
 
 	go func(id string) {
 		ip.Server.Events().Publish(DaemonMessageEvent, "Starting installation process, this could take a few minutes...")
 		if err := ip.StreamOutput(id); err != nil {
-			zap.S().Errorw(
-				"error handling streaming output for server install process",
-				zap.String("container_id", id),
-				zap.Error(err),
-			)
+			ip.Server.Log().WithField("error", err).Error("error while handling output stream for server install process")
 		}
 		ip.Server.Events().Publish(DaemonMessageEvent, "Installation process completed.")
 	}(r.ID)
 
-	sChann, eChann := ip.client.ContainerWait(ctx, r.ID, container.WaitConditionNotRunning)
+	sChann, eChann := ip.client.ContainerWait(ip.context, r.ID, container.WaitConditionNotRunning)
 	select {
 	case err := <-eChann:
 		if err != nil {
@@ -380,7 +482,7 @@ func (ip *InstallationProcess) Execute(installPath string) (string, error) {
 // directory, as well as to a websocket listener so that the process can be viewed in
 // the panel by administrators.
 func (ip *InstallationProcess) StreamOutput(id string) error {
-	reader, err := ip.client.ContainerLogs(context.Background(), id, types.ContainerLogsOptions{
+	reader, err := ip.client.ContainerLogs(ip.context, id, types.ContainerLogsOptions{
 		ShowStdout: true,
 		ShowStderr: true,
 		Follow:     true,
@@ -398,12 +500,10 @@ func (ip *InstallationProcess) StreamOutput(id string) error {
 	}
 
 	if err := s.Err(); err != nil {
-		zap.S().Warnw(
-			"error processing scanner line in installation output for server",
-			zap.String("server", ip.Server.Uuid),
-			zap.String("container_id", id),
-			zap.Error(errors.WithStack(err)),
-		)
+		ip.Server.Log().WithFields(log.Fields{
+			"container_id": id,
+			"error":        errors.WithStack(err),
+		}).Warn("error processing scanner line in installation output for server")
 	}
 
 	return nil
@@ -416,7 +516,7 @@ func (ip *InstallationProcess) StreamOutput(id string) error {
 func (s *Server) SyncInstallState(successful bool) error {
 	r := api.NewRequester()
 
-	rerr, err := r.SendInstallationStatus(s.Uuid, successful)
+	rerr, err := r.SendInstallationStatus(s.Id(), successful)
 	if rerr != nil || err != nil {
 		if err != nil {
 			return errors.WithStack(err)
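The installation lock introduced above is a weighted semaphore of size one: Acquire with a timeout context provides the wait-up-to-ten-seconds behaviour, and TryAcquire doubles as a non-blocking "is anything holding this?" probe. A standalone sketch of that pattern, assuming only golang.org/x/sync/semaphore (the names here are illustrative, not the daemon's):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/semaphore"
)

type job struct {
	sem *semaphore.Weighted
}

// Busy reports whether the lock is currently held by trying to take it and
// immediately handing it back when that succeeds.
func (j *job) Busy() bool {
	if j.sem.TryAcquire(1) {
		j.sem.Release(1)
		return false
	}

	return true
}

func (j *job) Run() error {
	// Wait up to 10 seconds for the lock; keeping and deferring cancel avoids
	// leaking the timer (the hunk above discards the cancel function).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err := j.sem.Acquire(ctx, 1); err != nil {
		return err
	}
	defer j.sem.Release(1)

	time.Sleep(time.Second) // stand-in for the real installation work
	return nil
}

func main() {
	j := &job{sem: semaphore.NewWeighted(1)}
	fmt.Println("busy:", j.Busy())
	_ = j.Run()
	fmt.Println("busy:", j.Busy())
}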
@@ -1,8 +1,8 @@
 package server
 
 import (
+	"github.com/apex/log"
 	"github.com/pterodactyl/wings/api"
-	"go.uber.org/zap"
 	"strings"
 )
@@ -27,10 +27,13 @@ func (s *Server) onConsoleOutput(data string) {
 	// If the specific line of output is one that would mark the server as started,
 	// set the server to that state. Only do this if the server is not currently stopped
 	// or stopping.
-	if s.GetState() == ProcessStartingState && strings.Contains(data, s.processConfiguration.Startup.Done) {
-		zap.S().Debugw(
-			"detected server in running state based on line output", zap.String("match", s.processConfiguration.Startup.Done), zap.String("against", data),
-		)
+	match := s.ProcessConfiguration().Startup.Done
+
+	if s.GetState() == ProcessStartingState && strings.Contains(data, match) {
+		s.Log().WithFields(log.Fields{
+			"match":   match,
+			"against": data,
+		}).Debug("detected server in running state based on console line output")
+
 		s.SetState(ProcessRunningState)
 	}
@@ -39,7 +42,8 @@ func (s *Server) onConsoleOutput(data string) {
 	// set the server to be in a stopping state, otherwise crash detection will kick in and
 	// cause the server to unexpectedly restart on the user.
 	if s.IsRunning() {
-		if s.processConfiguration.Stop.Type == api.ProcessStopCommand && data == s.processConfiguration.Stop.Value {
+		stop := s.ProcessConfiguration().Stop
+		if stop.Type == api.ProcessStopCommand && data == stop.Value {
 			s.SetState(ProcessStoppingState)
 		}
 	}
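These hunks continue the migration from zap's sugared logger to per-server apex/log entries. A small sketch of the structured-logging pattern in use, assuming only github.com/apex/log (the field values are made up for illustration):

package main

import "github.com/apex/log"

func main() {
	// A scoped entry carries its fields into every message logged through it,
	// which is what s.Log() provides for a server's UUID.
	entry := log.WithField("server", "00000000-example-uuid")

	entry.WithFields(log.Fields{
		"match":   "Done (",
		"against": "[12:00:01] [Server thread/INFO]: Done (3.2s)!",
	}).Debug("detected server in running state based on console line output")
}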
120
server/loader.go
Normal file
@@ -0,0 +1,120 @@
package server

import (
	"github.com/apex/log"
	"github.com/creasty/defaults"
	"github.com/patrickmn/go-cache"
	"github.com/pkg/errors"
	"github.com/pterodactyl/wings/api"
	"github.com/remeh/sizedwaitgroup"
	"time"
)

var servers = NewCollection(nil)

func GetServers() *Collection {
	return servers
}

// Iterates over a given directory and loads all of the servers listed before returning
// them to the calling function.
func LoadDirectory() error {
	if len(servers.items) != 0 {
		return errors.New("cannot call LoadDirectory with a non-nil collection")
	}

	// We could theoretically use a standard wait group here, however doing
	// that introduces the potential to crash the program due to too many
	// open files. This wouldn't happen on a small setup, but once the daemon is
	// handling many servers you run that risk.
	//
	// For now just process 10 files at a time, that should be plenty fast to
	// read and parse the YAML. We should probably make this configurable down
	// the road to help big instances scale better.
	wg := sizedwaitgroup.New(10)

	configs, rerr, err := api.NewRequester().GetAllServerConfigurations()
	if err != nil || rerr != nil {
		if err != nil {
			return errors.WithStack(err)
		}

		return errors.New(rerr.String())
	}

	log.Debug("retrieving cached server states from disk")
	states, err := getServerStates()
	if err != nil {
		return errors.WithStack(err)
	}

	log.WithField("total_configs", len(configs)).Debug("looping over received configurations from API")
	for uuid, data := range configs {
		wg.Add()

		go func(uuid string, data *api.ServerConfigurationResponse) {
			defer wg.Done()

			log.WithField("uuid", uuid).Debug("creating server object from configuration")
			s, err := FromConfiguration(data)
			if err != nil {
				log.WithField("server", uuid).WithField("error", err).Error("failed to load server, skipping...")
				return
			}

			if state, exists := states[s.Id()]; exists {
				s.SetState(state)
				s.Log().WithField("state", s.GetState()).Debug("loaded server state from cache file")
			}

			servers.Add(s)
		}(uuid, data)
	}

	// Wait until we've processed all of the configuration files in the directory
	// before continuing.
	wg.Wait()

	return nil
}

// Initializes a server using a data byte array. This will be marshaled into the
// given struct using a YAML marshaler. This will also configure the given environment
// for a server.
func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
	cfg := Configuration{}
	if err := defaults.Set(&cfg); err != nil {
		return nil, err
	}

	s := new(Server)
	s.cfg = cfg

	if err := s.UpdateDataStructure(data.Settings, false); err != nil {
		return nil, err
	}

	s.AddEventListeners()

	// Right now we only support a Docker based environment, so I'm going to hard code
	// this logic in. When we're ready to support other environment we'll need to make
	// some modifications here obviously.
	if err := NewDockerEnvironment(s); err != nil {
		return nil, err
	}

	s.cache = cache.New(time.Minute*10, time.Minute*15)
	s.Archiver = Archiver{
		Server: s,
	}
	s.Filesystem = Filesystem{
		Server: s,
	}

	// Forces the configuration to be synced with the panel.
	if err := s.SyncWithConfiguration(data); err != nil {
		return nil, err
	}

	return s, nil
}
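loader.go bounds its concurrency with remeh/sizedwaitgroup rather than a plain sync.WaitGroup: Add() blocks once the configured number of goroutines is in flight, which is what keeps the daemon from opening too many files at once. A minimal sketch of that pattern (the simulated work is illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/remeh/sizedwaitgroup"
)

func main() {
	// At most 10 goroutines run at once; Add() blocks until a slot frees up.
	wg := sizedwaitgroup.New(10)

	for i := 0; i < 100; i++ {
		wg.Add()

		go func(i int) {
			defer wg.Done()

			time.Sleep(10 * time.Millisecond) // stand-in for parsing one configuration
			fmt.Println("processed", i)
		}(i)
	}

	wg.Wait()
}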
8
server/mount.go
Normal file
@@ -0,0 +1,8 @@
package server

// Mount represents a Server Mount.
type Mount struct {
	Target   string `json:"target"`
	Source   string `json:"source"`
	ReadOnly bool   `json:"read_only"`
}
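The new Mount struct only defines the data shape; nothing in this changeset consumes it yet. Purely as a hypothetical illustration, a field-for-field mapping onto the Docker API's mount type could look like the following (the helper and the bind-mount choice are assumptions, not part of the diff):

import "github.com/docker/docker/api/types/mount"

// Illustrative only: translate a server Mount into a Docker bind mount.
func toDockerMount(m Mount) mount.Mount {
	return mount.Mount{
		Type:     mount.TypeBind,
		Source:   m.Source,
		Target:   m.Target,
		ReadOnly: m.ReadOnly,
	}
}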
10
server/process.go
Normal file
@@ -0,0 +1,10 @@
package server

import "github.com/pterodactyl/wings/api"

func (s *Server) ProcessConfiguration() *api.ProcessConfiguration {
	s.RLock()
	defer s.RUnlock()

	return s.procConfig
}
@@ -3,24 +3,38 @@ package server
 import (
 	"github.com/docker/docker/api/types"
 	"math"
+	"sync"
+	"sync/atomic"
 )
 
 // Defines the current resource usage for a given server instance. If a server is offline you
 // should obviously expect memory and CPU usage to be 0. However, disk will always be returned
 // since that is not dependent on the server being running to collect that data.
 type ResourceUsage struct {
-	// The total amount of memory, in bytes, that this server instance is consuming.
+	mu sync.RWMutex
+
+	// The current server status.
+	State string `json:"state" default:"offline"`
+
+	// The total amount of memory, in bytes, that this server instance is consuming. This is
+	// calculated slightly differently than just using the raw Memory field that the stats
+	// return from the container, so please check the code setting this value for how that
+	// is calculated.
 	Memory uint64 `json:"memory_bytes"`
 
 	// The total amount of memory this container or resource can use. Inside Docker this is
 	// going to be higher than you'd expect because we're automatically allocating overhead
 	// abilities for the container, so its not going to be a perfect match.
 	MemoryLimit uint64 `json:"memory_limit_bytes"`
 
 	// The absolute CPU usage is the amount of CPU used in relation to the entire system and
 	// does not take into account any limits on the server process itself.
 	CpuAbsolute float64 `json:"cpu_absolute"`
 
 	// The current disk space being used by the server. This is cached to prevent slow lookup
 	// issues on frequent refreshes.
 	Disk int64 `json:"disk_bytes"`
 
 	// Current network transmit in & out for a container.
 	Network struct {
 		RxBytes uint64 `json:"rx_bytes"`
@@ -28,11 +42,92 @@ type ResourceUsage struct {
 	} `json:"network"`
 }
 
+// Returns the resource usage stats for the server instance. If the server is not running, only the
+// disk space currently used will be returned. When the server is running all of the other stats will
+// be returned.
+//
+// When a process is stopped all of the stats are zeroed out except for the disk.
+func (s *Server) Proc() *ResourceUsage {
+	s.resources.mu.RLock()
+	defer s.resources.mu.RUnlock()
+
+	return &s.resources
+}
+
+// Returns the servers current state.
+func (ru *ResourceUsage) getInternalState() string {
+	ru.mu.RLock()
+	defer ru.mu.RUnlock()
+
+	return ru.State
+}
+
+// Sets the new state for the server.
+func (ru *ResourceUsage) setInternalState(state string) {
+	ru.mu.Lock()
+	ru.State = state
+	ru.mu.Unlock()
+}
+
+// Resets the usages values to zero, used when a server is stopped to ensure we don't hold
+// onto any values incorrectly.
+func (ru *ResourceUsage) Empty() {
+	ru.mu.Lock()
+	defer ru.mu.Unlock()
+
+	ru.Memory = 0
+	ru.CpuAbsolute = 0
+	ru.Network.TxBytes = 0
+	ru.Network.RxBytes = 0
+}
+
+func (ru *ResourceUsage) SetDisk(i int64) {
+	ru.mu.Lock()
+	defer ru.mu.Unlock()
+
+	ru.Disk = i
+}
+
+func (ru *ResourceUsage) UpdateFromDocker(v *types.StatsJSON) {
+	ru.mu.Lock()
+	defer ru.mu.Unlock()
+
+	ru.CpuAbsolute = ru.calculateDockerAbsoluteCpu(&v.PreCPUStats, &v.CPUStats)
+	ru.Memory = ru.calculateDockerMemory(v.MemoryStats)
+	ru.MemoryLimit = v.MemoryStats.Limit
+}
+
+func (ru *ResourceUsage) UpdateNetworkBytes(nw *types.NetworkStats) {
+	atomic.AddUint64(&ru.Network.RxBytes, nw.RxBytes)
+	atomic.AddUint64(&ru.Network.TxBytes, nw.TxBytes)
+}
+
+// The "docker stats" CLI call does not return the same value as the types.MemoryStats.Usage
+// value which can be rather confusing to people trying to compare panel usage to
+// their stats output.
+//
+// This math is straight up lifted from their CLI repository in order to show the same
+// values to avoid people bothering me about it. It should also reflect a slightly more
+// correct memory value anyways.
+//
+// @see https://github.com/docker/cli/blob/96e1d1d6/cli/command/container/stats_helpers.go#L227-L249
+func (ru *ResourceUsage) calculateDockerMemory(stats types.MemoryStats) uint64 {
+	if v, ok := stats.Stats["total_inactive_file"]; ok && v < stats.Usage {
+		return stats.Usage - v
+	}
+
+	if v := stats.Stats["inactive_file"]; v < stats.Usage {
+		return stats.Usage - v
+	}
+
+	return stats.Usage
+}
+
 // Calculates the absolute CPU usage used by the server process on the system, not constrained
 // by the defined CPU limits on the container.
 //
 // @see https://github.com/docker/cli/blob/aa097cf1aa19099da70930460250797c8920b709/cli/command/container/stats_helpers.go#L166
-func (ru *ResourceUsage) CalculateAbsoluteCpu(pStats *types.CPUStats, stats *types.CPUStats) float64 {
+func (ru *ResourceUsage) calculateDockerAbsoluteCpu(pStats *types.CPUStats, stats *types.CPUStats) float64 {
 	// Calculate the change in CPU usage between the current and previous reading.
 	cpuDelta := float64(stats.CPUUsage.TotalUsage) - float64(pStats.CPUUsage.TotalUsage)
 
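The hunk above ends mid-function, but the comment's @see link points at the Docker CLI helper this math follows: the change in the container's total CPU time divided by the change in system CPU time, scaled by the number of online CPUs and expressed as a percentage. A self-contained sketch of that formula, using github.com/docker/docker/api/types; it restates the linked CLI calculation rather than the daemon's exact code:

// Illustrative restatement of the Docker CLI's absolute CPU percentage math.
func absoluteCpuPercent(pStats, stats *types.CPUStats) float64 {
	cpuDelta := float64(stats.CPUUsage.TotalUsage) - float64(pStats.CPUUsage.TotalUsage)
	systemDelta := float64(stats.SystemUsage) - float64(pStats.SystemUsage)

	if systemDelta <= 0.0 || cpuDelta <= 0.0 {
		return 0.0
	}

	// Prefer the reported online CPU count; fall back to the per-CPU sample length.
	cpus := float64(stats.OnlineCPUs)
	if cpus == 0.0 {
		cpus = float64(len(stats.CPUUsage.PercpuUsage))
	}

	return (cpuDelta / systemDelta) * cpus * 100.0
}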
269
server/server.go
269
server/server.go
@@ -1,66 +1,40 @@
|
|||||||
package server
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/creasty/defaults"
|
"github.com/apex/log"
|
||||||
"github.com/patrickmn/go-cache"
|
"github.com/patrickmn/go-cache"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/pterodactyl/wings/api"
|
"github.com/pterodactyl/wings/api"
|
||||||
"github.com/pterodactyl/wings/config"
|
"golang.org/x/sync/semaphore"
|
||||||
"github.com/remeh/sizedwaitgroup"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var servers *Collection
|
|
||||||
|
|
||||||
func GetServers() *Collection {
|
|
||||||
return servers
|
|
||||||
}
|
|
||||||
|
|
||||||
// High level definition for a server instance being controlled by Wings.
|
// High level definition for a server instance being controlled by Wings.
|
||||||
type Server struct {
|
type Server struct {
|
||||||
// The unique identifier for the server that should be used when referencing
|
// Internal mutex used to block actions that need to occur sequentially, such as
|
||||||
// it against the Panel API (and internally). This will be used when naming
|
// writing the configuration to the disk.
|
||||||
// docker containers as well as in log output.
|
sync.RWMutex
|
||||||
Uuid string `json:"uuid"`
|
|
||||||
|
|
||||||
// Whether or not the server is in a suspended state. Suspended servers cannot
|
// Maintains the configuration for the server. This is the data that gets returned by the Panel
|
||||||
// be started or modified except in certain scenarios by an admin user.
|
// such as build settings and container images.
|
||||||
Suspended bool `json:"suspended"`
|
cfg Configuration
|
||||||
|
|
||||||
// The power state of the server.
|
// The crash handler for this server instance.
|
||||||
State string `default:"offline" json:"state"`
|
crasher CrashHandler
|
||||||
|
|
||||||
// The command that should be used when booting up the server instance.
|
resources ResourceUsage
|
||||||
Invocation string `json:"invocation"`
|
Archiver Archiver `json:"-"`
|
||||||
|
Environment Environment `json:"-"`
|
||||||
// An array of environment variables that should be passed along to the running
|
Filesystem Filesystem `json:"-"`
|
||||||
// server process.
|
|
||||||
EnvVars map[string]string `json:"environment" yaml:"environment"`
|
|
||||||
|
|
||||||
Archiver Archiver `json:"-" yaml:"-"`
|
|
||||||
CrashDetection CrashDetection `json:"crash_detection" yaml:"crash_detection"`
|
|
||||||
Build BuildSettings `json:"build"`
|
|
||||||
Allocations Allocations `json:"allocations"`
|
|
||||||
Environment Environment `json:"-" yaml:"-"`
|
|
||||||
Filesystem Filesystem `json:"-" yaml:"-"`
|
|
||||||
Resources ResourceUsage `json:"resources" yaml:"-"`
|
|
||||||
|
|
||||||
Container struct {
|
|
||||||
// Defines the Docker image that will be used for this server
|
|
||||||
Image string `json:"image,omitempty"`
|
|
||||||
// If set to true, OOM killer will be disabled on the server's Docker container.
|
|
||||||
// If not present (nil) we will default to disabling it.
|
|
||||||
OomDisabled bool `default:"true" json:"oom_disabled" yaml:"oom_disabled"`
|
|
||||||
} `json:"container,omitempty"`
|
|
||||||
|
|
||||||
// Server cache used to store frequently requested information in memory and make
|
// Server cache used to store frequently requested information in memory and make
|
||||||
// certain long operations return faster. For example, FS disk space usage.
|
// certain long operations return faster. For example, FS disk space usage.
|
||||||
Cache *cache.Cache `json:"-" yaml:"-"`
|
cache *cache.Cache
|
||||||
|
|
||||||
// Events emitted by the server instance.
|
// Events emitted by the server instance.
|
||||||
emitter *EventBus
|
emitter *EventBus
|
||||||
@@ -68,173 +42,28 @@ type Server struct {
|
|||||||
// Defines the process configuration for the server instance. This is dynamically
|
// Defines the process configuration for the server instance. This is dynamically
|
||||||
// fetched from the Pterodactyl Server instance each time the server process is
|
// fetched from the Pterodactyl Server instance each time the server process is
|
||||||
// started, and then cached here.
|
// started, and then cached here.
|
||||||
processConfiguration *api.ProcessConfiguration
|
procConfig *api.ProcessConfiguration
|
||||||
|
|
||||||
// Internal mutex used to block actions that need to occur sequentially, such as
|
// Tracks the installation process for this server and prevents a server from running
|
||||||
// writing the configuration to the disk.
|
// two installer processes at the same time. This also allows us to cancel a running
|
||||||
sync.RWMutex
|
// installation process, for example when a server is deleted from the panel while the
|
||||||
|
// installer process is still running.
|
||||||
|
installer InstallerDetails
|
||||||
}
|
}
|
||||||
|
|
||||||
// The build settings for a given server that impact docker container creation and
|
type InstallerDetails struct {
|
||||||
// resource limits for a server instance.
|
// The cancel function for the installer. This will be a non-nil value while there
|
||||||
type BuildSettings struct {
|
// is an installer running for the server.
|
||||||
// The total amount of memory in megabytes that this server is allowed to
|
cancel *context.CancelFunc
|
||||||
// use on the host system.
|
|
||||||
MemoryLimit int64 `json:"memory_limit" yaml:"memory"`
|
|
||||||
|
|
||||||
// The amount of additional swap space to be provided to a container instance.
|
// Installer lock. You should obtain an exclusive lock on this context while running
|
||||||
Swap int64 `json:"swap"`
|
// the installation process and release it when finished.
|
||||||
|
sem *semaphore.Weighted
|
||||||
// The relative weight for IO operations in a container. This is relative to other
|
|
||||||
// containers on the system and should be a value between 10 and 1000.
|
|
||||||
IoWeight uint16 `json:"io_weight" yaml:"io"`
|
|
||||||
|
|
||||||
// The percentage of CPU that this instance is allowed to consume relative to
|
|
||||||
// the host. A value of 200% represents complete utilization of two cores. This
|
|
||||||
// should be a value between 1 and THREAD_COUNT * 100.
|
|
||||||
CpuLimit int64 `json:"cpu_limit" yaml:"cpu"`
|
|
||||||
|
|
||||||
// The amount of disk space in megabytes that a server is allowed to use.
|
|
||||||
DiskSpace int64 `json:"disk_space" yaml:"disk"`
|
|
||||||
|
|
||||||
// Sets which CPU threads can be used by the docker instance.
|
|
||||||
Threads string `json:"threads" yaml:"threads"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Converts the CPU limit for a server build into a number that can be better understood
|
// Returns the UUID for the server instance.
|
||||||
// by the Docker environment. If there is no limit set, return -1 which will indicate to
|
func (s *Server) Id() string {
|
||||||
// Docker that it has unlimited CPU quota.
|
return s.Config().GetUuid()
|
||||||
func (b *BuildSettings) ConvertedCpuLimit() int64 {
|
|
||||||
if b.CpuLimit == 0 {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
return b.CpuLimit * 1000
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the amount of swap available as a total in bytes. This is returned as the amount
|
|
||||||
// of memory available to the server initially, PLUS the amount of additional swap to include
|
|
||||||
// which is the format used by Docker.
|
|
||||||
func (b *BuildSettings) ConvertedSwap() int64 {
|
|
||||||
if b.Swap < 0 {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
return (b.Swap * 1000000) + (b.MemoryLimit * 1000000)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Defines the allocations available for a given server. When using the Docker environment
|
|
||||||
// driver these correspond to mappings for the container that allow external connections.
|
|
||||||
type Allocations struct {
|
|
||||||
// Defines the default allocation that should be used for this server. This is
|
|
||||||
// what will be used for {SERVER_IP} and {SERVER_PORT} when modifying configuration
|
|
||||||
// files or the startup arguments for a server.
|
|
||||||
DefaultMapping struct {
|
|
||||||
Ip string `json:"ip"`
|
|
||||||
Port int `json:"port"`
|
|
||||||
} `json:"default" yaml:"default"`
|
|
||||||
|
|
||||||
// Mappings contains all of the ports that should be assigned to a given server
|
|
||||||
// attached to the IP they correspond to.
|
|
||||||
Mappings map[string][]int `json:"mappings"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterates over a given directory and loads all of the servers listed before returning
|
|
||||||
// them to the calling function.
|
|
||||||
func LoadDirectory() error {
|
|
||||||
// We could theoretically use a standard wait group here, however doing
|
|
||||||
// that introduces the potential to crash the program due to too many
|
|
||||||
// open files. This wouldn't happen on a small setup, but once the daemon is
|
|
||||||
// handling many servers you run that risk.
|
|
||||||
//
|
|
||||||
// For now just process 10 files at a time, that should be plenty fast to
|
|
||||||
// read and parse the YAML. We should probably make this configurable down
|
|
||||||
// the road to help big instances scale better.
|
|
||||||
wg := sizedwaitgroup.New(10)
|
|
||||||
|
|
||||||
configs, rerr, err := api.NewRequester().GetAllServerConfigurations()
|
|
||||||
if err != nil || rerr != nil {
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.New(rerr.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
states, err := getServerStates()
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
servers = NewCollection(nil)
|
|
||||||
|
|
||||||
for uuid, data := range configs {
|
|
||||||
wg.Add()
|
|
||||||
|
|
||||||
go func(uuid string, data *api.ServerConfigurationResponse) {
|
|
||||||
defer wg.Done()
|
|
||||||
|
|
||||||
s, err := FromConfiguration(data)
|
|
||||||
if err != nil {
|
|
||||||
zap.S().Errorw("failed to load server, skipping...", zap.String("server", uuid), zap.Error(err))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if state, exists := states[s.Uuid]; exists {
|
|
||||||
s.SetState(state)
|
|
||||||
zap.S().Debugw("loaded server state from cache", zap.String("server", s.Uuid), zap.String("state", s.GetState()))
|
|
||||||
}
|
|
||||||
|
|
||||||
servers.Add(s)
|
|
||||||
}(uuid, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait until we've processed all of the configuration files in the directory
|
|
||||||
// before continuing.
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initializes a server using a data byte array. This will be marshaled into the
|
|
||||||
// given struct using a YAML marshaler. This will also configure the given environment
|
|
||||||
// for a server.
|
|
||||||
func FromConfiguration(data *api.ServerConfigurationResponse) (*Server, error) {
|
|
||||||
s := new(Server)
|
|
||||||
|
|
||||||
if err := defaults.Set(s); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.UpdateDataStructure(data.Settings, false); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
s.AddEventListeners()
|
|
||||||
|
|
||||||
// Right now we only support a Docker based environment, so I'm going to hard code
|
|
||||||
// this logic in. When we're ready to support other environment we'll need to make
|
|
||||||
// some modifications here obviously.
|
|
||||||
if err := NewDockerEnvironment(s); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
s.Cache = cache.New(time.Minute*10, time.Minute*15)
|
|
||||||
s.Archiver = Archiver{
|
|
||||||
Server: s,
|
|
||||||
}
|
|
||||||
s.Filesystem = Filesystem{
|
|
||||||
Configuration: &config.Get().System,
|
|
||||||
Server: s,
|
|
||||||
}
|
|
||||||
s.Resources = ResourceUsage{}
|
|
||||||
|
|
||||||
// Forces the configuration to be synced with the panel.
|
|
||||||
if err := s.SyncWithConfiguration(data); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return s, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
 // Returns all of the environment variables that should be assigned to a running
@@ -244,26 +73,30 @@ func (s *Server) GetEnvironmentVariables() []string {
 
     var out = []string{
         fmt.Sprintf("TZ=%s", zone),
-        fmt.Sprintf("STARTUP=%s", s.Invocation),
-        fmt.Sprintf("SERVER_MEMORY=%d", s.Build.MemoryLimit),
-        fmt.Sprintf("SERVER_IP=%s", s.Allocations.DefaultMapping.Ip),
-        fmt.Sprintf("SERVER_PORT=%d", s.Allocations.DefaultMapping.Port),
+        fmt.Sprintf("STARTUP=%s", s.Config().Invocation),
+        fmt.Sprintf("SERVER_MEMORY=%d", s.Build().MemoryLimit),
+        fmt.Sprintf("SERVER_IP=%s", s.Config().Allocations.DefaultMapping.Ip),
+        fmt.Sprintf("SERVER_PORT=%d", s.Config().Allocations.DefaultMapping.Port),
     }
 
 eloop:
-    for k, v := range s.EnvVars {
+    for k := range s.Config().EnvVars {
         for _, e := range out {
             if strings.HasPrefix(e, strings.ToUpper(k)) {
                 continue eloop
             }
         }
 
-        out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
+        out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), s.Config().EnvVars.Get(k)))
     }
 
     return out
 }
 
+func (s *Server) Log() *log.Entry {
+    return log.WithField("server", s.Id())
+}
 
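The eloop label in GetEnvironmentVariables above is what lets an egg-supplied variable be skipped when it would collide with one of the defaults: continue eloop advances the outer loop over keys rather than the inner loop over the already-built slice. A standalone sketch of that labeled-continue pattern; the variable names and sample values are illustrative only:

package main

import (
    "fmt"
    "strings"
)

func main() {
    defaults := []string{"SERVER_IP=127.0.0.1", "SERVER_PORT=25565"}
    extra := map[string]string{"server_port": "8080", "JAVA_OPTS": "-Xmx1G"}

    out := append([]string{}, defaults...)

eloop:
    for k, v := range extra {
        for _, e := range out {
            // If a default already defines this key, jump to the next key of
            // the outer loop instead of appending a duplicate.
            if strings.HasPrefix(e, strings.ToUpper(k)) {
                continue eloop
            }
        }

        out = append(out, fmt.Sprintf("%s=%s", strings.ToUpper(k), v))
    }

    fmt.Println(out)
}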
 // Syncs the state of the server on the Panel with Wings. This ensures that we're always
 // using the state of the server from the Panel and allows us to not require successful
 // API calls to Wings to do things.
@@ -293,7 +126,10 @@ func (s *Server) SyncWithConfiguration(cfg *api.ServerConfigurationResponse) err
         return errors.WithStack(err)
     }
 
-    s.processConfiguration = cfg.ProcessConfiguration
+    s.Lock()
+    s.procConfig = cfg.ProcessConfiguration
+    s.Unlock()
 
     return nil
 }
 
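The new Lock/Unlock pair around the process configuration assignment is the usual Go pattern for guarding a field that other goroutines read concurrently. A generic sketch, assuming a server type that embeds sync.RWMutex the way the type in this diff appears to; the field and method names below are illustrative, not the Wings API:

package main

import (
    "fmt"
    "sync"
)

type ProcessConfiguration struct {
    StartupLine string
}

type Server struct {
    sync.RWMutex

    procConfig *ProcessConfiguration
}

// Writer: take the exclusive lock while swapping the pointer.
func (s *Server) setProcConfig(cfg *ProcessConfiguration) {
    s.Lock()
    s.procConfig = cfg
    s.Unlock()
}

// Reader: a shared lock is enough for concurrent reads.
func (s *Server) ProcConfig() *ProcessConfiguration {
    s.RLock()
    defer s.RUnlock()

    return s.procConfig
}

func main() {
    s := new(Server)
    s.setProcConfig(&ProcessConfiguration{StartupLine: "java -jar server.jar"})
    fmt.Println(s.ProcConfig().StartupLine)
}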
@@ -318,7 +154,7 @@ func (s *Server) CreateEnvironment() error {
 
 // Gets the process configuration data for the server.
 func (s *Server) GetProcessConfiguration() (*api.ServerConfigurationResponse, *api.RequestError, error) {
-    return api.NewRequester().GetServerConfiguration(s.Uuid)
+    return api.NewRequester().GetServerConfiguration(s.Id())
 }
 
 // Helper function that can receieve a power action and then process the
@@ -328,11 +164,7 @@ func (s *Server) HandlePowerAction(action PowerAction) error {
     case "start":
         return s.Environment.Start()
     case "restart":
-        if err := s.Environment.WaitForStop(60, false); err != nil {
-            return err
-        }
-
-        return s.Environment.Start()
+        return s.Environment.Restart()
     case "stop":
         return s.Environment.Stop()
     case "kill":
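The restart case now delegates to s.Environment.Restart() rather than chaining WaitForStop and Start inline. The environment implementation is not part of this diff; a hypothetical sketch of what such a helper might wrap, mirroring the removed call sequence with a toy environment type:

package main

import (
    "errors"
    "fmt"
    "time"
)

// A toy environment used only to illustrate the call order; the real Docker
// environment in Wings is considerably more involved.
type fakeEnvironment struct{ running bool }

func (e *fakeEnvironment) WaitForStop(seconds int, terminate bool) error {
    // Pretend the process exits well within the allowed window.
    time.Sleep(10 * time.Millisecond)
    e.running = false
    return nil
}

func (e *fakeEnvironment) Start() error {
    if e.running {
        return errors.New("already running")
    }
    e.running = true
    return nil
}

// Restart mirrors the removed inline sequence: wait for a clean stop, then start.
func (e *fakeEnvironment) Restart() error {
    if err := e.WaitForStop(60, false); err != nil {
        return err
    }
    return e.Start()
}

func main() {
    env := &fakeEnvironment{running: true}
    fmt.Println(env.Restart()) // <nil>
}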
@@ -341,3 +173,8 @@ func (s *Server) HandlePowerAction(action PowerAction) error {
         return errors.New("an invalid power action was provided")
     }
 }
+
+// Checks if the server is marked as being suspended or not on the system.
+func (s *Server) IsSuspended() bool {
+    return s.Config().Suspended
+}
@@ -5,7 +5,6 @@ import (
     "fmt"
     "github.com/pkg/errors"
     "github.com/pterodactyl/wings/config"
-    "go.uber.org/zap"
     "io"
     "io/ioutil"
     "os"
@@ -14,6 +13,13 @@ import (
 
 var stateMutex sync.Mutex
 
+const (
+    ProcessOfflineState  = "offline"
+    ProcessStartingState = "starting"
+    ProcessRunningState  = "running"
+    ProcessStoppingState = "stopping"
+)
+
 // Returns the state of the servers.
 func getServerStates() (map[string]string, error) {
     // Request a lock after we check if the file exists.
@@ -41,7 +47,7 @@ func saveServerStates() error {
     // Get the states of all servers on the daemon.
     states := map[string]string{}
     for _, s := range GetServers().All() {
-        states[s.Uuid] = s.GetState()
+        states[s.Id()] = s.GetState()
     }
 
     // Convert the map to a json object.
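saveServerStates collects a state string per server and, per the trailing comment, serializes that map to JSON before writing it to disk. A rough sketch of that persist step; the file path and the UUID keys below are placeholders, not the daemon's real locations:

package main

import (
    "encoding/json"
    "io/ioutil"
    "log"
)

func main() {
    // Stand-in for the per-server state map built from GetServers().All().
    states := map[string]string{
        "0f109825-ad34-4f44-a183-5a5b7dcc27fa": "running",
        "6c0e6cdb-6bcb-4a42-9a93-0c2b3a0e7f11": "offline",
    }

    data, err := json.Marshal(states)
    if err != nil {
        log.Fatal(err)
    }

    // Illustrative location only; the daemon writes into its own data directory.
    if err := ioutil.WriteFile("/tmp/wings_states.json", data, 0644); err != nil {
        log.Fatal(err)
    }
}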
@@ -61,13 +67,6 @@ func saveServerStates() error {
     return nil
 }
 
-const (
-    ProcessOfflineState  = "offline"
-    ProcessStartingState = "starting"
-    ProcessRunningState  = "running"
-    ProcessStoppingState = "stopping"
-)
-
 // Sets the state of the server internally. This function handles crash detection as
 // well as reporting to event listeners for the server.
 func (s *Server) SetState(state string) error {
@@ -77,16 +76,14 @@ func (s *Server) SetState(state string) error {
 
     prevState := s.GetState()
 
-    // Obtain a mutex lock and update the current state of the server.
-    s.Lock()
-    s.State = state
+    // Update the currently tracked state for the server.
+    s.Proc().setInternalState(state)
 
     // Emit the event to any listeners that are currently registered.
-    zap.S().Debugw("saw server status change event", zap.String("server", s.Uuid), zap.String("status", s.State))
-    s.Events().Publish(StatusEvent, s.State)
-    // Release the lock as it is no longer needed for the following actions.
-    s.Unlock()
+    if prevState != state {
+        s.Log().WithField("status", s.Proc().State).Debug("saw server status change event")
+        s.Events().Publish(StatusEvent, s.Proc().State)
+    }
 
     // Persist this change to the disk immediately so that should the Daemon be stopped or
     // crash we can immediately restore the server state.
@@ -98,7 +95,7 @@ func (s *Server) SetState(state string) error {
     // to the disk should we forget to do it elsewhere.
     go func() {
         if err := saveServerStates(); err != nil {
-            zap.S().Warnw("failed to write server states to disk", zap.Error(err))
+            s.Log().WithField("error", err).Warn("failed to write server states to disk")
         }
     }()
 
@@ -111,14 +108,14 @@ func (s *Server) SetState(state string) error {
     // separate thread as to not block any actions currently taking place in the flow
     // that called this function.
     if (prevState == ProcessStartingState || prevState == ProcessRunningState) && s.GetState() == ProcessOfflineState {
-        zap.S().Infow("detected server as entering a potentially crashed state; running handler", zap.String("server", s.Uuid))
+        s.Log().Info("detected server as entering a crashed state; running crash handler")
 
         go func(server *Server) {
             if err := server.handleServerCrash(); err != nil {
                 if IsTooFrequentCrashError(err) {
-                    zap.S().Infow("did not restart server after crash; occurred too soon after last", zap.String("server", server.Uuid))
+                    server.Log().Info("did not restart server after crash; occurred too soon after the last")
                 } else {
-                    zap.S().Errorw("failed to handle server crash state", zap.String("server", server.Uuid), zap.Error(err))
+                    server.Log().WithField("error", err).Error("failed to handle server crash")
                 }
             }
         }(s)
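The crash handler only restarts a server when the crash did not follow too closely on the previous one, which is what IsTooFrequentCrashError signals. That check is not shown in this diff, but it amounts to a simple time-window comparison; a purely illustrative sketch:

package main

import (
    "fmt"
    "time"
)

// tooFrequent reports whether a crash at `now` happened within `window` of the
// previous crash. Illustrative only; the real detection logic lives in the
// server's crash handler and is not part of this diff.
func tooFrequent(lastCrash, now time.Time, window time.Duration) bool {
    return now.Sub(lastCrash) < window
}

func main() {
    last := time.Now().Add(-30 * time.Second)

    // With a 60 second window, a crash 30 seconds after the last one counts as
    // "too frequent", so the server would not be restarted automatically.
    fmt.Println(tooFrequent(last, time.Now(), 60*time.Second)) // true
}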
@@ -129,15 +126,14 @@ func (s *Server) SetState(state string) error {
 
 // Returns the current state of the server in a race-safe manner.
 func (s *Server) GetState() string {
-    s.RLock()
-    defer s.RUnlock()
-
-    return s.State
+    return s.Proc().getInternalState()
 }
 
 // Determines if the server state is running or not. This is different than the
 // environment state, it is simply the tracked state from this daemon instance, and
 // not the response from Docker.
 func (s *Server) IsRunning() bool {
-    return s.GetState() == ProcessRunningState || s.GetState() == ProcessStartingState
+    st := s.GetState()
+
+    return st == ProcessRunningState || st == ProcessStartingState
 }
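GetState now reads through s.Proc().getInternalState() instead of taking the server's read lock directly, so every access to the tracked state funnels through one accessor. Those helpers are not shown in this diff; presumably they wrap the state string in a mutex roughly like this (the struct and field names below are guesses, not the actual Wings types):

package main

import (
    "fmt"
    "sync"
)

// A guessed shape for the process-state holder returned by s.Proc(); the real
// struct in Wings tracks more than just the state string.
type ProcessState struct {
    mu    sync.RWMutex
    State string
}

func (p *ProcessState) setInternalState(state string) {
    p.mu.Lock()
    p.State = state
    p.mu.Unlock()
}

func (p *ProcessState) getInternalState() string {
    p.mu.RLock()
    defer p.mu.RUnlock()

    return p.State
}

func main() {
    p := new(ProcessState)
    p.setInternalState("running")
    fmt.Println(p.getInternalState())
}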
@@ -5,7 +5,6 @@ import (
     "github.com/buger/jsonparser"
     "github.com/imdario/mergo"
     "github.com/pkg/errors"
-    "go.uber.org/zap"
 )
 
 // Merges data passed through in JSON form into the existing server object.
@@ -16,7 +15,7 @@ import (
 // it is up to the specific environment to determine what needs to happen when
 // that is the case.
 func (s *Server) UpdateDataStructure(data []byte, background bool) error {
-    src := new(Server)
+    src := new(Configuration)
     if err := json.Unmarshal(data, src); err != nil {
         return errors.WithStack(err)
     }
@@ -24,16 +23,42 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
     // Don't allow obviously corrupted data to pass through into this function. If the UUID
     // doesn't match something has gone wrong and the API is attempting to meld this server
     // instance into a totally different one, which would be bad.
-    if src.Uuid != "" && s.Uuid != "" && src.Uuid != s.Uuid {
+    if src.Uuid != "" && s.Id() != "" && src.Uuid != s.Id() {
         return errors.New("attempting to merge a data stack with an invalid UUID")
     }
 
+    // Grab a copy of the configuration to work on.
+    c := *s.Config()
+
+    // Lock our copy of the configuration since the defered unlock will end up acting upon this
+    // new memory address rather than the old one. If we don't lock this, the defered unlock will
+    // cause a panic when it goes to run. However, since we only update s.cfg at the end, if there
+    // is an error before that point we'll still properly unlock the original configuration for the
+    // server.
+    c.mu.Lock()
+
+    // Lock the server configuration while we're doing this merge to avoid anything
+    // trying to overwrite it or make modifications while we're sorting out what we
+    // need to do.
+    s.cfg.mu.Lock()
+    defer s.cfg.mu.Unlock()
+
     // Merge the new data object that we have received with the existing server data object
     // and then save it to the disk so it is persistent.
-    if err := mergo.Merge(s, src, mergo.WithOverride); err != nil {
+    if err := mergo.Merge(&c, src, mergo.WithOverride); err != nil {
         return errors.WithStack(err)
     }
 
+    // Don't explode if we're setting CPU limits to 0. Mergo sees that as an empty value
+    // so it won't override the value we've passed through in the API call. However, we can
+    // safely assume that we're passing through valid data structures here. I foresee this
+    // backfiring at some point, but until then...
+    //
+    // We'll go ahead and do this with swap as well.
+    c.Build.CpuLimit = src.Build.CpuLimit
+    c.Build.Swap = src.Build.Swap
+    c.Build.DiskSpace = src.Build.DiskSpace
+
     // Mergo can't quite handle this boolean value correctly, so for now we'll just
     // handle this edge case manually since none of the other data passed through in this
     // request is going to be boolean. Allegedly.
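The manual Build assignments added above exist because mergo treats a zero value in the source as "empty" even with mergo.WithOverride, so an API call that sets a limit to 0 would otherwise be silently ignored. A small demonstration of that behaviour; the struct here is illustrative, not the actual Wings configuration type:

package main

import (
    "fmt"

    "github.com/imdario/mergo"
)

type Build struct {
    CpuLimit    int64
    MemoryLimit int64
}

func main() {
    dst := Build{CpuLimit: 200, MemoryLimit: 1024}
    src := Build{CpuLimit: 0, MemoryLimit: 2048}

    // WithOverride copies non-zero fields from src over dst, but a zero
    // CpuLimit counts as "empty" and is skipped entirely.
    if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
        panic(err)
    }

    fmt.Println(dst.CpuLimit, dst.MemoryLimit) // 200 2048

    // So a "set this to zero" update has to be applied by hand afterwards,
    // exactly as the hunk above does for CpuLimit, Swap and DiskSpace.
    dst.CpuLimit = src.CpuLimit
    fmt.Println(dst.CpuLimit, dst.MemoryLimit) // 0 2048
}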
@@ -42,7 +67,7 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
             return errors.WithStack(err)
         }
     } else {
-        s.Container.OomDisabled = v
+        c.Container.OomDisabled = v
     }
 
     // Mergo also cannot handle this boolean value.
@@ -51,21 +76,28 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
             return errors.WithStack(err)
         }
     } else {
-        s.Suspended = v
+        c.Suspended = v
    }
 
     // Environment and Mappings should be treated as a full update at all times, never a
     // true patch, otherwise we can't know what we're passing along.
     if src.EnvVars != nil && len(src.EnvVars) > 0 {
-        s.EnvVars = src.EnvVars
+        c.EnvVars = src.EnvVars
     }
 
     if src.Allocations.Mappings != nil && len(src.Allocations.Mappings) > 0 {
-        s.Allocations.Mappings = src.Allocations.Mappings
+        c.Allocations.Mappings = src.Allocations.Mappings
     }
 
+    if src.Mounts != nil && len(src.Mounts) > 0 {
+        c.Mounts = src.Mounts
+    }
+
+    // Update the configuration once we have a lock on the configuration object.
+    s.cfg = c
+
     if background {
-        s.runBackgroundActions()
+        go s.runBackgroundActions()
     }
 
     return nil
@@ -78,31 +110,22 @@ func (s *Server) UpdateDataStructure(data []byte, background bool) error {
 // These tasks run in independent threads where relevant to speed up any updates
 // that need to happen.
 func (s *Server) runBackgroundActions() {
+    // Check if the s is now suspended, and if so and the process is not terminated
+    // yet, do it immediately.
+    if s.IsSuspended() && s.GetState() != ProcessOfflineState {
+        s.Log().Info("server suspended with running process state, terminating now")
+
+        if err := s.Environment.WaitForStop(10, true); err != nil {
+            s.Log().WithField("error", err).Warn("failed to terminate server environment after suspension")
+        }
+    }
+
+    if !s.IsSuspended() {
         // Update the environment in place, allowing memory and CPU usage to be adjusted
         // on the fly without the user needing to reboot (theoretically).
-    go func(server *Server) {
-        if err := server.Environment.InSituUpdate(); err != nil {
-            zap.S().Warnw(
-                "failed to perform in-situ update of server environment",
-                zap.String("server", server.Uuid),
-                zap.Error(err),
-            )
-        }
-    }(s)
-
-    // Check if the server is now suspended, and if so and the process is not terminated
-    // yet, do it immediately.
-    go func(server *Server) {
-        if server.Suspended && server.GetState() != ProcessOfflineState {
-            zap.S().Infow("server suspended with running process state, terminating now", zap.String("server", server.Uuid))
-
-            if err := server.Environment.WaitForStop(10, true); err != nil {
-                zap.S().Warnw(
-                    "failed to stop server environment after seeing suspension",
-                    zap.String("server", server.Uuid),
-                    zap.Error(err),
-                )
+        s.Log().Info("performing server limit modification on-the-fly")
+        if err := s.Environment.InSituUpdate(); err != nil {
+            s.Log().WithField("error", err).Warn("failed to perform on-the-fly update of the server environment")
         }
     }
-    }(s)
 }
@@ -1,13 +1,14 @@
 package sftp
 
 import (
+    "github.com/apex/log"
     "github.com/pkg/errors"
     "github.com/pterodactyl/sftp-server"
     "github.com/pterodactyl/wings/api"
     "github.com/pterodactyl/wings/config"
     "github.com/pterodactyl/wings/server"
     "go.uber.org/zap"
-    "path"
+    "regexp"
 )
 
 func Initialize(config *config.Configuration) error {
@@ -21,8 +22,6 @@ func Initialize(config *config.Configuration) error {
         ReadOnly: config.System.Sftp.ReadOnly,
         BindAddress: config.System.Sftp.Address,
         BindPort: config.System.Sftp.Port,
-        ServerDataFolder: path.Join(config.System.Data, "/servers"),
-        DisableDiskCheck: config.System.Sftp.DisableDiskChecking,
     },
     CredentialValidator: validateCredentials,
     PathValidator: validatePath,
@@ -41,7 +40,7 @@ func Initialize(config *config.Configuration) error {
     // a long running operation.
     go func(instance *sftp_server.Server) {
         if err := c.Initalize(); err != nil {
-            zap.S().Named("sftp").Errorw("failed to initialize SFTP subsystem", zap.Error(errors.WithStack(err)))
+            log.WithField("subsystem", "sftp").WithField("error", errors.WithStack(err)).Error("failed to initialize SFTP subsystem")
         }
     }(c)
 
@@ -50,7 +49,7 @@ func Initialize(config *config.Configuration) error {
 
 func validatePath(fs sftp_server.FileSystem, p string) (string, error) {
     s := server.GetServers().Find(func(server *server.Server) bool {
-        return server.Uuid == fs.UUID
+        return server.Id() == fs.UUID
     })
 
     if s == nil {
@@ -62,7 +61,7 @@ func validatePath(fs sftp_server.FileSystem, p string) (string, error) {
 
 func validateDiskSpace(fs sftp_server.FileSystem) bool {
     s := server.GetServers().Find(func(server *server.Server) bool {
-        return server.Uuid == fs.UUID
+        return server.Id() == fs.UUID
     })
 
     if s == nil {
@@ -72,21 +71,48 @@ func validateDiskSpace(fs sftp_server.FileSystem) bool {
     return s.Filesystem.HasSpaceAvailable()
 }
 
+var validUsernameRegexp = regexp.MustCompile(`^(?i)(.+)\.([a-z0-9]{8})$`)
+
 // Validates a set of credentials for a SFTP login aganist Pterodactyl Panel and returns
 // the server's UUID if the credentials were valid.
 func validateCredentials(c sftp_server.AuthenticationRequest) (*sftp_server.AuthenticationResponse, error) {
+    log.WithFields(log.Fields{"subsystem": "sftp", "username": c.User}).Debug("validating credentials for SFTP connection")
+
+    f := log.Fields{
+        "subsystem": "sftp",
+        "username":  c.User,
+        "ip":        c.IP,
+    }
+
+    // If the username doesn't meet the expected format that the Panel would even recognize just go ahead
+    // and bail out of the process here to avoid accidentially brute forcing the panel if a bot decides
+    // to connect to spam username attempts.
+    if !validUsernameRegexp.MatchString(c.User) {
+        log.WithFields(f).Warn("failed to validate user credentials (invalid format)")
+
+        return nil, new(sftp_server.InvalidCredentialsError)
+    }
+
     resp, err := api.NewRequester().ValidateSftpCredentials(c)
     if err != nil {
+        if sftp_server.IsInvalidCredentialsError(err) {
+            log.WithFields(f).Warn("failed to validate user credentials (invalid username or password)")
+        } else {
+            log.WithFields(f).Error("encountered an error while trying to validate user credentials")
+        }
+
         return resp, err
     }
 
     s := server.GetServers().Find(func(server *server.Server) bool {
-        return server.Uuid == resp.Server
+        return server.Id() == resp.Server
     })
 
     if s == nil {
-        return resp, errors.New("no server found with that UUID")
+        return resp, errors.New("no matching server with UUID found")
     }
 
+    s.Log().WithFields(f).Debug("credentials successfully validated and matched user to server instance")
+
     return resp, err
 }
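The new validUsernameRegexp lets obviously malformed SFTP usernames be rejected before any Panel API request is made; the pattern expects something of the form name.xxxxxxxx, where the suffix is an eight character alphanumeric identifier. A quick check of the pattern against a few made-up usernames (all of the sample values below are hypothetical):

package main

import (
    "fmt"
    "regexp"
)

var validUsernameRegexp = regexp.MustCompile(`^(?i)(.+)\.([a-z0-9]{8})$`)

func main() {
    // Hypothetical usernames, purely for illustration of the match behaviour.
    for _, user := range []string{"admin.0f109825", "admin", "root.!!!", "Admin.ABCD1234"} {
        fmt.Println(user, validUsernameRegexp.MatchString(user))
    }
    // Output: true, false, false, true — the (?i) flag makes the suffix check
    // case-insensitive, and anything without a dot-plus-8-characters tail fails.
}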
@@ -1,6 +1,6 @@
 package system
 
-const (
+var (
     // The current version of this software.
     Version = "0.0.1"
 )
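Changing the Version declaration from const to var matters for release tooling: Go's -X linker flag can only overwrite package-level string variables, never constants, so with a var the version string can be stamped at build time instead of being edited in source. Presumably that is the motivation here; the usual invocation would look something like the comment below, with $VERSION as an illustrative placeholder:

package system

// With a var (rather than a const) the value below can be overridden when building:
//
//   go build -ldflags "-X github.com/pterodactyl/wings/system.Version=$VERSION"
//
// -X only works on package-level string variables, which is presumably why the
// declaration in the hunk above changed from const to var.
var (
    // The current version of this software.
    Version = "0.0.1"
)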