Compare commits
276 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 6902422229 | |
| | 5f5b2bc84e | |
| | 81a411a42c | |
| | 37c6b85489 | |
| | 0e3778ac47 | |
| | ad2618bc6f | |
| | fb2dc39a47 | |
| | 0919fb2da6 | |
| | 33f5cb7df4 | |
| | 8897be661b | |
| | 2a98faf360 | |
| | b6871f87ec | |
| | d5605abc15 | |
| | 2eb721bbe7 | |
| | a8ee5463ce | |
| | 1e1549342f | |
| | 4ed0bf522b | |
| | 7541e769e0 | |
| | 00195b4a5b | |
| | 5c56ddf5d6 | |
| | 683c766d0f | |
| | 1da415c177 | |
| | 065da77afa | |
| | e3b0b91912 | |
| | bfff094216 | |
| | 98c68142cd | |
| | 62cbe5e135 | |
| | 6775c17324 | |
| | aa287d21cf | |
| | 1393937904 | |
| | de9b413bc2 | |
| | ba6cec9615 | |
| | 4bd18f7dd8 | |
| | adc0732af3 | |
| | 225f8aa904 | |
| | df721f45f8 | |
| | aca9ffc122 | |
| | 13541524c3 | |
| | 4411493006 | |
| | bc79ce540e | |
| | 31757a68a9 | |
| | f3a6ee7a45 | |
| | ab86fb703a | |
| | 981071cda8 | |
| | fecacc1339 | |
| | 6041636076 | |
| | 93506994a5 | |
| | 82f70c2755 | |
| | 5d070cbdc5 | |
| | 56af6fc1f8 | |
| | 8920f919b1 | |
| | 894f2055a0 | |
| | f7788e10a0 | |
| | bcca2550d1 | |
| | 17b46a4a8f | |
| | 3f84ee694b | |
| | 5021ea6a86 | |
| | 63dac51692 | |
| | 6ef0bd7496 | |
| | 66b6f40b61 | |
| | 7dd0acebc0 | |
| | 6a286fb444 | |
| | 464f26a2c9 | |
| | 2968ea3498 | |
| | 67ecbd667a | |
| | b17cf5b93d | |
| | f6669213e8 | |
| | c2cfaf44b5 | |
| | 05c04c4350 | |
| | 80faea3286 | |
| | 9480ccdbba | |
| | d45a159456 | |
| | e9e70b6081 | |
| | b10e4dd437 | |
| | 2c1b211280 | |
| | 3459c25be0 | |
| | e396b88cb5 | |
| | e0cf18299a | |
| | 1e15beb953 | |
| | b0fa9619de | |
| | 0cb3b815d1 | |
| | a48abc92ad | |
| | c228acaafc | |
| | 1b8db12fde | |
| | 0414dbed8f | |
| | 4cefe74a47 | |
| | c9c949a091 | |
| | 96256ac63e | |
| | 6701aa6dc1 | |
| | ff8926bba8 | |
| | abeb9655f9 | |
| | 8192244fec | |
| | 94f4207d60 | |
| | 217ca72eb3 | |
| | 648072436f | |
| | 6fe2468a5a | |
| | 948d927eb9 | |
| | b2eaa3f7f8 | |
| | 93417dddb1 | |
| | 044c46fc9a | |
| | c9d972d544 | |
| | 0aab4b1ac2 | |
| | 4f4b4fd2e6 | |
| | 66c9be357c | |
| | 1d36811dfe | |
| | 6e74123c65 | |
| | b82f5f9a32 | |
| | 1937d0366d | |
| | 963a906c30 | |
| | 3f6eb7e41a | |
| | a822c7c340 | |
| | b8fb86f5a4 | |
| | ee0c7f09b3 | |
| | d3ddf8cf39 | |
| | bf554e8ed2 | |
| | d6e189df5e | |
| | 68749616ad | |
| | 7549eb13a0 | |
| | 902f9f5944 | |
| | 2cf24994d9 | |
| | 3a7c4822f8 | |
| | b26db99ee7 | |
| | de4d2f4724 | |
| | 640e30de8a | |
| | e75118e0f0 | |
| | a4c8b8714b | |
| | ddb683efb6 | |
| | 5be6e20b03 | |
| | 1ba3631cc1 | |
| | 94d41bc1f5 | |
| | a4c68eed16 | |
| | 22c53c365a | |
| | 59c30c2842 | |
| | 3842f054a5 | |
| | 8fa4c50379 | |
| | 510d46289b | |
| | 6e5b14c466 | |
| | b3922864f2 | |
| | 31d4c1d34f | |
| | 0cdfdc725c | |
| | bb132243ed | |
| | d96115325a | |
| | a450abc080 | |
| | 0a45ea44a4 | |
| | e05c601325 | |
| | 5c78cb9ab3 | |
| | 901ab1157d | |
| | c0523df696 | |
| | f7f5623c71 | |
| | 184013b652 | |
| | f8282c56cb | |
| | c8d297a056 | |
| | c718da20e3 | |
| | 9c53436470 | |
| | 17daa2071f | |
| | 3495fb1c76 | |
| | 0c93e5ed02 | |
| | 737e1fcef6 | |
| | 16118874cf | |
| | b161ccafaf | |
| | 59a111de39 | |
| | c0a641247b | |
| | 057cdbd927 | |
| | 0ecc166dcd | |
| | 3a26a5d39d | |
| | 9ae75a399b | |
| | 01b766dacc | |
| | 904e0a574d | |
| | acd6dc62d0 | |
| | 8f26c31df6 | |
| | 84c05efaa5 | |
| | d72d96f9d0 | |
| | 981f04fbd8 | |
| | 463dd6f4ec | |
| | 02034211c1 | |
| | fafda283b1 | |
| | 199be20717 | |
| | 1c825d2a74 | |
| | e936f22419 | |
| | c253a4bac0 | |
| | ecb15a224a | |
| | 73f1399e47 | |
| | 8e29ffed50 | |
| | 2d4dd05ec9 | |
| | fb3460f5f6 | |
| | cff7667155 | |
| | 7871c0928f | |
| | 04d714f19c | |
| | a18f60bd05 | |
| | deea5babbc | |
| | e41b3dc09a | |
| | 6366794838 | |
| | c01a39d881 | |
| | 3f2ce59766 | |
| | bcf0c72e47 | |
| | bd5892b70c | |
| | 70ea61f22f | |
| | 1b0c2e1764 | |
| | 3d532f6e0b | |
| | 81fd1a3758 | |
| | b52c3fb61e | |
| | bc3d92f9e6 | |
| | ee08829a28 | |
| | 83f0d2c953 | |
| | 605be3ebad | |
| | ca6dc2c964 | |
| | dc41126e25 | |
| | da4c542724 | |
| | e7d93a5248 | |
| | 51aa4c73cd | |
| | 4a7510d36f | |
| | ba0a1a651e | |
| | 068f41393d | |
| | 169e8b8f8b | |
| | c6e2889075 | |
| | f62f714863 | |
| | da9ace5d9d | |
| | 97345123ce | |
| | 8aa9105ed3 | |
| | e8088f85d0 | |
| | 0c8476c79b | |
| | a7a66e8bc0 | |
| | 65a861a9b6 | |
| | de51fd1c51 | |
| | 40c70673cd | |
| | 73b221d022 | |
| | 287b286940 | |
| | 1d0e85cf55 | |
| | 30ec6dc78d | |
| | 121a4d1146 | |
| | b9be373671 | |
| | aedd0e406c | |
| | 82b23ef638 | |
| | d970ec35b7 | |
| | e2872e786e | |
| | f81e35d960 | |
| | 672fb860ea | |
| | 8081c83de4 | |
| | f379d0e54a | |
| | ffb6bd72ef | |
| | 488ef9de54 | |
| | 34349d4b48 | |
| | 2197d82957 | |
| | 20ece60a72 | |
| | de0c67d066 | |
| | 68bdcb3cbc | |
| | 205c4d541e | |
| | ef999a039c | |
| | be9d1a3986 | |
| | 0989c78d4b | |
| | 1683675807 | |
| | 536f00a5e5 | |
| | 33e584b447 | |
| | 4b17ac4f1c | |
| | 944d381778 | |
| | 3fce1b98d5 | |
| | a74be8f4eb | |
| | af9ed4bff1 | |
| | 08d1efb475 | |
| | 65664b63e7 | |
| | 912d95de24 | |
| | 13c253780a | |
| | fe572beada | |
| | 384b9a3c28 | |
| | 05cfb59e18 | |
| | 317e54acc5 | |
| | 5475cb02c1 | |
| | 1239b1c0ca | |
| | b8598e90d4 | |
| | fcccda2761 | |
| | f67889c2ca | |
| | b8766d3c82 | |
| | ca3becfb55 | |
| | 41a67933eb | |
| | 334b3e8d10 | |
| | c4703f5541 | |
56 .dockerignore Normal file
@@ -0,0 +1,56 @@
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
.idea/*

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/

# dep related files and folders
/vendor*

# ignore logfiles (/* so the .gitkeep override works)
/logs/*

# ignore configuration file
/config.yml

# Ignore Vagrant stuff
/.vagrant

# Builds by gox
/build

# Go Coverage tool
/coverage.out

# The built executable
wings
wings.exe

# IDE/Editor files (VS Code)
/.vscode

# test files
test_*/

# Keep all gitkeep files (This needs to stay at the bottom)
!.gitkeep
debug
.DS_Store
*.pprof
*.pdf

Dockerfile
CHANGELOG.md
Makefile
README.md
wings-api.paw
2 .github/FUNDING.yml vendored Normal file
@@ -0,0 +1,2 @@
github: [DaneEveritt]
custom: ["https://paypal.me/PterodactylSoftware"]
72 .github/workflows/build-test.yml vendored
@@ -1,56 +1,66 @@
name: "Build & Test"

name: Run Tests
on:
push:
branches-ignore:
- 'master'
- 'release/**'
branches:
- 'develop'
pull_request:

branches:
- 'develop'
jobs:
build:
strategy:
# Default is true, cancels jobs for other platforms in the matrix if one fails
fail-fast: false

matrix:
os: [ ubuntu-20.04 ]
go: [ 1.15 ]
go: [ '^1.15', '^1.16' ]
goos: [ linux ]
goarch: [ amd64, arm, arm64 ]

goarch: [ amd64, arm64 ]
runs-on: ${{ matrix.os }}

steps:
- uses: actions/checkout@v2

- uses: actions/setup-go@v2
- name: Code Checkout
uses: actions/checkout@v2
- name: Setup Go v${{ matrix.go }}
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go }}
- name: Print Environment
id: env
run: |
printf "Go Executable Path: $(which go)\n"
printf "Go Version: $(go version)\n"
printf "\n\nGo Environment:\n\n"
go env
printf "\n\nSystem Environment:\n\n"
env

echo "::set-output name=version_tag::${GITHUB_REF/refs\/tags\//}"
echo "::set-output name=short_sha::$(git rev-parse --short HEAD)"
echo "::set-output name=go_cache::$(go env GOCACHE)"
- name: Build Cache
uses: actions/cache@v2
with:
path: ${{ steps.env.outputs.go_cache }}
key: ${{ runner.os }}-${{ matrix.go }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-${{ matrix.go }}-go
- name: Get Dependencies
run: |
go get -v -t -d ./...
- name: Build
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: 0
SRC_PATH: github.com/pterodactyl/wings
run: |
go build -v -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${{ matrix.goos }}_${{ matrix.goarch }} wings.go

go build -v -trimpath -ldflags="-s -w -X ${SRC_PATH}/system.Version=dev-${GIT_COMMIT:0:7}" -o build/wings_${{ matrix.goos }}_${{ matrix.goarch }} wings.go
upx build/wings_${{ matrix.goos }}_${{ matrix.goarch }}
chmod +x build/wings_${{ matrix.goos }}_${{ matrix.goarch }}
- name: Test
run: go test ./...

- name: Compress binary and make it executable
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
run: |
upx build/wings_${{ matrix.goos }}_${{ matrix.goarch }} && chmod +x build/wings_${{ matrix.goos }}_${{ matrix.goarch }}

- uses: actions/upload-artifact@v2
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
with:
name: wings_${{ matrix.goos }}_${{ matrix.goarch }}
path: build/wings_${{ matrix.goos }}_${{ matrix.goarch }}

- uses: actions/upload-artifact@v2
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
- name: Upload Artifact
uses: actions/upload-artifact@v2
if: ${{ matrix.go == '^1.15' && (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') }}
with:
name: wings_${{ matrix.goos }}_${{ matrix.goarch }}
path: build/wings_${{ matrix.goos }}_${{ matrix.goarch }}
40 .github/workflows/codeql-analysis.yml vendored
@@ -1,35 +1,31 @@
name: "Code scanning - action"

name: CodeQL
on:
push:
branches:
- 'develop'
pull_request:
branches:
- 'develop'
schedule:
- cron: '0 21 * * 6'

- cron: '0 9 * * 4'
jobs:
CodeQL-Build:

runs-on: ubuntu-latest

analyze:
name: Analyze
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
language:
- go
steps:
- name: Checkout repository
- name: Code Checkout
uses: actions/checkout@v2
with:
# We must fetch at least the immediate parents so that if this is
# a pull request then we can checkout the head.
fetch-depth: 2

# If this run was triggered by a pull request event, then checkout
# the head of the pull request instead of the merge commit.
- run: git checkout HEAD^2
- name: Checkout Head
run: git checkout HEAD^2
if: ${{ github.event_name == 'pull_request' }}

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
# Override language selection by uncommenting this and choosing your languages
with:
languages: go

languages: ${{ matrix.language }}
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
57 .github/workflows/docker.yml vendored Normal file
@@ -0,0 +1,57 @@
name: Publish Docker Image
on:
push:
branches:
- 'develop'

tags:
- 'v*'
jobs:
push:
name: Push
runs-on: ubuntu-20.04
# Always run against a tag, even if the commit into the tag has [docker skip] within the commit message.
if: "!contains(github.ref, 'develop') || (!contains(github.event.head_commit.message, 'skip docker') && !contains(github.event.head_commit.message, 'docker skip'))"
steps:
- name: Code Checkout
uses: actions/checkout@v2
- name: Docker Meta
id: docker_meta
uses: crazy-max/ghaction-docker-meta@v1
with:
images: ghcr.io/pterodactyl/wings
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Install buildx
uses: docker/setup-buildx-action@v1
with:
version: v0.5.1
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.REGISTRY_TOKEN }}
- name: Get Build Information
id: build_info
run: |
echo "::set-output name=version_tag::${GITHUB_REF/refs\/tags\//}"
echo "::set-output name=short_sha::$(git rev-parse --short HEAD)"
- name: Release Production Build
uses: docker/build-push-action@v2
if: "!contains(github.ref, 'develop')"
with:
build-args: |
VERSION=${{ steps.build_info.outputs.version_tag }}
labels: ${{ steps.docker_meta.outputs.labels }}
push: true
tags: ${{ steps.docker_meta.outputs.tags }}
- name: Release Development Build
uses: docker/build-push-action@v2
if: "contains(github.ref, 'develop')"
with:
build-args: |
VERSION=dev-${{ steps.build_info.outputs.short_sha }}
labels: ${{ steps.docker_meta.outputs.labels }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.docker_meta.outputs.tags }}
44 .github/workflows/release.yml vendored
@@ -1,53 +1,41 @@
name: "Release"

name: Create Release
on:
push:
tags:
- 'v*'

jobs:
release:
runs-on: ubuntu-20.04

steps:
- uses: actions/checkout@v2

- name: Code Checkout
uses: actions/checkout@v2
- uses: actions/setup-go@v2
with:
go-version: '1.15.2'

go-version: '^1.15'
- name: Build
env:
REF: ${{ github.ref }}
run: |
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" -o build/wings_linux_amd64 -v wings.go
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" -o build/wings_linux_arm64 -v wings.go
GOOS=linux GOARCH=arm go build -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" -o build/wings_linux_arm -v wings.go

CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" -o build/wings_linux_amd64 -v wings.go
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=${REF:11}" -o build/wings_linux_arm64 -v wings.go
- name: Test
run: go test ./...

- name: Compress binary and make it executable
run: |
upx --brute build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
upx build/wings_linux_amd64 && chmod +x build/wings_linux_amd64
upx build/wings_linux_arm64 && chmod +x build/wings_linux_arm64
upx build/wings_linux_arm && chmod +x build/wings_linux_arm

- name: Extract changelog
env:
REF: ${{ github.ref }}
run: |
sed -n "/^## ${REF:10}/,/^## /{/^## /b;p}" CHANGELOG.md > ./RELEASE_CHANGELOG
echo ::set-output name=version_name::`sed -nr "s/^## (${REF:10} .*)$/\1/p" CHANGELOG.md`

- name: Create checksum and add to changelog
run: |
SUM=`cd build && sha256sum wings_linux_amd64`
SUM2=`cd build && sha256sum wings_linux_arm64`
SUM3=`cd build && sha256sum wings_linux_arm`
echo -e "\n#### SHA256 Checksum\n\`\`\`\n$SUM\n$SUM2\n$SUM3\n\`\`\`\n" >> ./RELEASE_CHANGELOG
echo -e "$SUM\n$SUM2\n$SUM3" > checksums.txt

echo -e "\n#### SHA256 Checksum\n\`\`\`\n$SUM\n$SUM2\n\`\`\`\n" >> ./RELEASE_CHANGELOG
echo -e "$SUM\n$SUM2" > checksums.txt
- name: Create release branch
env:
REF: ${{ github.ref }}
@@ -61,7 +49,6 @@ jobs:
git add system/const.go
git commit -m "bump version for release"
git push

- name: Create Release
id: create_release
uses: actions/create-release@v1
@@ -73,7 +60,6 @@ jobs:
body_path: ./RELEASE_CHANGELOG
draft: true
prerelease: ${{ contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}

- name: Upload amd64 Binary
uses: actions/upload-release-asset@v1
env:
@@ -83,7 +69,6 @@ jobs:
asset_path: build/wings_linux_amd64
asset_name: wings_linux_amd64
asset_content_type: application/octet-stream

- name: Upload arm64 Binary
uses: actions/upload-release-asset@v1
env:
@@ -93,17 +78,6 @@ jobs:
asset_path: build/wings_linux_arm64
asset_name: wings_linux_arm64
asset_content_type: application/octet-stream

- name: Upload arm Binary
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: build/wings_linux_arm
asset_name: wings_linux_arm
asset_content_type: application/octet-stream

- name: Upload checksum
uses: actions/upload-release-asset@v1
env:
4 .gitignore vendored
@@ -22,6 +22,7 @@

# ignore configuration file
/config.yml
/config*.yml

# Ignore Vagrant stuff
/.vagrant
@@ -45,5 +46,6 @@ test_*/
# Keep all gitkeep files (This needs to stay at the bottom)
!.gitkeep
debug
data/.states.json
.DS_Store
*.pprof
*.pdf
47 .travis.yml
@@ -1,47 +0,0 @@
os: linux
dist: xenial
language: go

go:
- 1.13.x

go_import_path: "github.com/pterodactyl/wings"

services:
- docker

install:
- mkdir -p $GOPATH/bin

# Install used tools
- go get github.com/mitchellh/gox
- go get github.com/haya14busa/goverage
- go get github.com/schrej/godacov

- go mod download

script:
- make cross-build
- goverage -v -coverprofile=coverage.out ./...
- godacov -t $CODACY_TOKEN -r ./coverage.out -c $TRAVIS_COMMIT

deploy:
provider: releases
api_key:
secure: HQ8AvnSsOW2aDUKv25sU83SswK9rReGeFi68SotLGPdWyFBWJbp/JEHhw9swSqvhLPykx5QqLnRPG4nomOp2i5dVTXgM/7C3wQ2ULymkJDZqDJEAxjm1IuNsjXgcFqp0zcNXL3g0moaorHS2XZpzbgaewlCyYoEb+3SZUGzOCPIjSFvoIBaAYx6kRn+pyWo1I0mQChno2i7SGvAoZwh/hZIO6L5FZe5PcpBs/SxkZ+/shsGMk7CIyNMhG6CQTE1tlr+ZenluXjtliZfc4XwkHG/9MICNl8ihUrnN6YfdvJZXLQvolZQ0QJ5Eyb04jQd1yzKR1hcLx2S42IAWxaWTy5QxSN8QyG5wBRNg567ib5FEqY4M1nyQnWQbUbaiYloYBp14aR1L9DQw8+xmXnlgnTUPq1w+cOpQLeY/RENCalgHe7NoI3lClC2b7/c1j+O7RA68yYUFUod0y7ZXcCwsJkbRk7xgyDEAGs+rq8wLknj6f8y8cfNm179lRARwblnmo9uA43Tlee8DBSziSvJy/mYMzdIQeb+PHuznXjr4fze7x+zvronkiD/JH8MjJl3SWaE7DGtc5jz4+aRxU3rMbHwToEOY6u5pIsvz5PRFYWBvKX2+VoxmdR+m1qhAxsg0wtbA0CTnqgHNGMIFDWVTDQSy8LvJt+usUn1RtrYyyiI=
file_glob: true
file: build/*
on:
tags: true
branch: master

notifications:
email: false
webhooks:
urls:
- https://misc.schrej.net/travistodiscord/pterodev.php
on_success: change
on_failure: always
on_error: always
on_cancel: always
on_start: never
128 CHANGELOG.md
@@ -1,6 +1,132 @@
# Changelog

## next
## v1.3.0
### Fixed
* Fixes an error being returned to the client when attempting to restart a server when the container no longer exists on the machine.

### Changed
* Updated server transfer logic to use newer file archiving tools to avoid frequent errors when transferring symlinked files.

## v1.3.0
### Fixed
* Fixes improper error handling when attempting to create a new Docker network.
* Fixes edge-case crash that would occur when a user triggers an install for a server that does not currently have a data directory present on the system.
* Fixes missing return on error when attempting to get the contents of a file from Wings.
* Fixes certain stop signals not being properly handled and parsed by Wings.
* Fixes server build settings not always being updated properly if set to their zero-value.
* Fixes context leak when waiting on a server instance to be stopped.
* Fix potential application panic when chowning a file if there is an error getting file details.
* Fixes `Filesystem.Chown` unintentionally touching all of the files within a given directory tree which could cause some games to trigger a full refresh thinking files had been changed.
* Fixes `Content-Disposition` header not being properly escaped causing some browsers to not report the correct filename on downloads.

### Added
* Adds support for restoring server backups (including remote backups) with the ability to reset the current file state for a server.
* Adds underlying support for allowing Eggs to mark specific files (or patterns) as being inaccessible to users within the file manager.

### Changed
* Refactored SFTP subsystem to be less of a standalone package and more integrated with the underlying server logic in Wings. This significantly simplified the logic and makes it much easier to reason about.
* Refactored much of the underlying API logic to be more extensible down the road, support automatic retries, and be more testable.
* Refactored much of the underlying HTTP middleware logic to be packaged differently and easier to reason about in the codebase.
* System defined `TZ` variable will be used if present rather than attempting to parse the timezone using `datetimectl`.
* Improves error handling and reporting for server installation process to improve debugging in the future if things break.

## v1.2.3
### Fixed
* **[Security]** Fixes a remaining security vulnerability in the code handling remote file downloads for servers relating to redirect validation.

### Added
* Adds a configuration key at `api.disable_remote_download` that can be set to `true` to completely disable the remote download system.

## v1.2.2
### Fixed
* Reverts changes to logic handling blocking until a server process is done running when polling stats. This change exposed a bug in the underlying Docker system causing servers to enter a state in which Wings was unable to terminate the process and Docker commands would hang if executed against the container.

### Changed
* Adds logic to handle a console stream unexpectedly returning an EOF when reading console logs. New code should automatically re-attach the stream avoiding issues where the console would stop live updating for servers.

## v1.2.1
### Fixed
* Fixes servers not being properly marked as no longer transferring if an error occurs during the archive process.
* Fixes problems with user detection when running Wings inside a Docker container.
* Fixes filename decoding issues with multiple endpoints related to the file manager (namely move/copy/delete).
* **[Security]** Fixes vulnerability allowing a malicious user to abuse the remote file download utility to scan or access resources on the local network.
* Fixes network `tx` stats not correctly being reported (was previously reporting `rx` for both `rx` and `tx`).

### Changed
* Cleans up the logic related to polling resources for the server to make a little more sense and not do pointless `io.Copy()` operations.

## v1.2.0
### Fixed
* Fixes log compression being set on the Docker containers being created to avoid errors on some versions of Docker.
* Cleaned up logic handling server resource usage to avoid race conditions in the future and make the logic simpler.
* Fixes directories being created when writing a file before checking if there was space for the file to even be written to the disk.
* Significant performance and resource usage fixes to backups and server transfers to avoid obliterating machine `i/o` and causing excessive resource exhaustion on busy systems or low end machines.
* Fixes server install process to not unintentionally exit and cause invalid states if a line during the install process was too long.
* Fixes symlink error handling in backups to not unexpectedly tank a request. Any errors due to a symlink are now ignored and will not impact the generation of a backup (including for server transfers).

### Changed
* Changed `--debug` flag to no longer ignore certificate errors on requests. Use `--ignore-certificate-errors` to ignore any certificate errors encountered when in development environments.
* Changed all Filesystem related errors to be of the same internal error type making error checking significantly easier and less error prone.
* Improves log output stacktraces to be more accurate as to the source of the issue.

### Added
* Adds support for downloading files to a server's data directory and optionally checking the status of or canceling in-progress downloads.
* Adds a `context.Context` to `server.Server` structs allowing for cancelation of long running background tasks when a server is deleted without additional complexity on developer's end.

## v1.1.3
### Fixed
* Fixes `archive/tar: write too long` error when creating a server backup.
* Fixes server installation docker images not using authentication properly during the pull.
* Fixes temporary transfer files not being removed after the transfer is completed.
* Fixes TLS certificate checking to be all lowercase to avoid any lookup issues when an all-caps domain is provided.
* Fixes multiple interfaces with the same port not being publishable for a server.
* Fixes errors encountered during websocket processes being incorrectly passed back to the Panel as a JWT error rather than a generic Wings error for admin users.

### Added
* Added logic to notify the Panel when archive generation fails.
* Added endpoint to run `chmod` commands against server files and updated API response to include the mode bits when requesting files.

### Changed
* Updated internals to call `Server.Environment.State()` rather than deprecated `Server.GetState()` functions.
* Improved error handling logic and massively simplified error passing around the codebase.

## v1.1.2
### Fixed
* Fixes binaries built as part of the release process not being usable in MUSL based environments (such as our Docker images).
* Fixes server states being incorrectly set back to offline when a server is started after a system restart.

### Changed
* Improved logic for cleaning `allowed_mount` paths for consistency.
* Certain context cancelation deadline errors are no longer wrongly reported at an error level (since they're expected).
* Very minor micro-optimizations for some string handling with server console output.

### Added
* Added a hidden option to disable all disk checking for servers by setting the `disk_check_interval` to `0` in the config file.

## v1.1.1
### Fixed
* Fixes certain files returning invalid data in the request due to a bad header set after sending data down the line.

## v1.1.0
This release **requires** `Panel@1.1.0` or later to run due to API changes.

### Added
* Adds support for denying client JWT access to specific token keys generated before Wings starts, or before an arbitrary date from an API call.
* Adds support for a configurable number of log messages to be returned when connecting to a server socket and requesting the logs.
* Adds support for both CPU and Memory profiling of Wings via a CLI argument.

### Fixed
* Errors encountered while uploading files to Wings are now properly reported back to the client rather than causing a generic 500 error.
* Servers exceeding their disk limit are now properly stopped when they exceed limits while running.
* Fixes server environment starting as an empty value rather than an "offline" value.

### Changed
* Cleaned up code internals for handling API requests to make it easier on new developers and use a more sane system.
* Server configuration retrieval from the Panel is now done in a paginated loop rather than a single large call to allow systems with thousands of instances to boot properly.
* Switches to multipart S3 uploads to handle backups larger than 5GB in size.
* Switches the error handling package from `pkg/errors` to `emperror` to avoid overwriting existing stack traces associated with an error and provide additional functionality.

## v1.0.1
### Added
* Adds support for ARM to build outputs for wings.
30 Dockerfile
@@ -1,14 +1,22 @@
# ----------------------------------
# Pterodactyl Panel Dockerfile
# ----------------------------------
# Stage 1 (Build)
FROM golang:1.15-alpine3.12 AS builder

FROM golang:1.15-alpine
COPY . /go/wings/
WORKDIR /go/wings/
RUN apk add --no-cache upx \
&& go build -ldflags="-s -w" \
&& upx --brute wings
ARG VERSION
RUN apk add --update --no-cache git=2.26.2-r0 make=4.3-r0 upx=3.96-r0
WORKDIR /app/
COPY go.mod go.sum /app/
RUN go mod download
COPY . /app/
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
-ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=$VERSION" \
-v \
-trimpath \
-o wings \
wings.go
RUN upx wings

FROM alpine:latest
COPY --from=0 /go/wings/wings /usr/bin/
# Stage 2 (Final)
FROM busybox:1.33.0
RUN echo "ID=\"busybox\"" > /etc/os-release
COPY --from=builder /app/wings /usr/bin/
CMD [ "wings", "--config", "/etc/pterodactyl/config.yml" ]
12 Makefile
@@ -1,7 +1,19 @@
GIT_HEAD = $(shell git rev-parse HEAD | head -c8)

build:
GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_amd64 -v wings.go
GOOS=linux GOARCH=arm64 go build -ldflags="-s -w" -gcflags "all=-trimpath=$(pwd)" -o build/wings_linux_arm64 -v wings.go

debug:
go build -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
sudo ./wings --debug --ignore-certificate-errors --config config.yml

# Runs a remotely debuggable session for Wings allowing an IDE to connect and target
# different breakpoints.
rmdebug:
go build -gcflags "all=-N -l" -ldflags="-X github.com/pterodactyl/wings/system.Version=$(GIT_HEAD)" -race
sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml

compress:
upx --brute build/wings_*
@@ -23,7 +23,7 @@ I would like to extend my sincere thanks to the following sponsors for helping f
| [**MineStrator**](https://minestrator.com/) | Looking for a French highend hosting company for your Minecraft server? More than 14,000 members on our discord, trust us. |
| [**DedicatedMC**](https://dedicatedmc.io/) | DedicatedMC provides Raw Power hosting at affordable pricing, making sure to never compromise on your performance and giving you the best performance money can buy. |
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
| [**XCORE-SERVER.de**](https://xcore-server.de/) | XCORE-SERVER.de offers High-End Servers for hosting and gaming since 2012. Fast, excellent and well-known for eSports Gaming. |
| [**XCORE**](https://xcore-server.de/) | XCORE offers High-End Servers for hosting and gaming since 2012. Fast, excellent and well-known for eSports Gaming. |
| [**RoyaleHosting**](https://royalehosting.net/) | Build your dreams and deploy them with RoyaleHosting’s reliable servers and network. Easy to use, provisioned in a couple of minutes. |
| [**Spill Hosting**](https://spillhosting.no/) | Spill Hosting is a Norwegian hosting service, which aims to provide cheap services on quality servers. Premium i9-9900K processors will run your game like a dream. |
| [**DeinServerHost**](https://deinserverhost.de/) | DeinServerHost offers Dedicated, VPS and Gameservers for many popular Games like Minecraft and Rust in Germany since 2013. |
177 api/api.go
@@ -1,177 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Initializes the requester instance.
|
||||
func NewRequester() *PanelRequest {
|
||||
return &PanelRequest{
|
||||
Response: nil,
|
||||
}
|
||||
}
|
||||
|
||||
type PanelRequest struct {
|
||||
Response *http.Response
|
||||
}
|
||||
|
||||
// Builds the base request instance that can be used with the HTTP client.
|
||||
func (r *PanelRequest) GetClient() *http.Client {
|
||||
return &http.Client{Timeout: time.Second * 30}
|
||||
}
|
||||
|
||||
func (r *PanelRequest) SetHeaders(req *http.Request) *http.Request {
|
||||
req.Header.Set("Accept", "application/vnd.pterodactyl.v1+json")
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s.%s", config.Get().AuthenticationTokenId, config.Get().AuthenticationToken))
|
||||
|
||||
return req
|
||||
}
|
||||
|
||||
func (r *PanelRequest) GetEndpoint(endpoint string) string {
|
||||
return fmt.Sprintf(
|
||||
"%s/api/remote/%s",
|
||||
strings.TrimSuffix(config.Get().PanelLocation, "/"),
|
||||
strings.TrimPrefix(strings.TrimPrefix(endpoint, "/"), "api/remote/"),
|
||||
)
|
||||
}
|
||||
|
||||
// Logs the request into the debug log with all of the important request bits.
|
||||
// The authorization key will be cleaned up before being output.
|
||||
func (r *PanelRequest) logDebug(req *http.Request) {
|
||||
headers := make(map[string][]string)
|
||||
for k, v := range req.Header {
|
||||
if k != "Authorization" || len(v) == 0 {
|
||||
headers[k] = v
|
||||
continue
|
||||
}
|
||||
|
||||
headers[k] = []string{v[0][0:15] + "(redacted)"}
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"method": req.Method,
|
||||
"endpoint": req.URL.String(),
|
||||
"headers": headers,
|
||||
}).Debug("making request to external HTTP endpoint")
|
||||
}
|
||||
|
||||
func (r *PanelRequest) Get(url string) (*http.Response, error) {
|
||||
c := r.GetClient()
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, r.GetEndpoint(url), nil)
|
||||
req = r.SetHeaders(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.logDebug(req)
|
||||
|
||||
return c.Do(req)
|
||||
}
|
||||
|
||||
func (r *PanelRequest) Post(url string, data []byte) (*http.Response, error) {
|
||||
c := r.GetClient()
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, r.GetEndpoint(url), bytes.NewBuffer(data))
|
||||
req = r.SetHeaders(req)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.logDebug(req)
|
||||
|
||||
return c.Do(req)
|
||||
}
|
||||
|
||||
// Determines if the API call encountered an error. If no request has been made
|
||||
// the response will be false.
|
||||
func (r *PanelRequest) HasError() bool {
|
||||
if r.Response == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return r.Response.StatusCode >= 300 || r.Response.StatusCode < 200
|
||||
}
|
||||
|
||||
// Reads the body from the response and returns it, then replaces it on the response
|
||||
// so that it can be read again later.
|
||||
func (r *PanelRequest) ReadBody() ([]byte, error) {
|
||||
var b []byte
|
||||
if r.Response == nil {
|
||||
return nil, errors.New("no response exists on interface")
|
||||
}
|
||||
|
||||
if r.Response.Body != nil {
|
||||
b, _ = ioutil.ReadAll(r.Response.Body)
|
||||
}
|
||||
|
||||
r.Response.Body = ioutil.NopCloser(bytes.NewBuffer(b))
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
func (r *PanelRequest) HttpResponseCode() int {
|
||||
if r.Response == nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
return r.Response.StatusCode
|
||||
}
|
||||
|
||||
func IsRequestError(err error) bool {
|
||||
_, ok := err.(*RequestError)
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
type RequestError struct {
|
||||
response *http.Response
|
||||
Code string `json:"code"`
|
||||
Status string `json:"status"`
|
||||
Detail string `json:"detail"`
|
||||
}
|
||||
|
||||
// Returns the error response in a string form that can be more easily consumed.
|
||||
func (re *RequestError) Error() string {
|
||||
return fmt.Sprintf("Error response from Panel: %s: %s (HTTP/%d)", re.Code, re.Detail, re.response.StatusCode)
|
||||
}
|
||||
|
||||
func (re *RequestError) String() string {
|
||||
return re.Error()
|
||||
}
|
||||
|
||||
type RequestErrorBag struct {
|
||||
Errors []RequestError `json:"errors"`
|
||||
}
|
||||
|
||||
// Returns the error message from the API call as a string. The error message will be formatted
|
||||
// similar to the below example:
|
||||
//
|
||||
// HttpNotFoundException: The requested resource does not exist. (HTTP/404)
|
||||
func (r *PanelRequest) Error() *RequestError {
|
||||
body, _ := r.ReadBody()
|
||||
|
||||
bag := RequestErrorBag{}
|
||||
json.Unmarshal(body, &bag)
|
||||
|
||||
e := new(RequestError)
|
||||
if len(bag.Errors) > 0 {
|
||||
e = &bag.Errors[0]
|
||||
}
|
||||
|
||||
e.response = r.Response
|
||||
|
||||
return e
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type BackupRequest struct {
|
||||
Checksum string `json:"checksum"`
|
||||
ChecksumType string `json:"checksum_type"`
|
||||
Size int64 `json:"size"`
|
||||
Successful bool `json:"successful"`
|
||||
}
|
||||
|
||||
// Notifies the panel that a specific backup has been completed and is now
|
||||
// available for a user to view and download.
|
||||
func (r *PanelRequest) SendBackupStatus(backup string, data BackupRequest) (*RequestError, error) {
|
||||
b, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
resp, err := r.Post(fmt.Sprintf("/backups/%s", backup), b)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
if r.HasError() {
|
||||
return r.Error(), nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/apex/log"
|
||||
"github.com/pterodactyl/wings/parser"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type OutputLineMatcher struct {
|
||||
// The raw string to match against. This may or may not be prefixed with
|
||||
// regex: which indicates we want to match against the regex expression.
|
||||
raw string
|
||||
reg *regexp.Regexp
|
||||
}
|
||||
|
||||
// Determine if a given string "s" matches the given line.
|
||||
func (olm *OutputLineMatcher) Matches(s string) bool {
|
||||
if olm.reg == nil {
|
||||
return strings.Contains(s, olm.raw)
|
||||
}
|
||||
|
||||
return olm.reg.MatchString(s)
|
||||
}
|
||||
|
||||
// Return the matcher's raw comparison string.
|
||||
func (olm *OutputLineMatcher) String() string {
|
||||
return olm.raw
|
||||
}
|
||||
|
||||
// Unmarshal the startup lines into individual structs for easier matching abilities.
|
||||
func (olm *OutputLineMatcher) UnmarshalJSON(data []byte) error {
|
||||
if err := json.Unmarshal(data, &olm.raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(olm.raw, "regex:") && len(olm.raw) > 6 {
|
||||
r, err := regexp.Compile(strings.TrimPrefix(olm.raw, "regex:"))
|
||||
if err != nil {
|
||||
log.WithField("error", err).WithField("raw", olm.raw).Warn("failed to compile output line marked as being regex")
|
||||
}
|
||||
|
||||
olm.reg = r
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type ProcessStopConfiguration struct {
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// Defines the process configuration for a given server instance. This sets what the
|
||||
// daemon is looking for to mark a server as done starting, what to do when stopping,
|
||||
// and what changes to make to the configuration file for a server.
|
||||
type ProcessConfiguration struct {
|
||||
Startup struct {
|
||||
Done []*OutputLineMatcher `json:"done"`
|
||||
UserInteraction []string `json:"user_interaction"`
|
||||
StripAnsi bool `json:"strip_ansi"`
|
||||
} `json:"startup"`
|
||||
|
||||
Stop ProcessStopConfiguration `json:"stop"`
|
||||
|
||||
ConfigurationFiles []parser.ConfigurationFile `json:"configs"`
|
||||
}
|
||||
@@ -1,188 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
ProcessStopCommand = "command"
|
||||
ProcessStopSignal = "signal"
|
||||
ProcessStopNativeStop = "stop"
|
||||
)
|
||||
|
||||
// Holds the server configuration data returned from the Panel. When a server process
|
||||
// is started, Wings communicates with the Panel to fetch the latest build information
|
||||
// as well as get all of the details needed to parse the given Egg.
|
||||
//
|
||||
// This means we do not need to hit Wings each time part of the server is updated, and
|
||||
// the Panel serves as the source of truth at all times. This also means if a configuration
|
||||
// is accidentally wiped on Wings we can self-recover without too much hassle, so long
|
||||
// as Wings is aware of what servers should exist on it.
|
||||
type ServerConfigurationResponse struct {
|
||||
Settings json.RawMessage `json:"settings"`
|
||||
ProcessConfiguration *ProcessConfiguration `json:"process_configuration"`
|
||||
}
|
||||
|
||||
// Defines installation script information for a server process. This is used when
|
||||
// a server is installed for the first time, and when a server is marked for re-installation.
|
||||
type InstallationScript struct {
|
||||
ContainerImage string `json:"container_image"`
|
||||
Entrypoint string `json:"entrypoint"`
|
||||
Script string `json:"script"`
|
||||
}
|
||||
|
||||
// GetAllServerConfigurations fetches configurations for all servers assigned to this node.
|
||||
func (r *PanelRequest) GetAllServerConfigurations() (map[string]json.RawMessage, *RequestError, error) {
|
||||
resp, err := r.Get("/servers")
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
|
||||
if r.HasError() {
|
||||
return nil, r.Error(), nil
|
||||
}
|
||||
|
||||
b, _ := r.ReadBody()
|
||||
res := map[string]json.RawMessage{}
|
||||
if len(b) == 2 {
|
||||
return res, nil, nil
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(b, &res); err != nil {
|
||||
return nil, nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return res, nil, nil
|
||||
}
|
||||
|
||||
// Fetches the server configuration and returns the struct for it.
|
||||
func (r *PanelRequest) GetServerConfiguration(uuid string) (ServerConfigurationResponse, *RequestError, error) {
|
||||
res := ServerConfigurationResponse{}
|
||||
|
||||
resp, err := r.Get(fmt.Sprintf("/servers/%s", uuid))
|
||||
if err != nil {
|
||||
return res, nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
if r.HasError() {
|
||||
return res, r.Error(), nil
|
||||
}
|
||||
|
||||
b, _ := r.ReadBody()
|
||||
if err := json.Unmarshal(b, &res); err != nil {
|
||||
return res, nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return res, nil, nil
|
||||
}
|
||||
|
||||
// Fetches installation information for the server process.
|
||||
func (r *PanelRequest) GetInstallationScript(uuid string) (InstallationScript, *RequestError, error) {
|
||||
res := InstallationScript{}
|
||||
|
||||
resp, err := r.Get(fmt.Sprintf("/servers/%s/install", uuid))
|
||||
if err != nil {
|
||||
return res, nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
|
||||
if r.HasError() {
|
||||
return res, r.Error(), nil
|
||||
}
|
||||
|
||||
b, _ := r.ReadBody()
|
||||
|
||||
if err := json.Unmarshal(b, &res); err != nil {
|
||||
return res, nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return res, nil, nil
|
||||
}
|
||||
|
||||
type installRequest struct {
|
||||
Successful bool `json:"successful"`
|
||||
}
|
||||
|
||||
// Marks a server as being installed successfully or unsuccessfully on the panel.
|
||||
func (r *PanelRequest) SendInstallationStatus(uuid string, successful bool) (*RequestError, error) {
|
||||
b, err := json.Marshal(installRequest{Successful: successful})
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
resp, err := r.Post(fmt.Sprintf("/servers/%s/install", uuid), b)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
if r.HasError() {
|
||||
return r.Error(), nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type archiveRequest struct {
|
||||
Successful bool `json:"successful"`
|
||||
}
|
||||
|
||||
func (r *PanelRequest) SendArchiveStatus(uuid string, successful bool) (*RequestError, error) {
|
||||
b, err := json.Marshal(archiveRequest{Successful: successful})
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
resp, err := r.Post(fmt.Sprintf("/servers/%s/archive", uuid), b)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
if r.HasError() {
|
||||
return r.Error(), nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *PanelRequest) SendTransferFailure(uuid string) (*RequestError, error) {
|
||||
resp, err := r.Get(fmt.Sprintf("/servers/%s/transfer/failure", uuid))
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
if r.HasError() {
|
||||
return r.Error(), nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (r *PanelRequest) SendTransferSuccess(uuid string) (*RequestError, error) {
|
||||
resp, err := r.Get(fmt.Sprintf("/servers/%s/transfer/success", uuid))
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
if r.HasError() {
|
||||
return r.Error(), nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
type SftpAuthRequest struct {
|
||||
User string `json:"username"`
|
||||
Pass string `json:"password"`
|
||||
IP string `json:"ip"`
|
||||
SessionID []byte `json:"session_id"`
|
||||
ClientVersion []byte `json:"client_version"`
|
||||
}
|
||||
|
||||
type SftpAuthResponse struct {
|
||||
Server string `json:"server"`
|
||||
Token string `json:"token"`
|
||||
Permissions []string `json:"permissions"`
|
||||
}
|
||||
|
||||
type sftpInvalidCredentialsError struct {
|
||||
}
|
||||
|
||||
func (ice sftpInvalidCredentialsError) Error() string {
|
||||
return "the credentials provided were invalid"
|
||||
}
|
||||
|
||||
func IsInvalidCredentialsError(err error) bool {
|
||||
_, ok := err.(*sftpInvalidCredentialsError)
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
// Usernames all follow the same format, so don't even bother hitting the API if the username is not
|
||||
// at least in the expected format. This is very basic protection against random bots finding the SFTP
|
||||
// server and sending a flood of usernames.
|
||||
var validUsernameRegexp = regexp.MustCompile(`^(?i)(.+)\.([a-z0-9]{8})$`)
|
||||
|
||||
func (r *PanelRequest) ValidateSftpCredentials(request SftpAuthRequest) (*SftpAuthResponse, error) {
|
||||
// If the username doesn't meet the expected format that the Panel would even recognize just go ahead
|
||||
// and bail out of the process here to avoid accidentally brute forcing the panel if a bot decides
|
||||
// to connect to spam username attempts.
|
||||
if !validUsernameRegexp.MatchString(request.User) {
|
||||
log.WithFields(log.Fields{
|
||||
"subsystem": "sftp",
|
||||
"username": request.User,
|
||||
"ip": request.IP,
|
||||
}).Warn("failed to validate user credentials (invalid format)")
|
||||
|
||||
return nil, new(sftpInvalidCredentialsError)
|
||||
}
|
||||
|
||||
b, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := r.Post("/sftp/auth", b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
r.Response = resp
|
||||
|
||||
if r.HasError() {
|
||||
if r.HttpResponseCode() >= 400 && r.HttpResponseCode() < 500 {
|
||||
log.WithFields(log.Fields{
|
||||
"subsystem": "sftp",
|
||||
"username": request.User,
|
||||
"ip": request.IP,
|
||||
}).Warn(r.Error().String())
|
||||
|
||||
return nil, new(sftpInvalidCredentialsError)
|
||||
}
|
||||
|
||||
rerr := errors.New(r.Error().String())
|
||||
|
||||
return nil, rerr
|
||||
}
|
||||
|
||||
response := new(SftpAuthResponse)
|
||||
body, _ := r.ReadBody()
|
||||
|
||||
if err := json.Unmarshal(body, response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
@@ -1,60 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// We've gone through a couple of iterations of where the configuration is stored. This
|
||||
// helpful little function will look through the three areas it might have ended up, and
|
||||
// return it.
|
||||
//
|
||||
// We only run this if the configuration flag for the instance is not actually passed in
|
||||
// via the command line. Once found, the configuration is moved into the expected default
|
||||
// location. Only errors are returned from this function, you can safely assume that after
|
||||
// running this the configuration can be found in the correct default location.
|
||||
func RelocateConfiguration() error {
|
||||
var match string
|
||||
check := []string{
|
||||
config.DefaultLocation,
|
||||
"/var/lib/pterodactyl/config.yml",
|
||||
"/etc/wings/config.yml",
|
||||
}
|
||||
|
||||
// Loop over all of the configuration paths, and return which one we found, if
|
||||
// any.
|
||||
for _, p := range check {
|
||||
if s, err := os.Stat(p); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else if !s.IsDir() {
|
||||
match = p
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Just return a generic not exist error at this point if we didn't have a match, this
|
||||
// will allow the caller to handle displaying a more friendly error to the user. If we
|
||||
// did match in the default location, go ahead and return successfully.
|
||||
if match == "" {
|
||||
return os.ErrNotExist
|
||||
} else if match == config.DefaultLocation {
|
||||
return nil
|
||||
}
|
||||
|
||||
// The rest of this function simply creates the new default location and moves the
|
||||
// old configuration file over to the new location, then sets the permissions on the
|
||||
// file correctly so that only the user running this process can read it.
|
||||
p, _ := filepath.Split(config.DefaultLocation)
|
||||
if err := os.MkdirAll(p, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Rename(match, config.DefaultLocation); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.Chmod(config.DefaultLocation, 0600)
|
||||
}
|
||||
@@ -4,10 +4,6 @@ import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/AlecAivazis/survey/v2/terminal"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/spf13/cobra"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -15,6 +11,11 @@ import (
|
||||
"path"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/AlecAivazis/survey/v2/terminal"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -146,7 +147,7 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
|
||||
|
||||
b, err := ioutil.ReadAll(res.Body)
|
||||
|
||||
cfg, err := config.NewFromPath(configPath)
|
||||
cfg, err := config.NewAtPath(configPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -155,7 +156,7 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err = cfg.WriteToDisk(); err != nil {
|
||||
if err = config.WriteToDisk(cfg); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@@ -18,10 +17,13 @@ import (
|
||||
|
||||
"github.com/AlecAivazis/survey/v2"
|
||||
"github.com/AlecAivazis/survey/v2/terminal"
|
||||
"github.com/docker/cli/components/engine/pkg/parsers/operatingsystem"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/parsers/kernel"
|
||||
"github.com/docker/docker/pkg/parsers/operatingsystem"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/loggers/cli"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -39,15 +41,21 @@ var (
|
||||
}
|
||||
)
|
||||
|
||||
var diagnosticsCmd = &cobra.Command{
|
||||
func newDiagnosticsCommand() *cobra.Command {
|
||||
command := &cobra.Command{
|
||||
Use: "diagnostics",
|
||||
Short: "Collect diagnostics information.",
|
||||
Short: "Collect and report information about this Wings instance to assist in debugging.",
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
initConfig()
|
||||
log.SetHandler(cli.Default)
|
||||
},
|
||||
Run: diagnosticsCmdRun,
|
||||
}
|
||||
|
||||
func init() {
|
||||
diagnosticsCmd.PersistentFlags().StringVar(&diagnosticsArgs.HastebinURL, "hastebin-url", DefaultHastebinUrl, "The url of the hastebin instance to use.")
|
||||
diagnosticsCmd.PersistentFlags().IntVar(&diagnosticsArgs.LogLines, "log-lines", DefaultLogLines, "The number of log lines to include in the report")
|
||||
command.Flags().StringVar(&diagnosticsArgs.HastebinURL, "hastebin-url", DefaultHastebinUrl, "the url of the hastebin instance to use")
|
||||
command.Flags().IntVar(&diagnosticsArgs.LogLines, "log-lines", DefaultLogLines, "the number of log lines to include in the report")
|
||||
|
||||
return command
|
||||
}
|
||||
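The hunk above replaces a package-level diagnosticsCmd variable plus an init() that registered its flags with a constructor that builds the command and its flags in one place. The same pattern, reduced to a minimal sketch with placeholder names, assuming the fmt and cobra imports already present in this file:

func newExampleCommand() *cobra.Command {
    command := &cobra.Command{
        Use:   "example",
        Short: "A placeholder command built by a constructor instead of package init().",
        Run: func(cmd *cobra.Command, _ []string) {
            v, _ := cmd.Flags().GetString("value")
            fmt.Println("value:", v)
        },
    }
    // Flags now live next to the command definition rather than in init().
    command.Flags().String("value", "", "a placeholder flag")
    return command
}

The parent then wires it in with rootCommand.AddCommand(newExampleCommand()), mirroring the newDiagnosticsCommand registration later in this diff.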
|
||||
// diagnosticsCmdRun collects diagnostics about wings, its configuration and the node.

|
||||
@@ -84,7 +92,6 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
|
||||
dockerVersion, dockerInfo, dockerErr := getDockerInfo()
|
||||
_ = dockerInfo
|
||||
|
||||
output := &strings.Builder{}
|
||||
fmt.Fprintln(output, "Pterodactyl Wings - Diagnostics Report")
|
||||
@@ -101,8 +108,10 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
|
||||
}
|
||||
|
||||
printHeader(output, "Wings Configuration")
|
||||
cfg, err := config.ReadConfiguration(config.DefaultLocation)
|
||||
if cfg != nil {
|
||||
if err := config.FromFile(config.DefaultLocation); err != nil {
|
||||
|
||||
}
|
||||
cfg := config.Get()
|
||||
fmt.Fprintln(output, " Panel Location:", redact(cfg.PanelLocation))
|
||||
fmt.Fprintln(output, "")
|
||||
fmt.Fprintln(output, " Internal Webserver:", redact(cfg.Api.Host), ":", cfg.Api.Port)
|
||||
@@ -122,11 +131,9 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
|
||||
fmt.Fprintln(output, " Username:", cfg.System.Username)
|
||||
fmt.Fprintln(output, " Server Time:", time.Now().Format(time.RFC1123Z))
|
||||
fmt.Fprintln(output, " Debug Mode:", cfg.Debug)
|
||||
} else {
|
||||
fmt.Println("Failed to load configuration.", err)
|
||||
}
|
||||
|
||||
printHeader(output, "Docker: Info")
|
||||
if dockerErr == nil {
|
||||
fmt.Fprintln(output, "Server Version:", dockerInfo.ServerVersion)
|
||||
fmt.Fprintln(output, "Storage Driver:", dockerInfo.Driver)
|
||||
if dockerInfo.DriverStatus != nil {
|
||||
@@ -146,6 +153,9 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
|
||||
fmt.Fprintln(output, w)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Fprintln(output, dockerErr.Error())
|
||||
}
|
||||
|
||||
printHeader(output, "Docker: Running Containers")
|
||||
c := exec.Command("docker", "ps")
|
||||
@@ -179,23 +189,23 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
|
||||
survey.AskOne(&survey.Confirm{Message: "Upload to " + diagnosticsArgs.HastebinURL + "?", Default: false}, &upload)
|
||||
}
|
||||
if upload {
|
||||
url, err := uploadToHastebin(diagnosticsArgs.HastebinURL, output.String())
|
||||
u, err := uploadToHastebin(diagnosticsArgs.HastebinURL, output.String())
|
||||
if err == nil {
|
||||
fmt.Println("Your report is available here: ", url)
|
||||
fmt.Println("Your report is available here: ", u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getDockerInfo() (types.Version, types.Info, error) {
|
||||
cli, err := environment.DockerClient()
|
||||
client, err := environment.Docker()
|
||||
if err != nil {
|
||||
return types.Version{}, types.Info{}, err
|
||||
}
|
||||
dockerVersion, err := cli.ServerVersion(context.Background())
|
||||
dockerVersion, err := client.ServerVersion(context.Background())
|
||||
if err != nil {
|
||||
return types.Version{}, types.Info{}, err
|
||||
}
|
||||
dockerInfo, err := cli.Info(context.Background())
|
||||
dockerInfo, err := client.Info(context.Background())
|
||||
if err != nil {
|
||||
return types.Version{}, types.Info{}, err
|
||||
}
|
||||
|
||||
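getDockerInfo keeps its signature through this change (a types.Version, a types.Info and an error); only the client constructor moves from environment.DockerClient() to environment.Docker(). A hedged usage sketch, limited to fields already printed elsewhere in this diff plus the standard types.Version.Version field:

dockerVersion, dockerInfo, err := getDockerInfo()
if err != nil {
    fmt.Println("unable to query the Docker daemon:", err)
    return
}
fmt.Println("Docker Version:", dockerVersion.Version)
fmt.Println("Server Version:", dockerInfo.ServerVersion)
fmt.Println("Storage Driver:", dockerInfo.Driver)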
424 cmd/root.go
@@ -2,200 +2,205 @@ package cmd
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/NYTimes/logrotate"
|
||||
"github.com/apex/log/handlers/multi"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/gammazero/workerpool"
|
||||
"golang.org/x/crypto/acme"
|
||||
log2 "log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/NYTimes/logrotate"
|
||||
"github.com/apex/log"
|
||||
"github.com/apex/log/handlers/multi"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/gammazero/workerpool"
|
||||
"github.com/mitchellh/colorstring"
|
||||
"github.com/pterodactyl/wings/loggers/cli"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pkg/profile"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/loggers/cli"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/router"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/sftp"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/acme"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
)
|
||||
|
||||
var configPath = config.DefaultLocation
|
||||
var debug = false
|
||||
var shouldRunProfiler = false
|
||||
var useAutomaticTls = false
|
||||
var tlsHostname = ""
|
||||
var showVersion = false
|
||||
var (
|
||||
configPath = config.DefaultLocation
|
||||
debug = false
|
||||
)
|
||||
|
||||
var root = &cobra.Command{
|
||||
var rootCommand = &cobra.Command{
|
||||
Use: "wings",
|
||||
Short: "The wings of the pterodactyl game management panel",
|
||||
Long: ``,
|
||||
Short: "Runs the API server allowing programatic control of game servers for Pterodactyl Panel.",
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
if useAutomaticTls && len(tlsHostname) == 0 {
|
||||
initConfig()
|
||||
initLogging()
|
||||
if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
|
||||
if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
|
||||
fmt.Println("A TLS hostname must be provided when running wings with automatic TLS, e.g.:\n\n ./wings --auto-tls --tls-hostname my.example.com")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
},
|
||||
Run: rootCmdRun,
|
||||
}
|
||||
|
||||
var versionCommand = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Prints the current executable version and exits.",
|
||||
Run: func(cmd *cobra.Command, _ []string) {
|
||||
fmt.Printf("wings v%s\nCopyright © 2018 - 2021 Dane Everitt & Contributors\n", system.Version)
|
||||
},
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
if err := rootCommand.Execute(); err != nil {
|
||||
log2.Fatalf("failed to execute command: %s", err)
|
||||
}
|
||||
}
|
||||
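Execute changes from returning an error to handling the failure itself via log2.Fatalf, so the program entry point reduces to a bare call. A sketch of what that main package plausibly looks like; the import path is inferred from the module name and is not shown in this diff:

package main

import "github.com/pterodactyl/wings/cmd"

func main() {
    // Execute reports fatal errors itself, so nothing is returned here.
    cmd.Execute()
}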
|
||||
func init() {
|
||||
root.PersistentFlags().BoolVar(&showVersion, "version", false, "show the version and exit")
|
||||
root.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
|
||||
root.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
|
||||
root.PersistentFlags().BoolVar(&shouldRunProfiler, "profile", false, "pass in order to profile wings")
|
||||
root.PersistentFlags().BoolVar(&useAutomaticTls, "auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
|
||||
root.PersistentFlags().StringVar(&tlsHostname, "tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
|
||||
rootCommand.PersistentFlags().StringVar(&configPath, "config", config.DefaultLocation, "set the location for the configuration file")
|
||||
rootCommand.PersistentFlags().BoolVar(&debug, "debug", false, "pass in order to run wings in debug mode")
|
||||
|
||||
root.AddCommand(configureCmd)
|
||||
root.AddCommand(diagnosticsCmd)
|
||||
// Flags specifically used when running the API.
|
||||
rootCommand.Flags().String("profiler", "", "the profiler to run for this instance")
|
||||
rootCommand.Flags().Bool("auto-tls", false, "pass in order to have wings generate and manage it's own SSL certificates using Let's Encrypt")
|
||||
rootCommand.Flags().String("tls-hostname", "", "required with --auto-tls, the FQDN for the generated SSL certificate")
|
||||
rootCommand.Flags().Bool("ignore-certificate-errors", false, "ignore certificate verification errors when executing API calls")
|
||||
|
||||
rootCommand.AddCommand(versionCommand)
|
||||
rootCommand.AddCommand(configureCmd)
|
||||
rootCommand.AddCommand(newDiagnosticsCommand())
|
||||
}
|
||||
|
||||
// Get the configuration path based on the arguments provided.
|
||||
func readConfiguration() (*config.Configuration, error) {
|
||||
var p = configPath
|
||||
if !strings.HasPrefix(p, "/") {
|
||||
d, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p = path.Clean(path.Join(d, configPath))
|
||||
}
|
||||
|
||||
if s, err := os.Stat(p); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
} else if s.IsDir() {
|
||||
return nil, errors.New("cannot use directory as configuration file path")
|
||||
}
|
||||
|
||||
return config.ReadConfiguration(p)
|
||||
}
|
||||
|
||||
func rootCmdRun(*cobra.Command, []string) {
|
||||
if showVersion {
|
||||
fmt.Println(system.Version)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if shouldRunProfiler {
|
||||
defer profile.Start().Stop()
|
||||
}
|
||||
|
||||
// Only attempt configuration file relocation if a custom location has not
|
||||
// been specified in the command startup.
|
||||
if configPath == config.DefaultLocation {
|
||||
if err := RelocateConfiguration(); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
exitWithConfigurationNotice()
|
||||
}
|
||||
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
c, err := readConfiguration()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if debug {
|
||||
c.Debug = true
|
||||
func rootCmdRun(cmd *cobra.Command, _ []string) {
|
||||
switch cmd.Flag("profiler").Value.String() {
|
||||
case "cpu":
|
||||
defer profile.Start(profile.CPUProfile).Stop()
|
||||
case "mem":
|
||||
defer profile.Start(profile.MemProfile).Stop()
|
||||
case "alloc":
|
||||
defer profile.Start(profile.MemProfile, profile.MemProfileAllocs()).Stop()
|
||||
case "heap":
|
||||
defer profile.Start(profile.MemProfile, profile.MemProfileHeap()).Stop()
|
||||
case "routines":
|
||||
defer profile.Start(profile.GoroutineProfile).Stop()
|
||||
case "mutex":
|
||||
defer profile.Start(profile.MutexProfile).Stop()
|
||||
case "threads":
|
||||
defer profile.Start(profile.ThreadcreationProfile).Stop()
|
||||
case "block":
|
||||
defer profile.Start(profile.BlockProfile).Stop()
|
||||
}
|
||||
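The switch above maps the new string-valued --profiler flag onto github.com/pkg/profile modes, replacing the old boolean --profile flag and the shouldRunProfiler variable. The underlying library pattern is a single deferred Start/Stop pair; a standalone sketch for the CPU case:

package main

import "github.com/pkg/profile"

func main() {
    // Equivalent to passing --profiler cpu to wings: writes a cpu.pprof file
    // when the process exits cleanly.
    defer profile.Start(profile.CPUProfile).Stop()

    // ... long-running work to be profiled ...
}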
|
||||
printLogo()
|
||||
if err := configureLogging(c.System.LogDirectory, c.Debug); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
log.WithField("path", c.GetPath()).Info("loading configuration from path")
|
||||
if c.Debug {
|
||||
log.Debug("running in debug mode")
|
||||
log.Warn("certificate checking is disabled")
|
||||
log.WithField("config_file", configPath).Info("loading configuration from file")
|
||||
|
||||
if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok {
|
||||
log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified")
|
||||
http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
|
||||
config.Set(c)
|
||||
config.SetDebugViaFlag(debug)
|
||||
|
||||
if err := c.System.ConfigureTimezone(); err != nil {
|
||||
if err := config.ConfigureTimezone(); err != nil {
|
||||
log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
|
||||
return
|
||||
}
|
||||
|
||||
log.WithField("timezone", c.System.Timezone).Info("configured wings with system timezone")
|
||||
|
||||
if err := c.System.ConfigureDirectories(); err != nil {
|
||||
log.WithField("timezone", config.Get().System.Timezone).Info("configured wings with system timezone")
|
||||
if err := config.ConfigureDirectories(); err != nil {
|
||||
log.WithField("error", err).Fatal("failed to configure system directories for pterodactyl")
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.System.EnableLogRotation(); err != nil {
|
||||
if err := config.EnableLogRotation(); err != nil {
|
||||
log.WithField("error", err).Fatal("failed to configure log rotation on the system")
|
||||
return
|
||||
}
|
||||
|
||||
log.WithField("username", c.System.Username).Info("checking for pterodactyl system user")
|
||||
if su, err := c.EnsurePterodactylUser(); err != nil {
|
||||
log.WithField("username", config.Get().System.User).Info("checking for pterodactyl system user")
|
||||
if err := config.EnsurePterodactylUser(); err != nil {
|
||||
log.WithField("error", err).Fatal("failed to create pterodactyl system user")
|
||||
return
|
||||
} else {
|
||||
}
|
||||
log.WithFields(log.Fields{
|
||||
"username": su.Username,
|
||||
"uid": su.Uid,
|
||||
"gid": su.Gid,
|
||||
"username": config.Get().System.Username,
|
||||
"uid": config.Get().System.User.Uid,
|
||||
"gid": config.Get().System.User.Gid,
|
||||
}).Info("configured system user successfully")
|
||||
}
|
||||
|
||||
if err := server.LoadDirectory(); err != nil {
|
||||
pclient := remote.New(
|
||||
config.Get().PanelLocation,
|
||||
remote.WithCredentials(config.Get().AuthenticationTokenId, config.Get().AuthenticationToken),
|
||||
remote.WithHttpClient(&http.Client{
|
||||
Timeout: time.Second * time.Duration(config.Get().RemoteQuery.Timeout),
|
||||
}),
|
||||
)
|
||||
|
||||
manager, err := server.NewManager(cmd.Context(), pclient)
|
||||
if err != nil {
|
||||
log.WithField("error", err).Fatal("failed to load server configurations")
|
||||
return
|
||||
}
|
||||
|
||||
if err := environment.ConfigureDocker(&c.Docker); err != nil {
|
||||
if err := environment.ConfigureDocker(cmd.Context()); err != nil {
|
||||
log.WithField("error", err).Fatal("failed to configure docker environment")
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.WriteToDisk(); err != nil {
|
||||
log.WithField("error", err).Error("failed to save configuration to disk")
|
||||
if err := config.WriteToDisk(config.Get()); err != nil {
|
||||
log.WithField("error", err).Fatal("failed to write configuration to disk")
|
||||
}
|
||||
|
||||
// Just for some nice log output.
|
||||
for _, s := range server.GetServers().All() {
|
||||
log.WithField("server", s.Id()).Info("loaded configuration for server")
|
||||
for _, s := range manager.All() {
|
||||
log.WithField("server", s.Id()).Info("finished loading configuration for server")
|
||||
}
|
||||
|
||||
states, err := server.CachedServerStates()
|
||||
states, err := manager.ReadStates()
|
||||
if err != nil {
|
||||
log.WithField("error", errors.WithStack(err)).Error("failed to retrieve locally cached server states from disk, assuming all servers in offline state")
|
||||
log.WithField("error", err).Error("failed to retrieve locally cached server states from disk, assuming all servers in offline state")
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Minute)
|
||||
// Every minute, write the current server states to the disk to allow for a more
|
||||
// seamless hard-reboot process in which wings will re-sync server states based
|
||||
// on its last tracked state.
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := manager.PersistStates(); err != nil {
|
||||
log.WithField("error", err).Warn("failed to persist server states to disk")
|
||||
}
|
||||
case <-cmd.Context().Done():
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Create a new workerpool that limits us to 4 servers being bootstrapped at a time
|
||||
// on Wings. This allows us to ensure the environment exists, write configurations,
|
||||
// and reboot processes without causing a slow-down due to sequential booting.
|
||||
pool := workerpool.New(4)
|
||||
|
||||
for _, serv := range server.GetServers().All() {
|
||||
for _, serv := range manager.All() {
|
||||
s := serv
|
||||
|
||||
// For each server we encounter make sure the root data directory exists.
|
||||
if err := s.EnsureDataDirectoryExists(); err != nil {
|
||||
s.Log().Error("could not create root data directory for server: not loading server...")
|
||||
continue
|
||||
}
|
||||
|
||||
pool.Submit(func() {
|
||||
s.Log().Info("configuring server environment and restoring to previous state")
|
||||
|
||||
var st string
|
||||
if state, exists := states[s.Id()]; exists {
|
||||
st = state
|
||||
@@ -218,7 +223,7 @@ func rootCmdRun(*cobra.Command, []string) {
|
||||
// as a result will result in a slow boot.
|
||||
if !r && (st == environment.ProcessRunningState || st == environment.ProcessStartingState) {
|
||||
if err := s.HandlePowerAction(server.PowerActionStart); err != nil {
|
||||
s.Log().WithField("error", errors.WithStack(err)).Warn("failed to return server to running state")
|
||||
s.Log().WithField("error", err).Warn("failed to return server to running state")
|
||||
}
|
||||
} else if r || (!r && s.IsRunning()) {
|
||||
// If the server is currently running on Docker, mark the process as being in that state.
|
||||
@@ -229,95 +234,88 @@ func rootCmdRun(*cobra.Command, []string) {
|
||||
// is that it was running, but we see that the container process is not currently running.
|
||||
s.Log().Info("detected server is running, re-attaching to process...")
|
||||
|
||||
s.SetState(environment.ProcessRunningState)
|
||||
s.Environment.SetState(environment.ProcessRunningState)
|
||||
if err := s.Environment.Attach(); err != nil {
|
||||
s.Log().WithField("error", errors.WithStack(err)).Warn("failed to attach to running server environment")
|
||||
s.Log().WithField("error", err).Warn("failed to attach to running server environment")
|
||||
}
|
||||
|
||||
return
|
||||
} else {
|
||||
// At this point we've determined that the server should indeed be in an offline state, so we'll
|
||||
// make a call to set that state just to ensure we don't ever accidentally end up with some invalid
|
||||
// state being tracked.
|
||||
s.Environment.SetState(environment.ProcessOfflineState)
|
||||
}
|
||||
|
||||
// Addresses potentially invalid data in the stored file that can cause Wings to lose
|
||||
// track of what the actual server state is.
|
||||
_ = s.SetState(environment.ProcessOfflineState)
|
||||
})
|
||||
}
|
||||
|
||||
// Wait until all of the servers are ready to go before we fire up the SFTP and HTTP servers.
|
||||
pool.StopWait()
|
||||
defer func() {
|
||||
// Cancel the context on all of the running servers at this point, even though the
|
||||
// program is just shutting down.
|
||||
for _, s := range manager.All() {
|
||||
s.CtxCancel()
|
||||
}
|
||||
}()
|
||||
|
||||
// Initialize the SFTP server.
|
||||
if err := sftp.Initialize(c.System); err != nil {
|
||||
go func() {
|
||||
// Run the SFTP server.
|
||||
if err := sftp.New(manager).Run(); err != nil {
|
||||
log.WithError(err).Fatal("failed to initialize the sftp server")
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
log.Info("updating server states on Panel: marking installing/restoring servers as normal")
|
||||
// Update all of the servers on the Panel to be in a valid state if they're
|
||||
// currently marked as installing/restoring now that Wings is restarted.
|
||||
if err := pclient.ResetServersState(cmd.Context()); err != nil {
|
||||
log.WithField("error", err).Error("failed to reset server states on Panel: some instances may be stuck in an installing/restoring state unexpectedly")
|
||||
}
|
||||
}()
|
||||
|
||||
sys := config.Get().System
|
||||
// Ensure the archive directory exists.
|
||||
if err := os.MkdirAll(c.System.ArchiveDirectory, 0755); err != nil {
|
||||
if err := os.MkdirAll(sys.ArchiveDirectory, 0755); err != nil {
|
||||
log.WithField("error", err).Error("failed to create archive directory")
|
||||
}
|
||||
|
||||
// Ensure the backup directory exists.
|
||||
if err := os.MkdirAll(c.System.BackupDirectory, 0755); err != nil {
|
||||
if err := os.MkdirAll(sys.BackupDirectory, 0755); err != nil {
|
||||
log.WithField("error", err).Error("failed to create backup directory")
|
||||
}
|
||||
|
||||
autotls, _ := cmd.Flags().GetBool("auto-tls")
|
||||
tlshostname, _ := cmd.Flags().GetString("tls-hostname")
|
||||
if autotls && tlshostname == "" {
|
||||
autotls = false
|
||||
}
|
||||
|
||||
api := config.Get().Api
|
||||
log.WithFields(log.Fields{
|
||||
"use_ssl": c.Api.Ssl.Enabled,
|
||||
"use_auto_tls": useAutomaticTls && len(tlsHostname) > 0,
|
||||
"host_address": c.Api.Host,
|
||||
"host_port": c.Api.Port,
|
||||
"use_ssl": api.Ssl.Enabled,
|
||||
"use_auto_tls": autotls,
|
||||
"host_address": api.Host,
|
||||
"host_port": api.Port,
|
||||
}).Info("configuring internal webserver")
|
||||
|
||||
// Configure the router.
|
||||
r := router.Configure()
|
||||
|
||||
// Create a new HTTP server instance to handle inbound requests from the Panel
|
||||
// and external clients.
|
||||
s := &http.Server{
|
||||
Addr: fmt.Sprintf("%s:%d", c.Api.Host, c.Api.Port),
|
||||
Handler: r,
|
||||
|
||||
TLSConfig: &tls.Config{
|
||||
NextProtos: []string{
|
||||
"h2", // enable HTTP/2
|
||||
"http/1.1",
|
||||
},
|
||||
|
||||
// https://blog.cloudflare.com/exposing-go-on-the-internet
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
},
|
||||
|
||||
PreferServerCipherSuites: true,
|
||||
|
||||
MinVersion: tls.VersionTLS12,
|
||||
MaxVersion: tls.VersionTLS13,
|
||||
|
||||
CurvePreferences: []tls.CurveID{
|
||||
tls.X25519,
|
||||
tls.CurveP256,
|
||||
},
|
||||
// END https://blog.cloudflare.com/exposing-go-on-the-internet
|
||||
},
|
||||
Addr: api.Host + ":" + strconv.Itoa(api.Port),
|
||||
Handler: router.Configure(manager, pclient),
|
||||
TLSConfig: config.DefaultTLSConfig,
|
||||
}
|
||||
|
||||
// Check if the server should run with TLS but using autocert.
|
||||
if useAutomaticTls && len(tlsHostname) > 0 {
|
||||
if autotls {
|
||||
m := autocert.Manager{
|
||||
Prompt: autocert.AcceptTOS,
|
||||
Cache: autocert.DirCache(path.Join(c.System.RootDirectory, "/.tls-cache")),
|
||||
HostPolicy: autocert.HostWhitelist(tlsHostname),
|
||||
Cache: autocert.DirCache(path.Join(sys.RootDirectory, "/.tls-cache")),
|
||||
HostPolicy: autocert.HostWhitelist(tlshostname),
|
||||
}
|
||||
|
||||
log.WithField("hostname", tlsHostname).
|
||||
Info("webserver is now listening with auto-TLS enabled; certificates will be automatically generated by Let's Encrypt")
|
||||
log.WithField("hostname", tlshostname).Info("webserver is now listening with auto-TLS enabled; certificates will be automatically generated by Let's Encrypt")
|
||||
|
||||
// Hook autocert into the main http server.
|
||||
s.TLSConfig.GetCertificate = m.GetCertificate
|
||||
@@ -329,66 +327,67 @@ func rootCmdRun(*cobra.Command, []string) {
|
||||
log.WithError(err).Error("failed to serve autocert http server")
|
||||
}
|
||||
}()
|
||||
|
||||
// Start the main http server with TLS using autocert.
|
||||
if err := s.ListenAndServeTLS("", ""); err != nil {
|
||||
log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlsHostname, "error": err}).
|
||||
Fatal("failed to configure HTTP server using auto-tls")
|
||||
os.Exit(1)
|
||||
log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlshostname, "error": err}).Fatal("failed to configure HTTP server using auto-tls")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
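To summarise the auto-TLS branch that ends above: autocert.Manager supplies certificates on demand, and pointing the server's TLSConfig.GetCertificate at it is the only wiring the handshake path needs. A trimmed sketch using the same packages; the cache path and hostname are placeholders standing in for sys.RootDirectory and the --tls-hostname flag:

m := autocert.Manager{
    Prompt:     autocert.AcceptTOS,
    Cache:      autocert.DirCache("/var/lib/pterodactyl/.tls-cache"),
    HostPolicy: autocert.HostWhitelist("node.example.com"),
}

srv := &http.Server{
    Addr:      ":443",
    TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
}

// The certificate is requested from Let's Encrypt during the first handshake for
// the whitelisted hostname and cached on disk for subsequent starts.
if err := srv.ListenAndServeTLS("", ""); err != nil {
    log.WithError(err).Fatal("https server stopped")
}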
|
||||
// Check if main http server should run with TLS.
|
||||
if c.Api.Ssl.Enabled {
|
||||
if err := s.ListenAndServeTLS(c.Api.Ssl.CertificateFile, c.Api.Ssl.KeyFile); err != nil {
|
||||
// Check if main http server should run with TLS. Otherwise reset the TLS
|
||||
// config on the server and then serve it over normal HTTP.
|
||||
if api.Ssl.Enabled {
|
||||
if err := s.ListenAndServeTLS(strings.ToLower(api.Ssl.CertificateFile), strings.ToLower(api.Ssl.KeyFile)); err != nil {
|
||||
log.WithFields(log.Fields{"auto_tls": false, "error": err}).Fatal("failed to configure HTTPS server")
|
||||
os.Exit(1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Run the main http server without TLS.
|
||||
s.TLSConfig = nil
|
||||
if err := s.ListenAndServe(); err != nil {
|
||||
log.WithField("error", err).Fatal("failed to configure HTTP server")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute calls cobra to handle cli commands
|
||||
func Execute() error {
|
||||
return root.Execute()
|
||||
// Reads the configuration from the disk and then sets up the global singleton
|
||||
// with all of the configuration values.
|
||||
func initConfig() {
|
||||
if !strings.HasPrefix(configPath, "/") {
|
||||
d, err := os.Getwd()
|
||||
if err != nil {
|
||||
log2.Fatalf("cmd/root: could not determine directory: %s", err)
|
||||
}
|
||||
configPath = path.Clean(path.Join(d, configPath))
|
||||
}
|
||||
err := config.FromFile(configPath)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
exitWithConfigurationNotice()
|
||||
}
|
||||
log2.Fatalf("cmd/root: error while reading configuration file: %s", err)
|
||||
}
|
||||
if debug && !config.Get().Debug {
|
||||
config.SetDebugViaFlag(debug)
|
||||
}
|
||||
}
|
||||
|
||||
// Configures the global logger for Zap so that we can call it from any location
|
||||
// in the code without having to pass around a logger instance.
|
||||
func configureLogging(logDir string, debug bool) error {
|
||||
if err := os.MkdirAll(path.Join(logDir, "/install"), 0700); err != nil {
|
||||
return errors.WithStack(err)
|
||||
func initLogging() {
|
||||
dir := config.Get().System.LogDirectory
|
||||
if err := os.MkdirAll(path.Join(dir, "/install"), 0700); err != nil {
|
||||
log2.Fatalf("cmd/root: failed to create install directory path: %s", err)
|
||||
}
|
||||
|
||||
p := filepath.Join(logDir, "/wings.log")
|
||||
p := filepath.Join(dir, "/wings.log")
|
||||
w, err := logrotate.NewFile(p)
|
||||
if err != nil {
|
||||
panic(errors.Wrap(err, "failed to open process log file"))
|
||||
log2.Fatalf("cmd/root: failed to create wings log: %s", err)
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
} else {
|
||||
log.SetLevel(log.InfoLevel)
|
||||
if config.Get().Debug {
|
||||
log.SetLevel(log.DebugLevel)
|
||||
}
|
||||
|
||||
log.SetHandler(multi.New(
|
||||
cli.Default,
|
||||
cli.New(w.File, false),
|
||||
))
|
||||
|
||||
log.SetHandler(multi.New(cli.Default, cli.New(w.File, false)))
|
||||
log.WithField("path", p).Info("writing log files to disk")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prints the wings logo, nothing special here!
|
||||
@@ -399,9 +398,9 @@ __ [blue][bold]Pterodactyl[reset] _____/___/_______ _______ ______
|
||||
\_____\ \/\/ / / / __ / ___/
|
||||
\___\ / / / / /_/ /___ /
|
||||
\___/\___/___/___/___/___ /______/
|
||||
/_______/ [bold]v%s[reset]
|
||||
/_______/ [bold]%s[reset]
|
||||
|
||||
Copyright © 2018 - 2020 Dane Everitt & Contributors
|
||||
Copyright © 2018 - 2021 Dane Everitt & Contributors
|
||||
|
||||
Website: https://pterodactyl.io
|
||||
Source: https://github.com/pterodactyl/wings
|
||||
@@ -417,11 +416,8 @@ func exitWithConfigurationNotice() {
|
||||
[_red_][white][bold]Error: Configuration File Not Found[reset]
|
||||
|
||||
Wings was not able to locate your configuration file, and therefore is not
|
||||
able to complete its boot process.
|
||||
|
||||
Please ensure you have copied your instance configuration file into
|
||||
the default location, or have provided the --config flag to use a
|
||||
custom location.
|
||||
able to complete its boot process. Please ensure you have copied your instance
|
||||
configuration file into the default location below.
|
||||
|
||||
Default Location: /etc/pterodactyl/config.yml
|
||||
|
||||
|
||||
685 config/config.go
@@ -1,32 +1,247 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"github.com/cobaugh/osrelease"
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/gbrlsnchs/jwt/v3"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v2"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"strconv"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/cobaugh/osrelease"
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/gbrlsnchs/jwt/v3"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
const DefaultLocation = "/etc/pterodactyl/config.yml"
|
||||
|
||||
type Configuration struct {
|
||||
sync.RWMutex `json:"-" yaml:"-"`
|
||||
// DefaultTLSConfig sets sane defaults to use when configuring the internal
|
||||
// webserver to listen for public connections.
|
||||
//
|
||||
// @see https://blog.cloudflare.com/exposing-go-on-the-internet
|
||||
var DefaultTLSConfig = &tls.Config{
|
||||
NextProtos: []string{"h2", "http/1.1"},
|
||||
CipherSuites: []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
|
||||
},
|
||||
PreferServerCipherSuites: true,
|
||||
MinVersion: tls.VersionTLS12,
|
||||
MaxVersion: tls.VersionTLS13,
|
||||
CurvePreferences: []tls.CurveID{tls.X25519, tls.CurveP256},
|
||||
}
|
||||
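DefaultTLSConfig above is what cmd/root.go now assigns to the internal webserver instead of rebuilding the cipher-suite list inline. A small sketch of reusing it elsewhere; Clone() is a standard crypto/tls method and the file paths are placeholders, so treat this as an illustration rather than code from this change:

// Clone first so the shared package-level defaults are never mutated in place.
tlsConfig := config.DefaultTLSConfig.Clone()

srv := &http.Server{
    Addr:      "0.0.0.0:8443",
    TLSConfig: tlsConfig,
}

// cert.pem and key.pem are placeholder paths for the api.ssl cert/key settings.
if err := srv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil {
    log.WithError(err).Fatal("https server stopped")
}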
|
||||
// The location from which this configuration instance was instantiated.
|
||||
path string
|
||||
var mu sync.RWMutex
|
||||
var _config *Configuration
|
||||
var _jwtAlgo *jwt.HMACSHA
|
||||
var _debugViaFlag bool
|
||||
|
||||
// Locker specific to writing the configuration to the disk, this happens
|
||||
// in areas that might already be locked so we don't want to crash the process.
|
||||
writeLock sync.Mutex
|
||||
var _writeLock sync.Mutex
|
||||
|
||||
// SftpConfiguration defines the configuration of the internal SFTP server.
|
||||
type SftpConfiguration struct {
|
||||
// The bind address of the SFTP server.
|
||||
Address string `default:"0.0.0.0" json:"bind_address" yaml:"bind_address"`
|
||||
// The bind port of the SFTP server.
|
||||
Port int `default:"2022" json:"bind_port" yaml:"bind_port"`
|
||||
// If set to true, no write actions will be allowed on the SFTP server.
|
||||
ReadOnly bool `default:"false" yaml:"read_only"`
|
||||
}
|
||||
|
||||
// ApiConfiguration defines the configuration for the internal API that is
|
||||
// exposed by the Wings webserver.
|
||||
type ApiConfiguration struct {
|
||||
// The interface that the internal webserver should bind to.
|
||||
Host string `default:"0.0.0.0" yaml:"host"`
|
||||
|
||||
// The port that the internal webserver should bind to.
|
||||
Port int `default:"8080" yaml:"port"`
|
||||
|
||||
// SSL configuration for the daemon.
|
||||
Ssl struct {
|
||||
Enabled bool `json:"enabled" yaml:"enabled"`
|
||||
CertificateFile string `json:"cert" yaml:"cert"`
|
||||
KeyFile string `json:"key" yaml:"key"`
|
||||
}
|
||||
|
||||
// Determines if functionality for allowing remote download of files into server directories
|
||||
// is enabled on this instance. If set to "true" remote downloads will not be possible for
|
||||
// servers.
|
||||
DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
|
||||
|
||||
// The maximum size for files uploaded through the Panel in bytes.
|
||||
UploadLimit int `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
||||
}
|
||||
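The default:"..." struct tags above come from creasty/defaults and cooperate with the yaml tags: defaults are applied first, and anything actually present in config.yml then overrides them, which is the same order NewAtPath and FromFile use later in this file. A self-contained sketch of that interplay for ApiConfiguration, using only packages already imported here:

var api ApiConfiguration
// 1. Apply the default:"..." tag values (host 0.0.0.0, port 8080, upload_limit 100).
if err := defaults.Set(&api); err != nil {
    panic(err)
}
// 2. Values present in the YAML override those defaults; missing keys keep them.
raw := []byte("host: 127.0.0.1\nssl:\n  enabled: true\n  cert: /etc/ssl/cert.pem\n  key: /etc/ssl/key.pem\n")
if err := yaml.Unmarshal(raw, &api); err != nil {
    panic(err)
}
fmt.Println(api.Host, api.Port) // 127.0.0.1 8080 (the port keeps its default)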
|
||||
// RemoteQueryConfiguration defines the configuration settings for remote requests
|
||||
// from Wings to the Panel.
|
||||
type RemoteQueryConfiguration struct {
|
||||
// The amount of time in seconds that Wings should allow for a request to the Panel API
|
||||
// to complete. If this time passes the request will be marked as failed. If your requests
|
||||
// are taking longer than 30 seconds to complete it is likely a performance issue that
|
||||
// should be resolved on the Panel, and not something that should be resolved by upping this
|
||||
// number.
|
||||
Timeout int `default:"30" yaml:"timeout"`
|
||||
|
||||
// The number of servers to load in a single request to the Panel API when booting the
|
||||
// Wings instance. A single request is initially made to the Panel to get this number
|
||||
// of servers, and then the pagination status is checked and additional requests are
|
||||
// fired off in parallel to request the remaining pages.
|
||||
//
|
||||
// It is not recommended to change this from the default as you will likely encounter
|
||||
// memory limits on your Panel instance. In the grand scheme of things 4 requests for
|
||||
// 50 servers is likely just as quick as two for 100 or one for 400, and will certainly
|
||||
// be less likely to cause performance issues on the Panel.
|
||||
BootServersPerPage int `default:"50" yaml:"boot_servers_per_page"`
|
||||
}
|
||||
|
||||
// SystemConfiguration defines basic system configuration settings.
|
||||
type SystemConfiguration struct {
|
||||
// The root directory where all of the pterodactyl data is stored at.
|
||||
RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
|
||||
|
||||
// Directory where logs for server installations and other wings events are logged.
|
||||
LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
|
||||
|
||||
// Directory where the server data is stored at.
|
||||
Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
|
||||
|
||||
// Directory where server archives for transferring will be stored.
|
||||
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
|
||||
|
||||
// Directory where local backups will be stored on the machine.
|
||||
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
|
||||
|
||||
// The user that should own all of the server files, and be used for containers.
|
||||
Username string `default:"pterodactyl" yaml:"username"`
|
||||
|
||||
// The timezone for this Wings instance. This is detected by Wings automatically if possible,
|
||||
// and falls back to UTC if not able to be detected. If you need to set this manually, that
|
||||
// can also be done.
|
||||
//
|
||||
// This timezone value is passed into all containers created by Wings.
|
||||
Timezone string `yaml:"timezone"`
|
||||
|
||||
// Definitions for the user that gets created to ensure that we can quickly access
|
||||
// this information without constantly having to do a system lookup.
|
||||
User struct {
|
||||
Uid int
|
||||
Gid int
|
||||
}
|
||||
|
||||
// The amount of time in seconds that can elapse before a server's disk space calculation is
|
||||
// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
|
||||
// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
|
||||
// process.
|
||||
//
|
||||
// Set to 0 to disable disk checking entirely. This will always return 0 for the disk space used
|
||||
// by a server and should only be set in extreme scenarios where performance is critical and
|
||||
// disk usage is not a concern.
|
||||
DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`
|
||||
|
||||
// If set to true, file permissions for a server will be checked when the process is
|
||||
// booted. This can cause boot delays if the server has a large amount of files. In most
|
||||
// cases disabling this should not have any major impact unless external processes are
|
||||
// frequently modifying a server's files.
|
||||
CheckPermissionsOnBoot bool `default:"true" yaml:"check_permissions_on_boot"`
|
||||
|
||||
// If set to false Wings will not attempt to write a log rotate configuration to the disk
|
||||
// when it boots and one is not detected.
|
||||
EnableLogRotate bool `default:"true" yaml:"enable_log_rotate"`
|
||||
|
||||
// The number of lines to send when a server connects to the websocket.
|
||||
WebsocketLogCount int `default:"150" yaml:"websocket_log_count"`
|
||||
|
||||
Sftp SftpConfiguration `yaml:"sftp"`
|
||||
|
||||
CrashDetection CrashDetection `yaml:"crash_detection"`
|
||||
|
||||
Backups Backups `yaml:"backups"`
|
||||
|
||||
Transfers Transfers `yaml:"transfers"`
|
||||
}
|
||||
|
||||
type CrashDetection struct {
|
||||
// Determines if Wings should detect a server that stops with a normal exit code of
|
||||
// "0" as being crashed if the process stopped without any Wings interaction. E.g.
|
||||
// the user did not press the stop button, but the process stopped cleanly.
|
||||
DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"`
|
||||
|
||||
// Timeout specifies the timeout between crashes that will not cause the server
|
||||
// to be automatically restarted, this value is used to prevent servers from
|
||||
// becoming stuck in a boot-loop after multiple consecutive crashes.
|
||||
Timeout int `default:"60" json:"timeout"`
|
||||
}
|
||||
|
||||
type Backups struct {
|
||||
// WriteLimit imposes a Disk I/O write limit on backups to the disk, this affects all
|
||||
// backup drivers as the archiver must first write the file to the disk in order to
|
||||
// upload it to any external storage provider.
|
||||
//
|
||||
// If the value is less than 1, the write speed is unlimited,
|
||||
// if the value is greater than 0, the write speed is the value in MiB/s.
|
||||
//
|
||||
// Defaults to 0 (unlimited)
|
||||
WriteLimit int `default:"0" yaml:"write_limit"`
|
||||
}
|
||||
|
||||
type Transfers struct {
|
||||
// DownloadLimit imposes a Network I/O read limit when downloading a transfer archive.
|
||||
//
|
||||
// If the value is less than 1, the write speed is unlimited,
|
||||
// if the value is greater than 0, the write speed is the value in MiB/s.
|
||||
//
|
||||
// Defaults to 0 (unlimited)
|
||||
DownloadLimit int `default:"0" yaml:"download_limit"`
|
||||
}
|
||||
|
||||
type ConsoleThrottles struct {
|
||||
// Whether or not the throttler is enabled for this instance.
|
||||
Enabled bool `json:"enabled" yaml:"enabled" default:"true"`
|
||||
|
||||
// The total number of lines that can be output in a given LineResetInterval period before
|
||||
// a warning is triggered and counted against the server.
|
||||
Lines uint64 `json:"lines" yaml:"lines" default:"2000"`
|
||||
|
||||
// The total number of throttle activations that can accumulate before a server is considered
|
||||
// to be breaching and will be stopped. This value is decremented by one every DecayInterval.
|
||||
MaximumTriggerCount uint64 `json:"maximum_trigger_count" yaml:"maximum_trigger_count" default:"5"`
|
||||
|
||||
// The amount of time after which the number of lines processed is reset to 0. This runs in
|
||||
// a constant loop and is not affected by the current console output volumes. By default, this
|
||||
// will reset the processed line count back to 0 every 100ms.
|
||||
LineResetInterval uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
|
||||
|
||||
// The amount of time in milliseconds that must pass without an output warning being triggered
|
||||
// before a throttle activation is decremented.
|
||||
DecayInterval uint64 `json:"decay_interval" yaml:"decay_interval" default:"10000"`
|
||||
|
||||
// The amount of time that a server is allowed to be stopping for before it is terminated
|
||||
// forcefully if it triggers output throttles.
|
||||
StopGracePeriod uint `json:"stop_grace_period" yaml:"stop_grace_period" default:"15"`
|
||||
}
|
||||
|
||||
type Configuration struct {
|
||||
// The location from which this configuration instance was instantiated.
|
||||
path string
|
||||
|
||||
// Determines if wings should be running in debug mode. This value is ignored
|
||||
// if the debug flag is passed through the command line arguments.
|
||||
@@ -47,12 +262,6 @@ type Configuration struct {
|
||||
System SystemConfiguration `json:"system" yaml:"system"`
|
||||
Docker DockerConfiguration `json:"docker" yaml:"docker"`
|
||||
|
||||
// The amount of time in seconds that should elapse between disk usage checks
|
||||
// run by the daemon. Setting a higher number can result in better IO performance
|
||||
// at an increased risk of a malicious user creating a process that goes over
|
||||
// the assigned disk limits.
|
||||
DiskCheckTimeout int `yaml:"disk_check_timeout"`
|
||||
|
||||
// Defines internal throttling configurations for server processes to prevent
|
||||
// someone from running an endless loop that spams data to logs.
|
||||
Throttles ConsoleThrottles
|
||||
@@ -60,6 +269,7 @@ type Configuration struct {
|
||||
// The location where the panel is running that this daemon should connect to
|
||||
// to collect data and send events.
|
||||
PanelLocation string `json:"remote" yaml:"remote"`
|
||||
RemoteQuery RemoteQueryConfiguration `json:"remote_query" yaml:"remote_query"`
|
||||
|
||||
// AllowedMounts is a list of allowed host-system mount points.
|
||||
// This is required to have the "Server Mounts" feature work properly.
|
||||
@@ -71,235 +281,340 @@ type Configuration struct {
|
||||
AllowedOrigins []string `json:"allowed_origins" yaml:"allowed_origins"`
|
||||
}
|
||||
|
||||
// Defines the configuration of the internal SFTP server.
|
||||
type SftpConfiguration struct {
|
||||
// The bind address of the SFTP server.
|
||||
Address string `default:"0.0.0.0" json:"bind_address" yaml:"bind_address"`
|
||||
// The bind port of the SFTP server.
|
||||
Port int `default:"2022" json:"bind_port" yaml:"bind_port"`
|
||||
// If set to true, no write actions will be allowed on the SFTP server.
|
||||
ReadOnly bool `default:"false" yaml:"read_only"`
|
||||
}
|
||||
|
||||
// Defines the configuration for the internal API that is exposed by the
|
||||
// daemon webserver.
|
||||
type ApiConfiguration struct {
|
||||
// The interface that the internal webserver should bind to.
|
||||
Host string `default:"0.0.0.0" yaml:"host"`
|
||||
|
||||
// The port that the internal webserver should bind to.
|
||||
Port int `default:"8080" yaml:"port"`
|
||||
|
||||
// SSL configuration for the daemon.
|
||||
Ssl struct {
|
||||
Enabled bool `default:"false"`
|
||||
CertificateFile string `json:"cert" yaml:"cert"`
|
||||
KeyFile string `json:"key" yaml:"key"`
|
||||
}
|
||||
|
||||
// The maximum size for files uploaded through the Panel in bytes.
|
||||
UploadLimit int `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
||||
}
|
||||
|
||||
// Reads the configuration from the provided file and returns the configuration
|
||||
// object that can then be used.
|
||||
func ReadConfiguration(path string) (*Configuration, error) {
|
||||
b, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := new(Configuration)
|
||||
// NewAtPath creates a new struct and set the path where it should be stored.
|
||||
// This function does not modify the currently stored global configuration.
|
||||
func NewAtPath(path string) (*Configuration, error) {
|
||||
var c Configuration
|
||||
// Configures the default values for many of the configuration options present
|
||||
// in the structs. Values set in the configuration file take priority over the
|
||||
// default values.
|
||||
if err := defaults.Set(c); err != nil {
|
||||
if err := defaults.Set(&c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Track the location where we created this configuration.
|
||||
c.unsafeSetPath(path)
|
||||
|
||||
// Replace environment variables within the configuration file with their
|
||||
// values from the host system.
|
||||
b = []byte(os.ExpandEnv(string(b)))
|
||||
|
||||
if err := yaml.Unmarshal(b, c); err != nil {
|
||||
return nil, err
|
||||
c.path = path
|
||||
return &c, nil
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
var mu sync.RWMutex
|
||||
|
||||
var _config *Configuration
|
||||
var _jwtAlgo *jwt.HMACSHA
|
||||
var _debugViaFlag bool
|
||||
|
||||
// Set the global configuration instance. This is a blocking operation such that
|
||||
// anything trying to set a different configuration value, or read the configuration
|
||||
// will be paused until it is complete.
|
||||
func Set(c *Configuration) {
|
||||
mu.Lock()
|
||||
|
||||
if _config == nil || _config.AuthenticationToken != c.AuthenticationToken {
|
||||
_jwtAlgo = jwt.NewHS256([]byte(c.AuthenticationToken))
|
||||
}
|
||||
|
||||
_config = c
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// SetDebugViaFlag tracks if the application is running in debug mode because of
|
||||
// a command line flag argument. If so we do not want to store that configuration
|
||||
// change to the disk.
|
||||
func SetDebugViaFlag(d bool) {
|
||||
mu.Lock()
|
||||
_config.Debug = d
|
||||
_debugViaFlag = d
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
// Get the global configuration instance. This is a read-safe operation that will block
|
||||
// if the configuration is presently being modified.
|
||||
// Get returns the global configuration instance. This is a thread-safe operation
|
||||
// that will block if the configuration is presently being modified.
|
||||
//
|
||||
// Be aware that you CANNOT make modifications to the currently stored configuration
|
||||
// by modifying the struct returned by this function. The only way to make
|
||||
// modifications is by using the Update() function and passing data through in
|
||||
// the callback.
|
||||
func Get() *Configuration {
|
||||
mu.RLock()
|
||||
defer mu.RUnlock()
|
||||
|
||||
return _config
|
||||
// Create a copy of the struct so that all modifications made beyond this
|
||||
// point are immutable.
|
||||
//goland:noinspection GoVetCopyLock
|
||||
c := *_config
|
||||
mu.RUnlock()
|
||||
return &c
|
||||
}
|
||||
|
||||
// Returns the in-memory JWT algorithm.
|
||||
// Update performs an in-situ update of the global configuration object using
|
||||
// a thread-safe mutex lock. This is the correct way to make modifications to
|
||||
// the global configuration.
|
||||
func Update(callback func(c *Configuration)) {
|
||||
mu.Lock()
|
||||
callback(_config)
|
||||
mu.Unlock()
|
||||
}
|
||||
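Because Get now hands back a copy of the stored struct, mutating that copy silently does nothing; Update is the sanctioned write path. A short sketch of both sides as seen from a package importing config, using only the functions defined above:

// Reading: Get returns a snapshot copy, safe to use without any locking.
if config.Get().Debug {
    log.Debug("debug mode is enabled")
}

// This has no effect on the global state: it only mutates the local copy.
config.Get().Debug = true

// Writing: mutate the live configuration inside the locked Update callback.
config.Update(func(c *config.Configuration) {
    c.Debug = true
})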
|
||||
// GetJwtAlgorithm returns the in-memory JWT algorithm.
|
||||
func GetJwtAlgorithm() *jwt.HMACSHA {
|
||||
mu.RLock()
|
||||
defer mu.RUnlock()
|
||||
|
||||
return _jwtAlgo
|
||||
}
|
||||
|
||||
// Create a new struct and set the path where it should be stored.
|
||||
func NewFromPath(path string) (*Configuration, error) {
|
||||
c := new(Configuration)
|
||||
if err := defaults.Set(c); err != nil {
|
||||
return c, errors.WithStack(err)
|
||||
// WriteToDisk writes the configuration to the disk. This is a thread safe operation
|
||||
// and will only allow one write at a time. Additional calls while writing are
|
||||
// queued up.
|
||||
func WriteToDisk(c *Configuration) error {
|
||||
_writeLock.Lock()
|
||||
defer _writeLock.Unlock()
|
||||
|
||||
//goland:noinspection GoVetCopyLock
|
||||
ccopy := *c
|
||||
// If debugging is set with the flag, don't save that to the configuration file,
|
||||
// otherwise you'll always end up in debug mode.
|
||||
if _debugViaFlag {
|
||||
ccopy.Debug = false
|
||||
}
|
||||
if c.path == "" {
|
||||
return errors.New("cannot write configuration, no path defined in struct")
|
||||
}
|
||||
b, err := yaml.Marshal(&ccopy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(c.path, b, 0600); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
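WriteToDisk above snapshots the passed configuration, drops the Debug flag when debugging was only switched on via the CLI flag, and writes YAML to the path stored on the struct. Persisting a change from a consuming package therefore looks roughly like this (the WebsocketLogCount value is an arbitrary example):

config.Update(func(c *config.Configuration) {
    c.System.WebsocketLogCount = 250
})
// The copy returned by Get carries the unexported path recorded by FromFile/NewAtPath,
// so it can be handed straight to WriteToDisk.
if err := config.WriteToDisk(config.Get()); err != nil {
    log.WithField("error", err).Error("failed to persist configuration")
}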
|
||||
c.unsafeSetPath(path)
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Sets the path where the configuration file is located on the server. This function should
|
||||
// not be called except by processes that are generating the configuration such as the configuration
|
||||
// command shipped with this software.
|
||||
func (c *Configuration) unsafeSetPath(path string) {
|
||||
c.Lock()
|
||||
c.path = path
|
||||
c.Unlock()
|
||||
}
|
||||
|
||||
// Returns the path for this configuration file.
|
||||
func (c *Configuration) GetPath() string {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
return c.path
|
||||
}
|
||||
|
||||
// Ensures that the Pterodactyl core user exists on the system. This user will be the
|
||||
// owner of all data in the root data directory and is used as the user within containers.
|
||||
// EnsurePterodactylUser ensures that the Pterodactyl core user exists on the
|
||||
// system. This user will be the owner of all data in the root data directory
|
||||
// and is used as the user within containers. If files are not owned by this
|
||||
// user there will be issues with permissions on Docker mount points.
|
||||
//
|
||||
// If files are not owned by this user there will be issues with permissions on Docker
|
||||
// mount points.
|
||||
func (c *Configuration) EnsurePterodactylUser() (*user.User, error) {
|
||||
u, err := user.Lookup(c.System.Username)
|
||||
|
||||
// If an error is returned but it isn't the unknown user error just abort
|
||||
// the process entirely. If we did find a user, return it immediately.
|
||||
if err == nil {
|
||||
return u, c.setSystemUser(u)
|
||||
} else if _, ok := err.(user.UnknownUserError); !ok {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// This function IS NOT thread safe and should only be called in the main thread
|
||||
// when the application is booting.
|
||||
func EnsurePterodactylUser() error {
|
||||
sysName, err := getSystemName()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var command = fmt.Sprintf("useradd --system --no-create-home --shell /bin/false %s", c.System.Username)
|
||||
// Our way of detecting if wings is running inside of Docker.
|
||||
if sysName == "busybox" {
|
||||
_config.System.Username = system.FirstNotEmpty(os.Getenv("WINGS_USERNAME"), "pterodactyl")
|
||||
_config.System.User.Uid = system.MustInt(system.FirstNotEmpty(os.Getenv("WINGS_UID"), "988"))
|
||||
_config.System.User.Gid = system.MustInt(system.FirstNotEmpty(os.Getenv("WINGS_UID"), "988"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Alpine Linux is the only OS we currently support that doesn't work with the useradd command, so
|
||||
// in those cases we just modify the command a bit to work as expected.
|
||||
u, err := user.Lookup(_config.System.Username)
|
||||
// If an error is returned but it isn't the unknown user error just abort
|
||||
// the process entirely. If we did find a user, return it immediately.
|
||||
if err != nil {
|
||||
if _, ok := err.(user.UnknownUserError); !ok {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
_config.System.User.Uid = system.MustInt(u.Uid)
|
||||
_config.System.User.Gid = system.MustInt(u.Gid)
|
||||
return nil
|
||||
}
|
||||
|
||||
command := fmt.Sprintf("useradd --system --no-create-home --shell /usr/sbin/nologin %s", _config.System.Username)
|
||||
// Alpine Linux is the only OS we currently support that doesn't work with the useradd
|
||||
// command, so in those cases we just modify the command a bit to work as expected.
|
||||
if strings.HasPrefix(sysName, "alpine") {
|
||||
command = fmt.Sprintf("adduser -S -D -H -G %[1]s -s /bin/false %[1]s", c.System.Username)
|
||||
|
||||
command = fmt.Sprintf("adduser -S -D -H -G %[1]s -s /sbin/nologin %[1]s", _config.System.Username)
|
||||
// We have to create the group first on Alpine, so do that here before continuing on
|
||||
// to the user creation process.
|
||||
if _, err := exec.Command("addgroup", "-S", c.System.Username).Output(); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
if _, err := exec.Command("addgroup", "-S", _config.System.Username).Output(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
split := strings.Split(command, " ")
|
||||
if _, err := exec.Command(split[0], split[1:]...).Output(); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if u, err := user.Lookup(c.System.Username); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
} else {
|
||||
return u, c.setSystemUser(u)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the system user into the configuration and then write it to the disk so that
|
||||
// it is persisted on boot.
|
||||
func (c *Configuration) setSystemUser(u *user.User) error {
|
||||
uid, _ := strconv.Atoi(u.Uid)
|
||||
gid, _ := strconv.Atoi(u.Gid)
|
||||
|
||||
c.Lock()
|
||||
c.System.Username = u.Username
|
||||
c.System.User.Uid = uid
|
||||
c.System.User.Gid = gid
|
||||
c.Unlock()
|
||||
|
||||
return c.WriteToDisk()
|
||||
}
|
||||
|
||||
// Writes the configuration to the disk as a blocking operation by obtaining an exclusive
|
||||
// lock on the file. This prevents something else from writing at the exact same time and
|
||||
// leading to bad data conditions.
|
||||
func (c *Configuration) WriteToDisk() error {
|
||||
// Obtain an exclusive write against the configuration file.
|
||||
c.writeLock.Lock()
|
||||
defer c.writeLock.Unlock()
|
||||
|
||||
ccopy := *c
|
||||
// If debugging is set with the flag, don't save that to the configuration file, otherwise
|
||||
// you'll always end up in debug mode.
|
||||
if _debugViaFlag {
|
||||
ccopy.Debug = false
|
||||
}
|
||||
|
||||
if c.path == "" {
|
||||
return errors.New("cannot write configuration, no path defined in struct")
|
||||
}
|
||||
|
||||
b, err := yaml.Marshal(&ccopy)
|
||||
u, err = user.Lookup(_config.System.Username)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
_config.System.User.Uid = system.MustInt(u.Uid)
|
||||
_config.System.User.Gid = system.MustInt(u.Gid)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(c.GetPath(), b, 0644); err != nil {
|
||||
return errors.WithStack(err)
|
||||
// FromFile reads the configuration from the provided file and stores it in the
|
||||
// global singleton for this instance.
|
||||
func FromFile(path string) error {
|
||||
b, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c, err := NewAtPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Replace environment variables within the configuration file with their
|
||||
// values from the host system.
|
||||
b = []byte(os.ExpandEnv(string(b)))
|
||||
if err := yaml.Unmarshal(b, c); err != nil {
|
||||
return err
|
||||
}
|
||||
// Store this configuration in the global state.
|
||||
Set(c)
|
||||
return nil
|
||||
}
|
||||
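FromFile above runs the raw file contents through os.ExpandEnv before unmarshalling, so $VAR or ${VAR} references inside config.yml are substituted from the host environment. That step in isolation (the variable name is invented for the example; "remote" is the real yaml key for PanelLocation):

_ = os.Setenv("WINGS_PANEL_URL", "https://panel.example.com")

raw := []byte("remote: ${WINGS_PANEL_URL}\n")
expanded := []byte(os.ExpandEnv(string(raw)))

fmt.Println(string(expanded)) // remote: https://panel.example.com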
|
||||
// ConfigureDirectories ensures that all of the system directories exist on the
|
||||
// system. These directories are created so that only the owner can read the data,
|
||||
// and no other users.
|
||||
//
|
||||
// This function IS NOT thread-safe.
|
||||
func ConfigureDirectories() error {
|
||||
root := _config.System.RootDirectory
|
||||
log.WithField("path", root).Debug("ensuring root data directory exists")
|
||||
if err := os.MkdirAll(root, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// There are a non-trivial number of users out there whose data directories are actually a
|
||||
// symlink to another location on the disk. If we do not resolve that final destination at this
|
||||
// point things will appear to work, but endless errors will be encountered when we try to
|
||||
// verify accessed paths since they will all end up resolving outside the expected data directory.
|
||||
//
|
||||
// For the sake of automating away as much of this as possible, see if the data directory is a
|
||||
// symlink, and if so resolve to its final real path, and then update the configuration to use
|
||||
// that.
|
||||
if d, err := filepath.EvalSymlinks(_config.System.Data); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else if d != _config.System.Data {
|
||||
_config.System.Data = d
|
||||
}
|
||||
|
||||
log.WithField("path", _config.System.Data).Debug("ensuring server data directory exists")
|
||||
if err := os.MkdirAll(_config.System.Data, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithField("path", _config.System.ArchiveDirectory).Debug("ensuring archive data directory exists")
|
||||
if err := os.MkdirAll(_config.System.ArchiveDirectory, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithField("path", _config.System.BackupDirectory).Debug("ensuring backup data directory exists")
|
||||
if err := os.MkdirAll(_config.System.BackupDirectory, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
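The symlink handling above matters because MkdirAll and the later path verification both need to operate on the resolved location. A reduced sketch of the same resolve-then-create flow (the helper name is illustrative and the os/path-filepath imports are assumed from the surrounding file):

func resolveAndCreateDataDir(dir string) (string, error) {
	d, err := filepath.EvalSymlinks(dir)
	if err != nil {
		if !os.IsNotExist(err) {
			return "", err
		}
		// Nothing to resolve yet; the directory will be created below.
		d = dir
	}
	// Create the (resolved) directory so that only the owner can read it.
	if err := os.MkdirAll(d, 0700); err != nil {
		return "", err
	}
	return d, nil
}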
|
||||
|
||||
// EnableLogRotation writes a logrotate file for wings to the system logrotate
|
||||
// configuration directory if one exists and a logrotate file is not found. This
|
||||
// allows us to basically automate away the log rotation for most installs, but
|
||||
// also enable users to make modifications on their own.
|
||||
//
|
||||
// This function IS NOT thread-safe.
|
||||
func EnableLogRotation() error {
|
||||
if !_config.System.EnableLogRotate {
|
||||
log.Info("skipping log rotate configuration, disabled in wings config file")
|
||||
return nil
|
||||
}
|
||||
|
||||
if st, err := os.Stat("/etc/logrotate.d"); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
} else if (err != nil && os.IsNotExist(err)) || !st.IsDir() {
|
||||
return nil
|
||||
}
|
||||
if _, err := os.Stat("/etc/logrotate.d/wings"); err == nil || !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("no log rotation configuration found: adding file now")
|
||||
// If we've gotten to this point it means the logrotate directory exists on the system
|
||||
// but there is not a file for wings already. In that case, let us write a new file to
|
||||
// it so files can be rotated easily.
|
||||
f, err := os.Create("/etc/logrotate.d/wings")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
t, err := template.New("logrotate").Parse(`
|
||||
{{.LogDirectory}}/wings.log {
|
||||
size 10M
|
||||
compress
|
||||
delaycompress
|
||||
dateext
|
||||
maxage 7
|
||||
missingok
|
||||
notifempty
|
||||
create 0640 {{.User.Uid}} {{.User.Gid}}
|
||||
postrotate
|
||||
killall -SIGHUP wings
|
||||
endscript
|
||||
}`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.Wrap(t.Execute(f, _config.System), "config: failed to write logrotate to disk")
|
||||
}
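The function above renders a logrotate stanza from the system configuration and writes it to /etc/logrotate.d/wings. A small standalone sketch of that template step, writing to stdout instead of a file (text/template is an assumption here, since the import block is not part of this hunk):

package main

import (
	"os"
	"text/template"
)

type system struct {
	LogDirectory string
	User         struct{ Uid, Gid int }
}

func main() {
	// Render a minimal logrotate stanza from the configuration values,
	// the same way EnableLogRotation executes its template above.
	t := template.Must(template.New("logrotate").Parse(
		"{{.LogDirectory}}/wings.log {\n    create 0640 {{.User.Uid}} {{.User.Gid}}\n}\n"))

	s := system{LogDirectory: "/var/log/pterodactyl"}
	s.User.Uid, s.User.Gid = 988, 988

	if err := t.Execute(os.Stdout, s); err != nil {
		panic(err)
	}
}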
|
||||
|
||||
// GetStatesPath returns the location of the JSON file that tracks server states.
|
||||
func (sc *SystemConfiguration) GetStatesPath() string {
|
||||
return path.Join(sc.RootDirectory, "/states.json")
|
||||
}
|
||||
|
||||
// ConfigureTimezone sets the timezone data for the configuration if it is
|
||||
// currently missing. If a value has been set, this functionality will only run
|
||||
// to validate that the timezone being used is valid.
|
||||
//
|
||||
// This function IS NOT thread-safe.
|
||||
func ConfigureTimezone() error {
|
||||
tz := os.Getenv("TZ")
|
||||
if _config.System.Timezone == "" && tz != "" {
|
||||
_config.System.Timezone = tz
|
||||
}
|
||||
if _config.System.Timezone == "" {
|
||||
b, err := ioutil.ReadFile("/etc/timezone")
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.WithMessage(err, "config: failed to open timezone file")
|
||||
}
|
||||
|
||||
_config.System.Timezone = "UTC"
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
|
||||
defer cancel()
|
||||
// Okay, file isn't found on this OS, we will try using timedatectl to handle this. If this
|
||||
// command fails, exit, but if it returns a value use that. If no value is returned we will
|
||||
// fall through to UTC to get Wings booted at least.
|
||||
out, err := exec.CommandContext(ctx, "timedatectl").Output()
|
||||
if err != nil {
|
||||
log.WithField("error", err).Warn("failed to execute \"timedatectl\" to determine system timezone, falling back to UTC")
|
||||
return nil
|
||||
}
|
||||
|
||||
r := regexp.MustCompile(`Time zone: ([\w/]+)`)
|
||||
matches := r.FindSubmatch(out)
|
||||
if len(matches) != 2 || string(matches[1]) == "" {
|
||||
log.Warn("failed to parse timezone from \"timedatectl\" output, falling back to UTC")
|
||||
return nil
|
||||
}
|
||||
_config.System.Timezone = string(matches[1])
|
||||
} else {
|
||||
_config.System.Timezone = string(b)
|
||||
}
|
||||
}
|
||||
|
||||
_config.System.Timezone = regexp.MustCompile(`(?i)[^a-z_/]+`).ReplaceAllString(_config.System.Timezone, "")
|
||||
_, err := time.LoadLocation(_config.System.Timezone)
|
||||
|
||||
return errors.WithMessage(err, fmt.Sprintf("the supplied timezone %s is invalid", _config.System.Timezone))
|
||||
}
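The timedatectl fallback above boils down to: grab the "Time zone:" line with a regular expression, sanitize it, and confirm it loads. A self-contained sketch of that parsing and validation (the sample command output is illustrative):

package main

import (
	"fmt"
	"regexp"
	"time"
)

func main() {
	out := []byte("               Time zone: Europe/Vienna (CEST, +0200)")

	// Extract the zone name, falling back to UTC when nothing matches,
	// mirroring ConfigureTimezone above.
	tz := "UTC"
	if m := regexp.MustCompile(`Time zone: ([\w/]+)`).FindSubmatch(out); len(m) == 2 && string(m[1]) != "" {
		tz = string(m[1])
	}

	// Strip anything that is not a valid timezone character, then validate.
	tz = regexp.MustCompile(`(?i)[^a-z_/]+`).ReplaceAllString(tz, "")
	if _, err := time.LoadLocation(tz); err != nil {
		fmt.Println("invalid timezone:", err)
		return
	}
	fmt.Println("using timezone", tz)
}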
|
||||
|
||||
// Gets the system release name.
func getSystemName() (string, error) {
	// use osrelease to get release version and ID
	release, err := osrelease.Read()
	if err != nil {
		return "", err
	}
	return release["ID"], nil
}
|
||||
|
||||
@@ -3,8 +3,8 @@ package config
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type dockerNetworkInterfaces struct {
|
||||
@@ -12,7 +12,6 @@ type dockerNetworkInterfaces struct {
|
||||
Subnet string `default:"172.18.0.0/16"`
|
||||
Gateway string `default:"172.18.0.1"`
|
||||
}
|
||||
|
||||
V6 struct {
|
||||
Subnet string `default:"fdba:17c8:6c94::/64"`
|
||||
Gateway string `default:"fdba:17c8:6c94::1011"`
|
||||
@@ -39,8 +38,8 @@ type DockerNetworkConfiguration struct {
|
||||
Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
|
||||
}
|
||||
|
||||
// Defines the docker configuration used by the daemon when interacting with
|
||||
// containers and networks on the system.
|
||||
// DockerConfiguration defines the docker configuration used by the daemon when
|
||||
// interacting with containers and networks on the system.
|
||||
type DockerConfiguration struct {
|
||||
// Network configuration that should be used when creating a new network
|
||||
// for containers run through the daemon.
|
||||
@@ -58,23 +57,22 @@ type DockerConfiguration struct {
|
||||
TmpfsSize uint `default:"100" json:"tmpfs_size" yaml:"tmpfs_size"`
|
||||
}
|
||||
|
||||
// RegistryConfiguration defines the authentication credentials for a given
// Docker registry.
type RegistryConfiguration struct {
	Username string `yaml:"username"`
	Password string `yaml:"password"`
}

// Base64 returns the authentication for a given registry as a base64 encoded
// string value.
func (c RegistryConfiguration) Base64() (string, error) {
	b, err := json.Marshal(types.AuthConfig{
		Username: c.Username,
		Password: c.Password,
	})
	if err != nil {
		return "", err
	}
|
||||
|
||||
return base64.URLEncoding.EncodeToString(b), nil
|
||||
}
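A hedged usage sketch (not part of this change): the base64 string produced above is what the Docker SDK expects in the RegistryAuth field when pulling from an authenticated registry. The helper name and image reference are illustrative; the context, io, ioutil and Docker client/types imports are assumed.

func pullWithAuth(ctx context.Context, cli *client.Client, reg RegistryConfiguration, image string) error {
	auth, err := reg.Base64()
	if err != nil {
		return err
	}
	rc, err := cli.ImagePull(ctx, image, types.ImagePullOptions{RegistryAuth: auth})
	if err != nil {
		return err
	}
	defer rc.Close()
	// Drain the progress stream so the pull runs to completion.
	_, err = io.Copy(ioutil.Discard, rc)
	return err
}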
|
||||
|
||||
@@ -1,225 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Defines basic system configuration settings.
|
||||
type SystemConfiguration struct {
|
||||
// The root directory where all of the pterodactyl data is stored at.
|
||||
RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
|
||||
|
||||
// Directory where logs for server installations and other wings events are logged.
|
||||
LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
|
||||
|
||||
// Directory where the server data is stored at.
|
||||
Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
|
||||
|
||||
// Directory where server archives for transferring will be stored.
|
||||
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
|
||||
|
||||
// Directory where local backups will be stored on the machine.
|
||||
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
|
||||
|
||||
// The user that should own all of the server files, and be used for containers.
|
||||
Username string `default:"pterodactyl" yaml:"username"`
|
||||
|
||||
// The timezone for this Wings instance. This is detected by Wings automatically if possible,
|
||||
// and falls back to UTC if not able to be detected. If you need to set this manually, that
|
||||
// can also be done.
|
||||
//
|
||||
// This timezone value is passed into all containers created by Wings.
|
||||
Timezone string `yaml:"timezone"`
|
||||
|
||||
// Definitions for the user that gets created to ensure that we can quickly access
|
||||
// this information without constantly having to do a system lookup.
|
||||
User struct {
|
||||
Uid int
|
||||
Gid int
|
||||
}
|
||||
|
||||
// The amount of time in seconds that can elapse before a server's disk space calculation is
|
||||
// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
|
||||
// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
|
||||
// process.
|
||||
DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`
|
||||
|
||||
// Determines if Wings should detect a server that stops with a normal exit code of
|
||||
// "0" as being crashed if the process stopped without any Wings interaction. E.g.
|
||||
// the user did not press the stop button, but the process stopped cleanly.
|
||||
DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"`
|
||||
|
||||
// If set to true, file permissions for a server will be checked when the process is
|
||||
// booted. This can cause boot delays if the server has a large amount of files. In most
|
||||
// cases disabling this should not have any major impact unless external processes are
|
||||
// frequently modifying a servers' files.
|
||||
CheckPermissionsOnBoot bool `default:"true" yaml:"check_permissions_on_boot"`
|
||||
|
||||
// If set to false Wings will not attempt to write a log rotate configuration to the disk
|
||||
// when it boots and one is not detected.
|
||||
EnableLogRotate bool `default:"true" yaml:"enable_log_rotate"`
|
||||
|
||||
Sftp SftpConfiguration `yaml:"sftp"`
|
||||
}
|
||||
|
||||
// Ensures that all of the system directories exist on the system. These directories are
|
||||
// created so that only the owner can read the data, and no other users.
|
||||
func (sc *SystemConfiguration) ConfigureDirectories() error {
|
||||
log.WithField("path", sc.RootDirectory).Debug("ensuring root data directory exists")
|
||||
if err := os.MkdirAll(sc.RootDirectory, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// There are a non-trivial number of users out there whose data directories are actually a
|
||||
// symlink to another location on the disk. If we do not resolve that final destination at this
|
||||
// point things will appear to work, but endless errors will be encountered when we try to
|
||||
// verify accessed paths since they will all end up resolving outside the expected data directory.
|
||||
//
|
||||
// For the sake of automating away as much of this as possible, see if the data directory is a
|
||||
// symlink, and if so resolve to its final real path, and then update the configuration to use
|
||||
// that.
|
||||
if d, err := filepath.EvalSymlinks(sc.Data); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
} else if d != sc.Data {
|
||||
sc.Data = d
|
||||
}
|
||||
|
||||
log.WithField("path", sc.Data).Debug("ensuring server data directory exists")
|
||||
if err := os.MkdirAll(sc.Data, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithField("path", sc.ArchiveDirectory).Debug("ensuring archive data directory exists")
|
||||
if err := os.MkdirAll(sc.ArchiveDirectory, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithField("path", sc.BackupDirectory).Debug("ensuring backup data directory exists")
|
||||
if err := os.MkdirAll(sc.BackupDirectory, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Writes a logrotate file for wings to the system logrotate configuration directory if one
|
||||
// exists and a logrotate file is not found. This allows us to basically automate away the log
|
||||
// rotation for most installs, but also enable users to make modifications on their own.
|
||||
func (sc *SystemConfiguration) EnableLogRotation() error {
|
||||
// Do nothing if not enabled.
|
||||
if sc.EnableLogRotate == false {
|
||||
log.Info("skipping log rotate configuration, disabled in wings config file")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if st, err := os.Stat("/etc/logrotate.d"); err != nil && !os.IsNotExist(err) {
|
||||
return errors.WithStack(err)
|
||||
} else if (err != nil && os.IsNotExist(err)) || !st.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := os.Stat("/etc/logrotate.d/wings"); err != nil && !os.IsNotExist(err) {
|
||||
return errors.WithStack(err)
|
||||
} else if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Info("no log rotation configuration found, system is configured to support it, adding file now")
|
||||
// If we've gotten to this point it means the logrotate directory exists on the system
|
||||
// but there is not a file for wings already. In that case, let us write a new file to
|
||||
// it so files can be rotated easily.
|
||||
f, err := os.Create("/etc/logrotate.d/wings")
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
t, err := template.New("logrotate").Parse(`
|
||||
{{.LogDirectory}}/wings.log {
|
||||
size 10M
|
||||
compress
|
||||
delaycompress
|
||||
dateext
|
||||
maxage 7
|
||||
missingok
|
||||
notifempty
|
||||
create 0640 {{.User.Uid}} {{.User.Gid}}
|
||||
postrotate
|
||||
killall -SIGHUP wings
|
||||
endscript
|
||||
}`)
|
||||
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return errors.Wrap(t.Execute(f, sc), "failed to write logrotate file to disk")
|
||||
}
|
||||
|
||||
// Returns the location of the JSON file that tracks server states.
|
||||
func (sc *SystemConfiguration) GetStatesPath() string {
|
||||
return path.Join(sc.RootDirectory, "states.json")
|
||||
}
|
||||
|
||||
// Returns the location of the JSON file that tracks server states.
|
||||
func (sc *SystemConfiguration) GetInstallLogPath() string {
|
||||
return path.Join(sc.LogDirectory, "install/")
|
||||
}
|
||||
|
||||
// Configures the timezone data for the configuration if it is currently missing. If
|
||||
// a value has been set, this functionality will only run to validate that the timezone
|
||||
// being used is valid.
|
||||
func (sc *SystemConfiguration) ConfigureTimezone() error {
|
||||
if sc.Timezone == "" {
|
||||
if b, err := ioutil.ReadFile("/etc/timezone"); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.Wrap(err, "failed to open /etc/timezone for automatic server timezone calibration")
|
||||
}
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second * 5)
|
||||
// Okay, file isn't found on this OS, we will try using timedatectl to handle this. If this
|
||||
// command fails, exit, but if it returns a value use that. If no value is returned we will
|
||||
// fall through to UTC to get Wings booted at least.
|
||||
out, err := exec.CommandContext(ctx, "timedatectl").Output()
|
||||
if err != nil {
|
||||
log.WithField("error", err).Warn("failed to execute \"timedatectl\" to determine system timezone, falling back to UTC")
|
||||
|
||||
sc.Timezone = "UTC"
|
||||
return nil
|
||||
}
|
||||
|
||||
r := regexp.MustCompile(`Time zone: ([\w/]+)`)
|
||||
matches := r.FindSubmatch(out)
|
||||
if len(matches) != 2 || string(matches[1]) == "" {
|
||||
log.Warn("failed to parse timezone from \"timedatectl\" output, falling back to UTC")
|
||||
|
||||
sc.Timezone = "UTC"
|
||||
return nil
|
||||
}
|
||||
|
||||
sc.Timezone = string(matches[1])
|
||||
} else {
|
||||
sc.Timezone = string(b)
|
||||
}
|
||||
}
|
||||
|
||||
sc.Timezone = regexp.MustCompile(`(?i)[^a-z_/]+`).ReplaceAllString(sc.Timezone, "")
|
||||
|
||||
_, err := time.LoadLocation(sc.Timezone)
|
||||
|
||||
return errors.Wrap(err, fmt.Sprintf("the supplied timezone %s is invalid", sc.Timezone))
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package config
|
||||
|
||||
type ConsoleThrottles struct {
|
||||
// Whether or not the throttler is enabled for this instance.
|
||||
Enabled bool `json:"enabled" yaml:"enabled" default:"true"`
|
||||
|
||||
// The total number of lines that can be output in a given LineResetInterval period before
|
||||
// a warning is triggered and counted against the server.
|
||||
Lines uint64 `json:"lines" yaml:"lines" default:"2000"`
|
||||
|
||||
// The total number of throttle activations that can accumulate before a server is considered
|
||||
// to be breaching and will be stopped. This value is decremented by one every DecayInterval.
|
||||
MaximumTriggerCount uint64 `json:"maximum_trigger_count" yaml:"maximum_trigger_count" default:"5"`
|
||||
|
||||
// The amount of time after which the number of lines processed is reset to 0. This runs in
|
||||
// a constant loop and is not affected by the current console output volumes. By default, this
|
||||
// will reset the processed line count back to 0 every 100ms.
|
||||
LineResetInterval uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
|
||||
|
||||
// The amount of time in milliseconds that must pass without an output warning being triggered
|
||||
// before a throttle activation is decremented.
|
||||
DecayInterval uint64 `json:"decay_interval" yaml:"decay_interval" default:"10000"`
|
||||
|
||||
// The amount of time that a server is allowed to be stopping for before it is terminated
|
||||
// forcefully if it triggers output throttles.
|
||||
StopGracePeriod uint `json:"stop_grace_period" yaml:"stop_grace_period" default:"15"`
|
||||
}
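The struct above only defines the knobs; the throttler that consumes them lives elsewhere in the codebase. A minimal sketch, not the actual wings implementation, of how Lines and LineResetInterval could drive a line counter (sync, time and context imports assumed):

type lineThrottle struct {
	mu    sync.Mutex
	count uint64
}

// increment counts one output line and invokes onTrigger once the configured
// limit (ConsoleThrottles.Lines) is exceeded within the current window.
func (t *lineThrottle) increment(limit uint64, onTrigger func()) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.count++
	if t.count > limit {
		onTrigger()
	}
}

// resetLoop zeroes the counter every interval (ConsoleThrottles.LineResetInterval,
// in milliseconds) until the context is canceled.
func (t *lineThrottle) resetLoop(ctx context.Context, every time.Duration) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			t.mu.Lock()
			t.count = 0
			t.mu.Unlock()
		}
	}
}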
|
||||
@@ -1,17 +1,20 @@
|
||||
version: '3.5'
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
daemon:
|
||||
build: .
|
||||
wings:
|
||||
image: ghcr.io/pterodactyl/wings:latest
|
||||
restart: always
|
||||
networks:
|
||||
- daemon0
|
||||
- wings0
|
||||
ports:
|
||||
- "8080:8080"
|
||||
- "2022:2022"
|
||||
tty: true
|
||||
environment:
|
||||
- "DEBUG=false"
|
||||
- "TZ=UTC" # change to the three letter timezone of your choosing
|
||||
TZ: "UTC"
|
||||
WINGS_UID: 988
|
||||
WINGS_GID: 988
|
||||
WINGS_USERNAME: pterodactyl
|
||||
volumes:
|
||||
- "/var/run/docker.sock:/var/run/docker.sock"
|
||||
- "/var/lib/docker/containers/:/var/lib/docker/containers/"
|
||||
@@ -19,17 +22,18 @@ services:
|
||||
- "/var/lib/pterodactyl/:/var/lib/pterodactyl/"
|
||||
- "/var/log/pterodactyl/:/var/log/pterodactyl/"
|
||||
- "/tmp/pterodactyl/:/tmp/pterodactyl/"
|
||||
## you may need /srv/daemon-data if you are upgrading from an old daemon
|
||||
## - "/srv/daemon-data/:/srv/daemon-data/"
|
||||
## Required for ssl if you user let's encrypt. uncomment to use.
|
||||
## - "/etc/letsencrypt/:/etc/letsencrypt/"
|
||||
- "/etc/ssl/certs:/etc/ssl/certs:ro"
|
||||
# you may need /srv/daemon-data if you are upgrading from an old daemon
|
||||
#- "/srv/daemon-data/:/srv/daemon-data/"
|
||||
# Required for ssl if you use let's encrypt. uncomment to use.
|
||||
#- "/etc/letsencrypt/:/etc/letsencrypt/"
|
||||
|
||||
networks:
|
||||
daemon0:
|
||||
name: daemon0
|
||||
wings0:
|
||||
name: wings0
|
||||
driver: bridge
|
||||
ipam:
|
||||
config:
|
||||
- subnet: "172.21.0.0/16"
|
||||
driver_opts:
|
||||
com.docker.network.bridge.name: daemon0
|
||||
com.docker.network.bridge.name: wings0
|
||||
|
||||
@@ -2,9 +2,10 @@ package environment
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Defines the allocations available for a given server. When using the Docker environment
|
||||
@@ -38,15 +39,16 @@ func (a *Allocations) Bindings() nat.PortMap {
|
||||
continue
|
||||
}
|
||||
|
||||
binding := []nat.PortBinding{
|
||||
{
|
||||
binding := nat.PortBinding{
|
||||
HostIP: ip,
|
||||
HostPort: strconv.Itoa(port),
|
||||
},
|
||||
}
|
||||
|
||||
out[nat.Port(fmt.Sprintf("%d/tcp", port))] = binding
|
||||
out[nat.Port(fmt.Sprintf("%d/udp", port))] = binding
|
||||
tcp := nat.Port(fmt.Sprintf("%d/tcp", port))
|
||||
udp := nat.Port(fmt.Sprintf("%d/udp", port))
|
||||
|
||||
out[tcp] = append(out[tcp], binding)
|
||||
out[udp] = append(out[udp], binding)
|
||||
}
|
||||
}
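The change in this hunk appends a shared binding to both the TCP and UDP keys of the port map instead of overwriting them with a single-element slice, so multiple host IPs can map onto the same container port. A standalone sketch of building a nat.PortMap that way:

package main

import (
	"fmt"
	"strconv"

	"github.com/docker/go-connections/nat"
)

func main() {
	out := nat.PortMap{}
	ip, port := "0.0.0.0", 25565

	binding := nat.PortBinding{HostIP: ip, HostPort: strconv.Itoa(port)}

	tcp := nat.Port(fmt.Sprintf("%d/tcp", port))
	udp := nat.Port(fmt.Sprintf("%d/udp", port))

	// Appending rather than assigning preserves any bindings already present
	// for this port, which is what the updated Bindings() above relies on.
	out[tcp] = append(out[tcp], binding)
	out[udp] = append(out[udp], binding)

	fmt.Println(out)
}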
|
||||
|
||||
|
||||
@@ -2,115 +2,103 @@ package environment
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/apex/log"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
)
|
||||
|
||||
var _conce sync.Once
var _client *client.Client

// Docker returns a docker client to be used throughout the codebase. Once a
// client has been created it will be returned for all subsequent calls to this
// function.
func Docker() (*client.Client, error) {
	var err error
	_conce.Do(func() {
		_client, err = client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	})
	return _client, errors.Wrap(err, "environment/docker: could not create client")
}
|
||||
|
||||
// ConfigureDocker configures the required network for the docker environment.
func ConfigureDocker(ctx context.Context) error {
	// Ensure the required docker network exists on the system.
	cli, err := Docker()
	if err != nil {
		return err
	}

	nw := config.Get().Docker.Network
	resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
	if err != nil {
		if client.IsErrNotFound(err) {
			log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
			if err := createDockerNetwork(ctx, cli); err != nil {
				return err
			}
		} else {
			return err
		}
	}

	config.Update(func(c *config.Configuration) {
		c.Docker.Network.Driver = resource.Driver
		switch c.Docker.Network.Driver {
		case "host":
			c.Docker.Network.Interface = "127.0.0.1"
			c.Docker.Network.ISPN = false
		case "overlay":
			fallthrough
		case "weavemesh":
			c.Docker.Network.Interface = ""
			c.Docker.Network.ISPN = true
		default:
			c.Docker.Network.ISPN = false
		}
	})
	return nil
}
|
||||
|
||||
// Creates a new network on the machine if one does not exist already.
func createDockerNetwork(ctx context.Context, cli *client.Client) error {
	nw := config.Get().Docker.Network
	_, err := cli.NetworkCreate(ctx, nw.Name, types.NetworkCreate{
		Driver:     nw.Driver,
		EnableIPv6: true,
		Internal:   nw.IsInternal,
		IPAM: &network.IPAM{
			Config: []network.IPAMConfig{{
				Subnet:  nw.Interfaces.V4.Subnet,
				Gateway: nw.Interfaces.V4.Gateway,
			}, {
				Subnet:  nw.Interfaces.V6.Subnet,
				Gateway: nw.Interfaces.V6.Gateway,
			}},
		},
		Options: map[string]string{
			"encryption": "false",
			"com.docker.network.bridge.default_bridge":       "false",
			"com.docker.network.bridge.enable_icc":            strconv.FormatBool(nw.EnableICC),
			"com.docker.network.bridge.enable_ip_masquerade": "true",
			"com.docker.network.bridge.host_binding_ipv4":    "0.0.0.0",
			"com.docker.network.bridge.name":                 "pterodactyl0",
			"com.docker.network.driver.mtu":                  "1500",
		},
	})
	if err != nil {
		return err
	}

	if nw.Driver != "host" && nw.Driver != "overlay" && nw.Driver != "weavemesh" {
		config.Update(func(c *config.Configuration) {
			c.Docker.Network.Interface = c.Docker.Network.Interfaces.V4.Gateway
		})
	}

	return nil
|
||||
}
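A hypothetical call site (the real caller is not shown in this diff): run the network configuration once at boot with a bounded timeout, so a hung Docker daemon cannot block startup indefinitely. The context and time imports are assumed.

func configureDockerAtBoot() error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()

	// Inspects the configured network and creates it when missing.
	return ConfigureDocker(ctx)
}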
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
package docker
|
||||
|
||||
import "io"
|
||||
|
||||
type Console struct {
|
||||
HandlerFunc *func(string)
|
||||
}
|
||||
|
||||
var _ io.Writer = Console{}
|
||||
|
||||
func (c Console) Write(b []byte) (int, error) {
|
||||
if c.HandlerFunc != nil {
|
||||
l := make([]byte, len(b))
|
||||
copy(l, b)
|
||||
|
||||
(*c.HandlerFunc)(string(l))
|
||||
}
|
||||
|
||||
return len(b), nil
|
||||
}
|
||||
@@ -2,41 +2,55 @@ package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/daemon/logger/jsonfilelog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
type imagePullStatus struct {
|
||||
Status string `json:"status"`
|
||||
Progress string `json:"progress"`
|
||||
var ErrNotAttached = errors.Sentinel("not attached to instance")
|
||||
|
||||
// A custom console writer that allows us to keep a function blocked until the
|
||||
// given stream is properly closed. This does nothing special, only exists to
|
||||
// make a noop io.Writer.
|
||||
type noopWriter struct{}
|
||||
|
||||
var _ io.Writer = noopWriter{}
|
||||
|
||||
// Implement the required Write function to satisfy the io.Writer interface.
|
||||
func (nw noopWriter) Write(b []byte) (int, error) {
|
||||
return len(b), nil
|
||||
}
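A short sketch of why a no-op writer is enough here: io.Copy blocks until the source reader returns io.EOF or an error, so copying the attach stream into a noopWriter keeps the calling goroutine alive exactly as long as the container's stream is open, without buffering any of the output. The helper name is illustrative.

func waitForStreamClose(r io.Reader) error {
	// Discards every byte; returns only when the stream is closed.
	_, err := io.Copy(noopWriter{}, r)
	return err
}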
|
||||
|
||||
// Attaches to the docker container itself and ensures that we can pipe data in and out
|
||||
// of the process stream. This should not be used for reading console data as you *will*
|
||||
// miss important output at the beginning because of the time delay with attaching to the
|
||||
// output.
|
||||
// Attach attaches to the docker container itself and ensures that we can pipe
|
||||
// data in and out of the process stream. This should not be used for reading
|
||||
// console data as you *will* miss important output at the beginning because of
|
||||
// the time delay with attaching to the output.
|
||||
//
|
||||
// Calling this function will poll resources for the container in the background
|
||||
// until the provided context is canceled by the caller. Failure to cancel said
|
||||
// context will cause background memory leaks as the goroutine will not exit.
|
||||
func (e *Environment) Attach() error {
|
||||
if e.IsAttached() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := e.followOutput(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
opts := types.ContainerAttachOptions{
|
||||
@@ -48,63 +62,59 @@ func (e *Environment) Attach() error {
|
||||
|
||||
// Set the stream again with the container.
|
||||
if st, err := e.client.ContainerAttach(context.Background(), e.Id, opts); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
} else {
|
||||
e.SetStream(&st)
|
||||
}
|
||||
|
||||
c := new(Console)
|
||||
go func(console *Console) {
|
||||
go func() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
defer cancel()
|
||||
defer e.stream.Close()
|
||||
defer func() {
|
||||
e.setState(environment.ProcessOfflineState)
|
||||
e.SetState(environment.ProcessOfflineState)
|
||||
e.SetStream(nil)
|
||||
}()
|
||||
|
||||
// Poll resources in a separate thread since this will block the copy call below
|
||||
// from being reached until it is completed if not run in a separate process. However,
|
||||
// we still want it to be stopped when the copy operation below is finished running which
|
||||
// indicates that the container is no longer running.
|
||||
go func(ctx context.Context) {
|
||||
go func() {
|
||||
if err := e.pollResources(ctx); err != nil {
|
||||
log.WithField("environment_id", e.Id).WithField("error", errors.WithStack(err)).Error("error during environment resource polling")
|
||||
if !errors.Is(err, context.Canceled) {
|
||||
e.log().WithField("error", err).Error("error during environment resource polling")
|
||||
} else {
|
||||
e.log().Warn("stopping server resource polling: context canceled")
|
||||
}
|
||||
}(ctx)
|
||||
}
|
||||
}()
|
||||
|
||||
// Stream the reader output to the console which will then fire off events and handle console
|
||||
// throttling and sending the output to the user.
|
||||
if _, err := io.Copy(console, e.stream.Reader); err != nil {
|
||||
log.WithField("environment_id", e.Id).WithField("error", errors.WithStack(err)).Error("error while copying environment output to console")
|
||||
// Block the completion of this routine until the container is no longer running. This allows
|
||||
// the pollResources function to run until it needs to be stopped. Because the container
|
||||
// can be polled for resource usage, even when stopped, we need to have this logic present
|
||||
// in order to cancel the context and therefore stop the routine that is spawned.
|
||||
//
|
||||
// For now, DO NOT use client#ContainerWait from the Docker package. There is a nasty
|
||||
// bug causing containers to hang on deletion and cause servers to lock up on the system.
|
||||
//
|
||||
// This weird code isn't intuitive, but it keeps the function from ending until the container
|
||||
// is stopped and therefore the stream reader ends up closed.
|
||||
// @see https://github.com/moby/moby/issues/41827
|
||||
c := new(noopWriter)
|
||||
if _, err := io.Copy(c, e.stream.Reader); err != nil {
|
||||
e.log().WithField("error", err).Error("could not copy from environment stream to noop writer")
|
||||
}
|
||||
}(c)
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
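A reduced sketch of the pattern Attach uses above (helper names are illustrative, not part of this change): the background poller is tied to a context that is canceled as soon as the blocking stream copy returns, so the polling goroutine cannot outlive the attachment. The context, io, errors and apex/log imports are assumed.

func runWithPoller(stream io.Reader, poll func(ctx context.Context) error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		if err := poll(ctx); err != nil && !errors.Is(err, context.Canceled) {
			log.WithField("error", err).Error("poller stopped unexpectedly")
		}
	}()

	// Blocks until the stream closes; the deferred cancel then stops the poller.
	_, _ = io.Copy(noopWriter{}, stream)
}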
|
||||
|
||||
func (e *Environment) resources() container.Resources {
|
||||
l := e.Configuration.Limits()
|
||||
|
||||
return container.Resources{
|
||||
Memory: l.BoundedMemoryLimit(),
|
||||
MemoryReservation: l.MemoryLimit * 1_000_000,
|
||||
MemorySwap: l.ConvertedSwap(),
|
||||
CPUQuota: l.ConvertedCpuLimit(),
|
||||
CPUPeriod: 100_000,
|
||||
CPUShares: 1024,
|
||||
BlkioWeight: l.IoWeight,
|
||||
OomKillDisable: &l.OOMDisabled,
|
||||
CpusetCpus: l.Threads,
|
||||
}
|
||||
}
|
||||
|
||||
// Performs an in-place update of the Docker container's resource limits without actually
|
||||
// making any changes to the operational state of the container. This allows memory, cpu,
|
||||
// and IO limitations to be adjusted on the fly for individual instances.
|
||||
// InSituUpdate performs an in-place update of the Docker container's resource
|
||||
// limits without actually making any changes to the operational state of the
|
||||
// container. This allows memory, cpu, and IO limitations to be adjusted on the
|
||||
// fly for individual instances.
|
||||
func (e *Environment) InSituUpdate() error {
|
||||
if _, err := e.client.ContainerInspect(context.Background(), e.Id); err != nil {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer cancel()
|
||||
|
||||
if _, err := e.client.ContainerInspect(ctx, e.Id); err != nil {
|
||||
// If the container doesn't exist for some reason there really isn't anything
|
||||
// we can do to fix that in this process (it doesn't make sense at least). In those
|
||||
// cases just return without doing anything since we still want to save the configuration
|
||||
@@ -114,25 +124,24 @@ func (e *Environment) InSituUpdate() error {
|
||||
if client.IsErrNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.WithStack(err)
|
||||
return errors.Wrap(err, "environment/docker: could not inspect container")
|
||||
}
|
||||
|
||||
u := container.UpdateConfig{
|
||||
// CPU pinning cannot be removed once it is applied to a container. The same is true
|
||||
// for removing memory limits, a container must be re-created.
|
||||
//
|
||||
// @see https://github.com/moby/moby/issues/41946
|
||||
if _, err := e.client.ContainerUpdate(ctx, e.Id, container.UpdateConfig{
|
||||
Resources: e.resources(),
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "environment/docker: could not update container")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
||||
defer cancel()
|
||||
if _, err := e.client.ContainerUpdate(ctx, e.Id, u); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Creates a new container for the server using all of the data that is currently
|
||||
// available for it. If the container already exists it will be returned.
|
||||
// Create creates a new container for the server using all of the data that is
|
||||
// currently available for it. If the container already exists it will be
|
||||
// returned.
|
||||
func (e *Environment) Create() error {
|
||||
// If the container already exists don't hit the user with an error, just return
|
||||
// the current information about it which is what we would do when creating the
|
||||
@@ -140,12 +149,12 @@ func (e *Environment) Create() error {
|
||||
if _, err := e.client.ContainerInspect(context.Background(), e.Id); err == nil {
|
||||
return nil
|
||||
} else if !client.IsErrNotFound(err) {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Try to pull the requested image before creating the container.
|
||||
if err := e.ensureImageExists(e.meta.Image); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
a := e.Configuration.Allocations()
|
||||
@@ -207,6 +216,7 @@ func (e *Environment) Create() error {
|
||||
Config: map[string]string{
|
||||
"max-size": "5m",
|
||||
"max-file": "1",
|
||||
"compress": "false",
|
||||
},
|
||||
},
|
||||
|
||||
@@ -219,33 +229,18 @@ func (e *Environment) Create() error {
|
||||
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
|
||||
}
|
||||
|
||||
if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, e.Id); err != nil {
|
||||
return errors.WithStack(err)
|
||||
if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Environment) convertMounts() []mount.Mount {
|
||||
var out []mount.Mount
|
||||
|
||||
for _, m := range e.Configuration.Mounts() {
|
||||
out = append(out, mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
Source: m.Source,
|
||||
Target: m.Target,
|
||||
ReadOnly: m.ReadOnly,
|
||||
})
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// Remove the Docker container from the machine. If the container is currently running
|
||||
// it will be forcibly stopped by Docker.
|
||||
// Destroy will remove the Docker container from the server. If the container
|
||||
// is currently running it will be forcibly stopped by Docker.
|
||||
func (e *Environment) Destroy() error {
|
||||
// We set it to stopping than offline to prevent crash detection from being triggered.
|
||||
e.setState(environment.ProcessStoppingState)
|
||||
e.SetState(environment.ProcessStoppingState)
|
||||
|
||||
err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
@@ -253,6 +248,8 @@ func (e *Environment) Destroy() error {
|
||||
Force: true,
|
||||
})
|
||||
|
||||
e.SetState(environment.ProcessOfflineState)
|
||||
|
||||
// Don't trigger a destroy failure if we try to delete a container that does not
|
||||
// exist on the system. We're just a step ahead of ourselves in that case.
|
||||
//
|
||||
@@ -261,20 +258,63 @@ func (e *Environment) Destroy() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
e.setState(environment.ProcessOfflineState)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Attaches to the log for the container. This avoids us missing crucial output that
|
||||
// happens in the split seconds before the code moves from 'Starting' to 'Attaching'
|
||||
// on the process.
|
||||
// SendCommand sends the specified command to the stdin of the running container
|
||||
// instance. There is no confirmation that this data is sent successfully, only
|
||||
// that it gets pushed into the stdin.
|
||||
func (e *Environment) SendCommand(c string) error {
|
||||
if !e.IsAttached() {
|
||||
return errors.Wrap(ErrNotAttached, "environment/docker: cannot send command to container")
|
||||
}
|
||||
|
||||
e.mu.RLock()
|
||||
defer e.mu.RUnlock()
|
||||
|
||||
// If the command being processed is the same as the process stop command then we
|
||||
// want to mark the server as entering the stopping state otherwise the process will
|
||||
// stop and Wings will think it has crashed and attempt to restart it.
|
||||
if e.meta.Stop.Type == "command" && c == e.meta.Stop.Value {
|
||||
e.SetState(environment.ProcessStoppingState)
|
||||
}
|
||||
|
||||
_, err := e.stream.Conn.Write([]byte(c + "\n"))
|
||||
|
||||
return errors.Wrap(err, "environment/docker: could not write to container stream")
|
||||
}
|
||||
|
||||
// Readlog reads the log file for the server. This does not care if the server
|
||||
// is running or not, it will simply try to read the last X bytes of the file
|
||||
// and return them.
|
||||
func (e *Environment) Readlog(lines int) ([]string, error) {
|
||||
r, err := e.client.ContainerLogs(context.Background(), e.Id, types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Tail: strconv.Itoa(lines),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
var out []string
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
out = append(out, scanner.Text())
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Attaches to the log for the container. This avoids us missing crucial output
|
||||
// that happens in the split seconds before the code moves from 'Starting' to
|
||||
// 'Attaching' on the process.
|
||||
func (e *Environment) followOutput() error {
|
||||
if exists, err := e.Exists(); !exists {
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.New(fmt.Sprintf("no such container: %s", e.Id))
|
||||
}
|
||||
|
||||
@@ -286,69 +326,55 @@ func (e *Environment) followOutput() error {
|
||||
}
|
||||
|
||||
reader, err := e.client.ContainerLogs(context.Background(), e.Id, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func(reader io.ReadCloser) {
|
||||
go e.scanOutput(reader)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Environment) scanOutput(reader io.ReadCloser) {
|
||||
defer reader.Close()
|
||||
|
||||
r := bufio.NewReader(reader)
|
||||
ParentLoop:
|
||||
for {
|
||||
var b bytes.Buffer
|
||||
var line []byte
|
||||
var isPrefix bool
|
||||
events := e.Events()
|
||||
|
||||
for {
|
||||
// Read the line and write it to the buffer.
|
||||
line, isPrefix, err = r.ReadLine()
|
||||
|
||||
// Certain games like Minecraft output absolutely random carriage returns in the output seemingly
|
||||
// in line with that it thinks is the terminal size. Those returns break a lot of output handling,
|
||||
// so we'll just replace them with proper new-lines and then split it later and send each line as
|
||||
// its own event in the response.
|
||||
b.Write(bytes.ReplaceAll(line, []byte(" \r"), []byte("\r\n")))
|
||||
|
||||
// Finish this loop and begin outputting the line if there is no prefix (the line fit into
|
||||
// the default buffer), or if we hit the end of the line.
|
||||
if !isPrefix || err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
// If we encountered an error with something in ReadLine that was not an EOF just abort
|
||||
// the entire process here.
|
||||
if err != nil {
|
||||
break ParentLoop
|
||||
}
|
||||
}
|
||||
|
||||
// Publish the line for this loop. Break on new-line characters so every line is sent as a single
|
||||
// output event, otherwise you get funky handling in the browser console.
|
||||
for _, line := range strings.Split(b.String(), "\r\n") {
|
||||
e.Events().Publish(environment.ConsoleOutputEvent, line)
|
||||
}
|
||||
|
||||
// If the error we got previously that lead to the line being output is an io.EOF we want to
|
||||
// exit the entire looping process.
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
err := system.ScanReader(reader, func(line string) {
|
||||
events.Publish(environment.ConsoleOutputEvent, line)
|
||||
})
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
log.WithField("error", err).WithField("container_id", e.Id).Warn("error processing scanner line in console output")
|
||||
}
|
||||
}(reader)
|
||||
|
||||
return errors.WithStack(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Pulls the image from Docker. If there is an error while pulling the image from the source
|
||||
// but the image already exists locally, we will report that error to the logger but continue
|
||||
// with the process.
|
||||
// Return here if the server is offline or currently stopping.
|
||||
if e.State() == environment.ProcessStoppingState || e.State() == environment.ProcessOfflineState {
|
||||
return
|
||||
}
|
||||
|
||||
// Close the current reader before starting a new one, the defer will still run
|
||||
// but it will do nothing if we already closed the stream.
|
||||
_ = reader.Close()
|
||||
|
||||
// Start following the output of the server again.
|
||||
go e.followOutput()
|
||||
}
|
||||
|
||||
type imagePullStatus struct {
|
||||
Status string `json:"status"`
|
||||
Progress string `json:"progress"`
|
||||
}
|
||||
|
||||
// Pulls the image from Docker. If there is an error while pulling the image
|
||||
// from the source but the image already exists locally, we will report that
|
||||
// error to the logger but continue with the process.
|
||||
//
|
||||
// The reasoning behind this is that Quay has had some serious outages as of late, and we don't
|
||||
// need to block all of the servers from booting just because of that. I'd imagine in a lot of
|
||||
// cases an outage shouldn't affect users too badly. It'll at least keep existing servers working
|
||||
// correctly if anything.
|
||||
// The reasoning behind this is that Quay has had some serious outages as of
|
||||
// late, and we don't need to block all of the servers from booting just because
|
||||
// of that. I'd imagine in a lot of cases an outage shouldn't affect users too
|
||||
// badly. It'll at least keep existing servers working correctly if anything.
|
||||
func (e *Environment) ensureImageExists(image string) error {
|
||||
e.Events().Publish(environment.DockerImagePullStarted, "")
|
||||
defer e.Events().Publish(environment.DockerImagePullCompleted, "")
|
||||
@@ -424,9 +450,11 @@ func (e *Environment) ensureImageExists(image string) error {
|
||||
// I'm not sure what the best approach here is, but this will block execution until the image
|
||||
// is done being pulled, which is what we need.
|
||||
scanner := bufio.NewScanner(out)
|
||||
|
||||
for scanner.Scan() {
|
||||
s := imagePullStatus{}
|
||||
fmt.Println(scanner.Text())
|
||||
|
||||
if err := json.Unmarshal(scanner.Bytes(), &s); err == nil {
|
||||
e.Events().Publish(environment.DockerImagePullStatus, s.Status+" "+s.Progress)
|
||||
}
|
||||
@@ -440,3 +468,34 @@ func (e *Environment) ensureImageExists(image string) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Environment) convertMounts() []mount.Mount {
|
||||
var out []mount.Mount
|
||||
|
||||
for _, m := range e.Configuration.Mounts() {
|
||||
out = append(out, mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
Source: m.Source,
|
||||
Target: m.Target,
|
||||
ReadOnly: m.ReadOnly,
|
||||
})
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func (e *Environment) resources() container.Resources {
|
||||
l := e.Configuration.Limits()
|
||||
|
||||
return container.Resources{
|
||||
Memory: l.BoundedMemoryLimit(),
|
||||
MemoryReservation: l.MemoryLimit * 1_000_000,
|
||||
MemorySwap: l.ConvertedSwap(),
|
||||
CPUQuota: l.ConvertedCpuLimit(),
|
||||
CPUPeriod: 100_000,
|
||||
CPUShares: 1024,
|
||||
BlkioWeight: l.IoWeight,
|
||||
OomKillDisable: &l.OOMDisabled,
|
||||
CpusetCpus: l.Threads,
|
||||
}
|
||||
}
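The conversions above map panel-facing units onto Docker's resource fields: memory limits are configured in megabytes and multiplied out to bytes for MemoryReservation, while CPU limits are expressed as a percentage of a single core and converted into a quota against the fixed 100,000µs CPUPeriod. The actual helpers (BoundedMemoryLimit, ConvertedCpuLimit, ConvertedSwap) are not shown in this diff, so the arithmetic below only illustrates the general quota/period relationship rather than their exact implementation:

func exampleLimitConversion() (int64, int64) {
	memoryMB := int64(1024)  // configured limit in MB
	cpuPercent := int64(150) // 150% == one and a half cores

	memoryBytes := memoryMB * 1_000_000    // matches the *1_000_000 factor above
	cpuQuota := cpuPercent * 100_000 / 100 // quota in µs against a 100_000µs period

	return memoryBytes, cpuQuota
}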
|
||||
|
||||
@@ -2,19 +2,23 @@ package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/events"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/events"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
type Metadata struct {
|
||||
Image string
|
||||
Stop api.ProcessStopConfiguration
|
||||
Stop remote.ProcessStopConfiguration
|
||||
}
|
||||
|
||||
// Ensure that the Docker environment is always implementing all of the methods
|
||||
@@ -23,7 +27,7 @@ var _ environment.ProcessEnvironment = (*Environment)(nil)
|
||||
|
||||
type Environment struct {
|
||||
mu sync.RWMutex
|
||||
eventMu sync.Mutex
|
||||
eventMu sync.Once
|
||||
|
||||
// The public identifier for this environment. In this case it is the Docker container
|
||||
// name that will be used for all instances created under it.
|
||||
@@ -47,15 +51,15 @@ type Environment struct {
|
||||
emitter *events.EventBus
|
||||
|
||||
// Tracks the environment state.
|
||||
st string
|
||||
stMu sync.RWMutex
|
||||
st *system.AtomicString
|
||||
}
|
||||
|
||||
// Creates a new base Docker environment. The ID passed through will be the ID that is used to
|
||||
// reference the container from here on out. This should be unique per-server (we use the UUID
|
||||
// by default). The container does not need to exist at this point.
|
||||
// New creates a new base Docker environment. The ID passed through will be the
|
||||
// ID that is used to reference the container from here on out. This should be
|
||||
// unique per-server (we use the UUID by default). The container does not need
|
||||
// to exist at this point.
|
||||
func New(id string, m *Metadata, c *environment.Configuration) (*Environment, error) {
|
||||
cli, err := environment.DockerClient()
|
||||
cli, err := environment.Docker()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -65,11 +69,16 @@ func New(id string, m *Metadata, c *environment.Configuration) (*Environment, er
|
||||
Configuration: c,
|
||||
meta: m,
|
||||
client: cli,
|
||||
st: system.NewAtomicString(environment.ProcessOfflineState),
|
||||
}
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
func (e *Environment) log() *log.Entry {
|
||||
return log.WithField("environment", e.Type()).WithField("container_id", e.Id)
|
||||
}
|
||||
|
||||
func (e *Environment) Type() string {
|
||||
return "docker"
|
||||
}
|
||||
@@ -77,8 +86,9 @@ func (e *Environment) Type() string {
|
||||
// Set if this process is currently attached to the process.
|
||||
func (e *Environment) SetStream(s *types.HijackedResponse) {
	e.mu.Lock()
	defer e.mu.Unlock()

	e.stream = s
}
|
||||
|
||||
// Determine if this process is currently attached to the container.
|
||||
@@ -90,12 +100,9 @@ func (e *Environment) IsAttached() bool {
|
||||
}
|
||||
|
||||
func (e *Environment) Events() *events.EventBus {
	e.eventMu.Do(func() {
		e.emitter = events.New()
	})
|
||||
|
||||
return e.emitter
|
||||
}
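The change above swaps a mutex plus nil check for sync.Once, which guarantees the emitter is initialized exactly once even under concurrent callers. A small standalone illustration of the same lazy-init pattern (types here are illustrative):

package main

import (
	"fmt"
	"sync"
)

type bus struct{ subscribers int }

type env struct {
	once    sync.Once
	emitter *bus
}

// events lazily creates the emitter on first use; every later call, from any
// goroutine, gets the same instance.
func (e *env) events() *bus {
	e.once.Do(func() {
		e.emitter = &bus{}
	})
	return e.emitter
}

func main() {
	e := &env{}
	fmt.Println(e.events() == e.events()) // true
}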
|
||||
@@ -155,7 +162,7 @@ func (e *Environment) ExitState() (uint32, bool, error) {
|
||||
return 1, false, nil
|
||||
}
|
||||
|
||||
return 0, false, errors.WithStack(err)
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
return uint32(c.State.ExitCode), c.State.OOMKilled, nil
|
||||
@@ -171,14 +178,39 @@ func (e *Environment) Config() *environment.Configuration {
|
||||
}
|
||||
|
||||
// Sets the stop configuration for the environment.
func (e *Environment) SetStopConfiguration(c remote.ProcessStopConfiguration) {
	e.mu.Lock()
	defer e.mu.Unlock()

	e.meta.Stop = c
}

func (e *Environment) SetImage(i string) {
	e.mu.Lock()
	defer e.mu.Unlock()

	e.meta.Image = i
}
|
||||
|
||||
func (e *Environment) State() string {
|
||||
return e.st.Load()
|
||||
}
|
||||
|
||||
// SetState sets the state of the environment. This emits an event that server's
|
||||
// can hook into to take their own actions and track their own state based on
|
||||
// the environment.
|
||||
func (e *Environment) SetState(state string) {
|
||||
if state != environment.ProcessOfflineState &&
|
||||
state != environment.ProcessStartingState &&
|
||||
state != environment.ProcessRunningState &&
|
||||
state != environment.ProcessStoppingState {
|
||||
panic(errors.New(fmt.Sprintf("invalid server state received: %s", state)))
|
||||
}
|
||||
|
||||
// Emit the event to any listeners that are currently registered.
|
||||
if e.State() != state {
|
||||
// If the state changed make sure we update the internal tracking to note that.
|
||||
e.st.Store(state)
|
||||
e.Events().Publish(environment.StateChangeEvent, state)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,16 +2,18 @@ package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
)
|
||||
|
||||
// Run before the container starts and get the process configuration from the Panel.
|
||||
@@ -20,13 +22,12 @@ import (
|
||||
//
|
||||
// This process will also confirm that the server environment exists and is in a bootable
|
||||
// state. This ensures that unexpected container deletion while Wings is running does
|
||||
// not result in the server becoming unbootable.
|
||||
// not result in the server becoming un-bootable.
|
||||
func (e *Environment) OnBeforeStart() error {
|
||||
// Always destroy and re-create the server container to ensure that synced data from
|
||||
// the Panel is used.
|
||||
// Always destroy and re-create the server container to ensure that synced data from the Panel is used.
|
||||
if err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
|
||||
if !client.IsErrNotFound(err) {
|
||||
return errors.Wrap(err, "failed to remove server docker container during pre-boot")
|
||||
return errors.WithMessage(err, "failed to remove server docker container during pre-boot")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,6 +50,7 @@ func (e *Environment) OnBeforeStart() error {
|
||||
// call to OnBeforeStart().
|
||||
func (e *Environment) Start() error {
|
||||
sawError := false
|
||||
|
||||
// If sawError is set to true there was an error somewhere in the pipeline that
|
||||
// got passed up, but we also want to ensure we set the server to be offline at
|
||||
// that point.
|
||||
@@ -57,8 +59,8 @@ func (e *Environment) Start() error {
|
||||
// If we don't set it to stopping first, you'll trigger crash detection which
|
||||
// we don't want to do at this point since it'll just immediately try to do the
|
||||
// exact same action that lead to it crashing in the first place...
|
||||
e.setState(environment.ProcessStoppingState)
|
||||
e.setState(environment.ProcessOfflineState)
|
||||
e.SetState(environment.ProcessStoppingState)
|
||||
e.SetState(environment.ProcessOfflineState)
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -69,12 +71,12 @@ func (e *Environment) Start() error {
|
||||
//
|
||||
// @see https://github.com/pterodactyl/panel/issues/2000
|
||||
if !client.IsErrNotFound(err) {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// If the server is running update our internal state and continue on with the attach.
|
||||
if c.State.Running {
|
||||
e.setState(environment.ProcessRunningState)
|
||||
e.SetState(environment.ProcessRunningState)
|
||||
|
||||
return e.Attach()
|
||||
}
|
||||
@@ -84,12 +86,12 @@ func (e *Environment) Start() error {
|
||||
// to truncate them.
|
||||
if _, err := os.Stat(c.LogPath); err == nil {
|
||||
if err := os.Truncate(c.LogPath, 0); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
e.setState(environment.ProcessStartingState)
|
||||
e.SetState(environment.ProcessStartingState)
|
||||
|
||||
// Set this to true for now, we will set it to false once we reach the
|
||||
// end of this chain.
|
||||
@@ -99,14 +101,14 @@ func (e *Environment) Start() error {
 	// exists on the system, and rebuild the container if that is required for server booting to
 	// occur.
 	if err := e.OnBeforeStart(); err != nil {
-		return errors.WithStack(err)
+		return err
 	}

 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
 	defer cancel()

 	if err := e.client.ContainerStart(ctx, e.Id, types.ContainerStartOptions{}); err != nil {
-		return errors.WithStack(err)
+		return err
 	}

 	// No errors, good to continue through.
@@ -115,61 +117,76 @@ func (e *Environment) Start() error {
 	return e.Attach()
 }
-// Stops the container that the server is running in. This will allow up to 30 seconds to pass
-// before the container is forcefully terminated if we are trying to stop it without using a command
-// sent into the instance.
+// Stop stops the container that the server is running in. This will allow up to
+// 30 seconds to pass before the container is forcefully terminated if we are
+// trying to stop it without using a command sent into the instance.
 //
-// You most likely want to be using WaitForStop() rather than this function, since this will return
-// as soon as the command is sent, rather than waiting for the process to be completely stopped.
+// You most likely want to be using WaitForStop() rather than this function,
+// since this will return as soon as the command is sent, rather than waiting
+// for the process to be completely stopped.
 //
 // TODO: pass context through from the server instance.
 func (e *Environment) Stop() error {
 	e.mu.RLock()
 	s := e.meta.Stop
 	e.mu.RUnlock()

-	if s.Type == "" || s.Type == api.ProcessStopSignal {
+	// A native "stop" as the Type field value will just skip over all of this
+	// logic and end up only executing the container stop command (which may or
+	// may not work as expected).
+	if s.Type == "" || s.Type == remote.ProcessStopSignal {
 		if s.Type == "" {
 			log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using termination procedure")
 		}

-		return e.Terminate(os.Kill)
+		signal := os.Kill
+		// Handle a few common cases, otherwise just fall through and just pass along
+		// the os.Kill signal to the process.
+		switch strings.ToUpper(s.Value) {
+		case "SIGABRT":
+			signal = syscall.SIGABRT
+		case "SIGINT":
+			signal = syscall.SIGINT
+		case "SIGTERM":
+			signal = syscall.SIGTERM
+		}
+		return e.Terminate(signal)
 	}

 	// If the process is already offline don't switch it back to stopping. Just leave it how
 	// it is and continue through to the stop handling for the process.
-	if e.State() != environment.ProcessOfflineState {
-		e.setState(environment.ProcessStoppingState)
+	if e.st.Load() != environment.ProcessOfflineState {
+		e.SetState(environment.ProcessStoppingState)
 	}

 	// Only attempt to send the stop command to the instance if we are actually attached to
 	// the instance. If we are not for some reason, just send the container stop event.
-	if e.IsAttached() && s.Type == api.ProcessStopCommand {
+	if e.IsAttached() && s.Type == remote.ProcessStopCommand {
 		return e.SendCommand(s.Value)
 	}

 	t := time.Second * 30
-	err := e.client.ContainerStop(context.Background(), e.Id, &t)
-	if err != nil {
+	if err := e.client.ContainerStop(context.Background(), e.Id, &t); err != nil {
 		// If the container does not exist just mark the process as stopped and return without
 		// an error.
 		if client.IsErrNotFound(err) {
 			e.SetStream(nil)
-			e.setState(environment.ProcessOfflineState)
-
+			e.SetState(environment.ProcessOfflineState)
 			return nil
 		}

-		return err
+		return errors.Wrap(err, "environment/docker: cannot stop container")
 	}

 	return nil
 }
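The new branch in Stop() above maps the configured stop value onto a concrete signal before handing it to Terminate(), falling back to os.Kill for anything it does not recognise. A small standalone sketch of that mapping, assuming a plain string input rather than the wings stop configuration struct:

```go
package main

import (
	"fmt"
	"os"
	"strings"
	"syscall"
)

// stopSignal maps a configured signal name onto an os.Signal,
// defaulting to os.Kill for anything unrecognised.
func stopSignal(value string) os.Signal {
	switch strings.ToUpper(value) {
	case "SIGABRT":
		return syscall.SIGABRT
	case "SIGINT":
		return syscall.SIGINT
	case "SIGTERM":
		return syscall.SIGTERM
	default:
		return os.Kill
	}
}

func main() {
	fmt.Println(stopSignal("sigterm")) // "terminated" on Linux
	fmt.Println(stopSignal("bogus"))   // "killed"
}
```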
-// Attempts to gracefully stop a server using the defined stop command. If the server
-// does not stop after seconds have passed, an error will be returned, or the instance
-// will be terminated forcefully depending on the value of the second argument.
+// WaitForStop attempts to gracefully stop a server using the defined stop
+// command. If the server does not stop after seconds have passed, an error will
+// be returned, or the instance will be terminated forcefully depending on the
+// value of the second argument.
 func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
 	if err := e.Stop(); err != nil {
-		return errors.WithStack(err)
+		return err
 	}

 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second)
@@ -183,22 +200,28 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
 	case <-ctx.Done():
 		if ctxErr := ctx.Err(); ctxErr != nil {
 			if terminate {
-				log.WithField("container_id", e.Id).Debug("server did not stop in time, executing process termination")
-
-				return errors.WithStack(e.Terminate(os.Kill))
+				log.WithField("container_id", e.Id).Info("server did not stop in time, executing process termination")
+				return e.Terminate(os.Kill)
 			}

-			return errors.WithStack(ctxErr)
+			return ctxErr
 		}
 	case err := <-errChan:
-		if err != nil {
+		// If the error stems from the container not existing there is no point in wasting
+		// CPU time to then try and terminate it.
+		if err != nil && !client.IsErrNotFound(err) {
 			if terminate {
-				log.WithField("container_id", e.Id).WithField("error", errors.WithStack(err)).Warn("error while waiting for container stop, attempting process termination")
-
-				return errors.WithStack(e.Terminate(os.Kill))
+				l := log.WithField("container_id", e.Id)
+				if errors.Is(err, context.DeadlineExceeded) {
+					l.Warn("deadline exceeded for container stop; terminating process")
+				} else {
+					l.WithField("error", err).Warn("error while waiting for container stop; terminating process")
+				}

+				return e.Terminate(os.Kill)
 			}
-			return errors.WithStack(err)
+			return errors.WrapIf(err, "environment/docker: error waiting on container to enter \"not-running\" state")
 		}
 	case <-ok:
 	}
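Since Stop() returns as soon as the command is dispatched, callers are expected to go through WaitForStop() instead. A hedged usage sketch follows; only the WaitForStop(seconds uint, terminate bool) signature comes from the diff, while the stopper interface and fakeEnv type are invented here so the example runs on its own.

```go
package main

import (
	"fmt"
	"log"
)

// stopper is the minimal slice of environment behaviour used here; the real
// type in the diff is the docker Environment.
type stopper interface {
	WaitForStop(seconds uint, terminate bool) error
}

// shutdown gives the server up to 30 seconds to stop cleanly and, failing
// that, asks the environment to terminate the container forcefully.
func shutdown(env stopper) {
	if err := env.WaitForStop(30, true); err != nil {
		log.Printf("failed to stop server: %v", err)
	}
}

// fakeEnv lets the sketch run without Docker.
type fakeEnv struct{}

func (fakeEnv) WaitForStop(seconds uint, terminate bool) error {
	fmt.Printf("waiting %ds (terminate=%v)\n", seconds, terminate)
	return nil
}

func main() { shutdown(fakeEnv{}) }
```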
@@ -206,10 +229,15 @@ func (e *Environment) WaitForStop(seconds uint, terminate bool) error {
 	return nil
 }

-// Forcefully terminates the container using the signal passed through.
+// Terminate forcefully terminates the container using the signal provided.
 func (e *Environment) Terminate(signal os.Signal) error {
 	c, err := e.client.ContainerInspect(context.Background(), e.Id)
 	if err != nil {
+		// Treat missing containers as an okay error state, means it is obviously
+		// already terminated at this point.
+		if client.IsErrNotFound(err) {
+			return nil
+		}
 		return errors.WithStack(err)
 	}

@@ -217,24 +245,21 @@ func (e *Environment) Terminate(signal os.Signal) error {
 	// If the container is not running but we're not already in a stopped state go ahead
 	// and update things to indicate we should be completely stopped now. Set to stopping
 	// first so crash detection is not triggered.
-	if e.State() != environment.ProcessOfflineState {
-		e.setState(environment.ProcessStoppingState)
-		e.setState(environment.ProcessOfflineState)
+	if e.st.Load() != environment.ProcessOfflineState {
+		e.SetState(environment.ProcessStoppingState)
+		e.SetState(environment.ProcessOfflineState)
 	}

 		return nil
 	}

 	// We set it to stopping then offline to prevent crash detection from being triggered.
-	e.setState(environment.ProcessStoppingState)
-
+	e.SetState(environment.ProcessStoppingState)
 	sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")

-	if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil {
-		return err
+	if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil && !client.IsErrNotFound(err) {
+		return errors.WithStack(err)
 	}

-	e.setState(environment.ProcessOfflineState)
+	e.SetState(environment.ProcessOfflineState)

 	return nil
 }
@@ -1,41 +0,0 @@
-package docker
-
-import (
-	"fmt"
-	"github.com/pkg/errors"
-	"github.com/pterodactyl/wings/environment"
-)
-
-// Returns the current environment state.
-func (e *Environment) State() string {
-	e.stMu.RLock()
-	defer e.stMu.RUnlock()
-
-	return e.st
-}
-
-// Sets the state of the environment. This emits an event that server's can hook into to
-// take their own actions and track their own state based on the environment.
-func (e *Environment) setState(state string) error {
-	if state != environment.ProcessOfflineState &&
-		state != environment.ProcessStartingState &&
-		state != environment.ProcessRunningState &&
-		state != environment.ProcessStoppingState {
-		return errors.New(fmt.Sprintf("invalid server state received: %s", state))
-	}
-
-	// Get the current state of the environment before changing it.
-	prevState := e.State()
-
-	// Emit the event to any listeners that are currently registered.
-	if prevState != state {
-		// If the state changed make sure we update the internal tracking to note that.
-		e.stMu.Lock()
-		e.st = state
-		e.stMu.Unlock()
-
-		e.Events().Publish(environment.StateChangeEvent, e.State())
-	}
-
-	return nil
-}
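The deleted state.go above guarded the state string with an RWMutex held on the environment itself; the replacement lines elsewhere in this diff read the state through e.st.Load() instead. A minimal sketch of such a load/store wrapper; the atomicString name is an assumption, not the actual helper wings uses.

```go
package main

import (
	"fmt"
	"sync"
)

// atomicString is a tiny stand-in for whatever e.st is in the new code:
// a string value that can be loaded and stored concurrently.
type atomicString struct {
	mu sync.RWMutex
	v  string
}

func (s *atomicString) Load() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.v
}

func (s *atomicString) Store(v string) {
	s.mu.Lock()
	s.v = v
	s.mu.Unlock()
}

func main() {
	var st atomicString
	st.Store("offline")
	fmt.Println(st.Load())
}
```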
@@ -2,81 +2,66 @@ package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"emperror.dev/errors"
|
||||
"encoding/json"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"io"
|
||||
"math"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Attach to the instance and then automatically emit an event whenever the resource usage for the
|
||||
// server process changes.
|
||||
func (e *Environment) pollResources(ctx context.Context) error {
|
||||
l := log.WithField("container_id", e.Id)
|
||||
|
||||
l.Debug("starting resource polling for container")
|
||||
defer l.Debug("stopped resource polling for container")
|
||||
|
||||
if e.State() == environment.ProcessOfflineState {
|
||||
if e.st.Load() == environment.ProcessOfflineState {
|
||||
return errors.New("cannot enable resource polling on a stopped server")
|
||||
}
|
||||
|
||||
stats, err := e.client.ContainerStats(context.Background(), e.Id, true)
|
||||
e.log().Info("starting resource polling for container")
|
||||
defer e.log().Debug("stopped resource polling for container")
|
||||
|
||||
stats, err := e.client.ContainerStats(ctx, e.Id, true)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
defer stats.Body.Close()
|
||||
|
||||
dec := json.NewDecoder(stats.Body)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
var v *types.StatsJSON
|
||||
|
||||
var v types.StatsJSON
|
||||
if err := dec.Decode(&v); err != nil {
|
||||
if err != io.EOF {
|
||||
l.WithField("error", errors.WithStack(err)).Warn("error while processing Docker stats output for container")
|
||||
if err != io.EOF && !errors.Is(err, context.Canceled) {
|
||||
e.log().WithField("error", err).Warn("error while processing Docker stats output for container")
|
||||
} else {
|
||||
l.Debug("io.EOF encountered during stats decode, stopping polling...")
|
||||
e.log().Debug("io.EOF encountered during stats decode, stopping polling...")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Disable collection if the server is in an offline state and this process is still running.
|
||||
if e.State() == environment.ProcessOfflineState {
|
||||
l.Debug("process in offline state while resource polling is still active; stopping poll")
|
||||
if e.st.Load() == environment.ProcessOfflineState {
|
||||
e.log().Debug("process in offline state while resource polling is still active; stopping poll")
|
||||
return nil
|
||||
}
|
||||
|
||||
var rx uint64
|
||||
var tx uint64
|
||||
for _, nw := range v.Networks {
|
||||
atomic.AddUint64(&rx, nw.RxBytes)
|
||||
atomic.AddUint64(&tx, nw.RxBytes)
|
||||
}
|
||||
|
||||
st := &environment.Stats{
|
||||
st := environment.Stats{
|
||||
Memory: calculateDockerMemory(v.MemoryStats),
|
||||
MemoryLimit: v.MemoryStats.Limit,
|
||||
CpuAbsolute: calculateDockerAbsoluteCpu(&v.PreCPUStats, &v.CPUStats),
|
||||
Network: struct {
|
||||
RxBytes uint64 `json:"rx_bytes"`
|
||||
TxBytes uint64 `json:"tx_bytes"`
|
||||
}{
|
||||
RxBytes: rx,
|
||||
TxBytes: tx,
|
||||
},
|
||||
CpuAbsolute: calculateDockerAbsoluteCpu(v.PreCPUStats, v.CPUStats),
|
||||
Network: environment.NetworkStats{},
|
||||
}
|
||||
|
||||
for _, nw := range v.Networks {
|
||||
st.Network.RxBytes += nw.RxBytes
|
||||
st.Network.TxBytes += nw.TxBytes
|
||||
}
|
||||
|
||||
if b, err := json.Marshal(st); err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Warn("error while marshaling stats object for environment")
|
||||
e.log().WithField("error", err).Warn("error while marshaling stats object for environment")
|
||||
} else {
|
||||
e.Events().Publish(environment.ResourceEvent, string(b))
|
||||
}
|
||||
@@ -109,7 +94,7 @@ func calculateDockerMemory(stats types.MemoryStats) uint64 {
|
||||
// by the defined CPU limits on the container.
|
||||
//
|
||||
// @see https://github.com/docker/cli/blob/aa097cf1aa19099da70930460250797c8920b709/cli/command/container/stats_helpers.go#L166
|
||||
func calculateDockerAbsoluteCpu(pStats *types.CPUStats, stats *types.CPUStats) float64 {
|
||||
func calculateDockerAbsoluteCpu(pStats types.CPUStats, stats types.CPUStats) float64 {
|
||||
// Calculate the change in CPU usage between the current and previous reading.
|
||||
cpuDelta := float64(stats.CPUUsage.TotalUsage) - float64(pStats.CPUUsage.TotalUsage)
|
||||
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type dockerLogLine struct {
|
||||
Log string `json:"log"`
|
||||
}
|
||||
|
||||
var ErrNotAttached = errors.New("not attached to instance")
|
||||
|
||||
func (e *Environment) setStream(s *types.HijackedResponse) {
|
||||
e.mu.Lock()
|
||||
e.stream = s
|
||||
e.mu.Unlock()
|
||||
}
|
||||
|
||||
// Sends the specified command to the stdin of the running container instance. There is no
|
||||
// confirmation that this data is sent successfully, only that it gets pushed into the stdin.
|
||||
func (e *Environment) SendCommand(c string) error {
|
||||
if !e.IsAttached() {
|
||||
return ErrNotAttached
|
||||
}
|
||||
|
||||
e.mu.RLock()
|
||||
defer e.mu.RUnlock()
|
||||
|
||||
// If the command being processed is the same as the process stop command then we want to mark
|
||||
// the server as entering the stopping state otherwise the process will stop and Wings will think
|
||||
// it has crashed and attempt to restart it.
|
||||
if e.meta.Stop.Type == "command" && c == e.meta.Stop.Value {
|
||||
e.Events().Publish(environment.StateChangeEvent, environment.ProcessStoppingState)
|
||||
}
|
||||
|
||||
_, err := e.stream.Conn.Write([]byte(c + "\n"))
|
||||
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Reads the log file for the server. This does not care if the server is running or not, it will
|
||||
// simply try to read the last X bytes of the file and return them.
|
||||
func (e *Environment) Readlog(lines int) ([]string, error) {
|
||||
r, err := e.client.ContainerLogs(context.Background(), e.Id, types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Tail: strconv.Itoa(lines),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
var out []string
|
||||
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
out = append(out, scanner.Text())
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Docker stores the logs for server output in a JSON format. This function will iterate over the JSON
|
||||
// that was read from the log file and parse it into a more human readable format.
|
||||
func (e *Environment) parseLogToStrings(b []byte) ([]string, error) {
|
||||
var hasError = false
|
||||
var out []string
|
||||
|
||||
scanner := bufio.NewScanner(bytes.NewReader(b))
|
||||
for scanner.Scan() {
|
||||
var l dockerLogLine
|
||||
|
||||
// Unmarshal the contents and allow up to a single error before bailing out of the process. We
|
||||
// do this because if you're arbitrarily reading a length of the file you'll likely end up
|
||||
// with the first line in the output being improperly formatted JSON. In those cases we want to
|
||||
// just skip over it. However if we see another error we're going to bail out because that is an
|
||||
// abnormal situation.
|
||||
if err := json.Unmarshal([]byte(scanner.Text()), &l); err != nil {
|
||||
if hasError {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hasError = true
|
||||
continue
|
||||
}
|
||||
|
||||
out = append(out, l.Log)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
@@ -1,8 +1,9 @@
 package environment

 import (
-	"github.com/pterodactyl/wings/events"
 	"os"
+
+	"github.com/pterodactyl/wings/events"
 )

 const (
@@ -94,4 +95,12 @@ type ProcessEnvironment interface {
 	// Reads the log file for the process from the end backwards until the provided
 	// number of lines is met.
 	Readlog(int) ([]string, error)
+
+	// Returns the current state of the environment.
+	State() string
+
+	// Sets the current state of the environment. In general you should let the environment
+	// handle this itself, but there are some scenarios where it is helpful for the server
+	// to update the state externally (e.g. starting -> started).
+	SetState(string)
 }
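With State() and SetState(string) now part of the ProcessEnvironment interface, every environment implementation has to expose its state tracking. A small sketch of a type satisfying just those two methods; the rest of the interface is omitted, and the event publish the real code performs is only hinted at in a comment.

```go
package main

import "fmt"

// stateTracker satisfies only the State/SetState portion of the interface
// shown above; a real environment would implement the full method set.
type stateTracker struct {
	state string
}

func (s *stateTracker) State() string {
	return s.state
}

func (s *stateTracker) SetState(state string) {
	if s.state != state {
		s.state = state
		// The real implementation also publishes a state-change event here.
		fmt.Println("state changed to", state)
	}
}

func main() {
	t := &stateTracker{state: "offline"}
	t.SetState("starting")
	fmt.Println(t.State())
}
```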
@@ -2,9 +2,10 @@ package environment
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/apex/log"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/apex/log"
|
||||
)
|
||||
|
||||
type Mount struct {
|
||||
|
||||
@@ -1,13 +1,9 @@
 package environment

-import "sync"
-
 // Defines the current resource usage for a given server instance. If a server is offline you
 // should obviously expect memory and CPU usage to be 0. However, disk will always be returned
 // since that is not dependent on the server being running to collect that data.
 type Stats struct {
-	mu sync.RWMutex
-
 	// The total amount of memory, in bytes, that this server instance is consuming. This is
 	// calculated slightly differently than just using the raw Memory field that the stats
 	// return from the container, so please check the code setting this value for how that
@@ -28,20 +24,10 @@ type Stats struct {
 	// Disk int64 `json:"disk_bytes"`

 	// Current network transmit in & out for a container.
-	Network struct {
+	Network NetworkStats `json:"network"`
+}
+
+type NetworkStats struct {
 		RxBytes uint64 `json:"rx_bytes"`
 		TxBytes uint64 `json:"tx_bytes"`
-	} `json:"network"`
 }
-
-// Resets the usage values to zero, used when a server is stopped to ensure we don't hold
-// onto any values incorrectly.
-func (s *Stats) Empty() {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	s.Memory = 0
-	s.CpuAbsolute = 0
-	s.Network.TxBytes = 0
-	s.Network.RxBytes = 0
-}
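With the network counters pulled out into their own NetworkStats type, the polling code earlier in this diff sums the per-interface counters into it before marshaling the stats for the event bus. A short sketch of that aggregation; the input map shape is an assumption standing in for Docker's per-network stats.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type NetworkStats struct {
	RxBytes uint64 `json:"rx_bytes"`
	TxBytes uint64 `json:"tx_bytes"`
}

func main() {
	// Per-interface counters, roughly the shape Docker reports under .Networks.
	networks := map[string]struct{ RxBytes, TxBytes uint64 }{
		"eth0": {RxBytes: 1024, TxBytes: 2048},
		"eth1": {RxBytes: 512, TxBytes: 256},
	}

	// Sum every interface into the single NetworkStats value that gets published.
	var st NetworkStats
	for _, nw := range networks {
		st.RxBytes += nw.RxBytes
		st.TxBytes += nw.TxBytes
	}

	b, _ := json.Marshal(st)
	fmt.Println(string(b)) // {"rx_bytes":1536,"tx_bytes":2304}
}
```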
@@ -2,10 +2,10 @@ package events
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/gammazero/workerpool"
|
||||
"github.com/pkg/errors"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gammazero/workerpool"
|
||||
)
|
||||
|
||||
type Event struct {
|
||||
@@ -69,7 +69,7 @@ func (e *EventBus) Publish(topic string, data string) {
|
||||
func (e *EventBus) PublishJson(topic string, data interface{}) error {
|
||||
b, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
e.Publish(topic, string(b))
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"github.com/gammazero/workerpool"
|
||||
"reflect"
|
||||
|
||||
"github.com/gammazero/workerpool"
|
||||
)
|
||||
|
||||
type CallbackPool struct {
|
||||
|
||||
106 go.mod
@@ -1,82 +1,82 @@
|
||||
module github.com/pterodactyl/wings
|
||||
|
||||
go 1.13
|
||||
go 1.14
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.1.0
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/Jeffail/gabs/v2 v2.5.1
|
||||
github.com/Microsoft/go-winio v0.4.14 // indirect
|
||||
emperror.dev/errors v0.8.0
|
||||
github.com/AlecAivazis/survey/v2 v2.2.7
|
||||
github.com/Jeffail/gabs/v2 v2.6.0
|
||||
github.com/Microsoft/go-winio v0.4.16 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.14 // indirect
|
||||
github.com/NYTimes/logrotate v1.0.0
|
||||
github.com/andybalholm/brotli v1.0.0 // indirect
|
||||
github.com/apex/log v1.8.0
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535
|
||||
github.com/andybalholm/brotli v1.0.1 // indirect
|
||||
github.com/apex/log v1.9.0
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
|
||||
github.com/beevik/etree v1.1.0
|
||||
github.com/buger/jsonparser v1.0.0
|
||||
github.com/buger/jsonparser v1.1.0
|
||||
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
|
||||
github.com/containerd/containerd v1.3.7 // indirect
|
||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect
|
||||
github.com/creasty/defaults v1.5.0
|
||||
github.com/docker/cli v17.12.1-ce-rc2+incompatible
|
||||
github.com/containerd/containerd v1.4.3 // indirect
|
||||
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c // indirect
|
||||
github.com/creasty/defaults v1.5.1
|
||||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible
|
||||
github.com/docker/docker v20.10.1+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/fatih/color v1.9.0
|
||||
github.com/fatih/color v1.10.0
|
||||
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
|
||||
github.com/frankban/quicktest v1.10.2 // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.1.1
|
||||
github.com/gammazero/deque v0.0.0-20200721202602-07291166fe33 // indirect
|
||||
github.com/gammazero/workerpool v1.0.0
|
||||
github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.2
|
||||
github.com/gabriel-vasile/mimetype v1.1.2
|
||||
github.com/gammazero/deque v0.0.0-20201010052221-3932da5530cc // indirect
|
||||
github.com/gammazero/workerpool v1.1.1
|
||||
github.com/gbrlsnchs/jwt/v3 v3.0.0
|
||||
github.com/gin-gonic/gin v1.6.3
|
||||
github.com/go-playground/validator/v10 v10.3.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.1 // indirect
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/go-playground/validator/v10 v10.4.1 // indirect
|
||||
github.com/golang/snappy v0.0.2 // indirect
|
||||
github.com/google/go-cmp v0.5.2 // indirect
|
||||
github.com/google/uuid v1.1.2
|
||||
github.com/gorilla/mux v1.7.4 // indirect
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334
|
||||
github.com/iancoleman/strcase v0.1.2
|
||||
github.com/icza/dyno v0.0.0-20200205103839-49cb13720835
|
||||
github.com/imdario/mergo v0.3.8
|
||||
github.com/imdario/mergo v0.3.9
|
||||
github.com/juju/ratelimit v1.0.1
|
||||
github.com/karrick/godirwalk v1.16.1
|
||||
github.com/klauspost/compress v1.10.10 // indirect
|
||||
github.com/klauspost/pgzip v1.2.4
|
||||
github.com/klauspost/compress v1.11.4 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/magefile/mage v1.10.0 // indirect
|
||||
github.com/magiconair/properties v1.8.1
|
||||
github.com/mattn/go-colorable v0.1.7
|
||||
github.com/mattn/go-shellwords v1.0.10 // indirect
|
||||
github.com/magiconair/properties v1.8.4
|
||||
github.com/mattn/go-colorable v0.1.8
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mholt/archiver/v3 v3.3.0
|
||||
github.com/mholt/archiver/v3 v3.5.0
|
||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
|
||||
github.com/nwaples/rardecode v1.1.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pierrec/lz4 v2.5.2+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pierrec/lz4/v4 v4.1.2 // indirect
|
||||
github.com/pkg/profile v1.5.0
|
||||
github.com/pkg/sftp v1.11.0
|
||||
github.com/prometheus/common v0.11.1 // indirect
|
||||
github.com/remeh/sizedwaitgroup v1.0.0
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94
|
||||
github.com/spf13/cobra v1.0.0
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/ulikunitz/xz v0.5.7 // indirect
|
||||
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
|
||||
golang.org/x/text v0.3.3 // indirect
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
|
||||
github.com/pkg/sftp v1.12.0
|
||||
github.com/prometheus/client_golang v1.9.0 // indirect
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f
|
||||
github.com/sirupsen/logrus v1.7.0 // indirect
|
||||
github.com/spf13/cobra v1.1.1
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/ugorji/go v1.2.2 // indirect
|
||||
github.com/ulikunitz/xz v0.5.9 // indirect
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
|
||||
golang.org/x/sys v0.0.0-20201223074533-0d417f636930 // indirect
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
|
||||
golang.org/x/text v0.3.4 // indirect
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 // indirect
|
||||
google.golang.org/grpc v1.31.0 // indirect
|
||||
google.golang.org/protobuf v1.25.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d // indirect
|
||||
google.golang.org/grpc v1.34.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
|
||||
gopkg.in/ini.v1 v1.57.0
|
||||
gopkg.in/yaml.v2 v2.3.0
|
||||
gotest.tools v2.2.0+incompatible // indirect
|
||||
gopkg.in/ini.v1 v1.62.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
393 go.sum
@@ -1,16 +1,32 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/AlecAivazis/survey/v2 v2.1.0 h1:AT4+23hOFopXYZaNGugbk7MWItkz0SfTmH/Hk92KeeE=
|
||||
github.com/AlecAivazis/survey/v2 v2.1.0/go.mod h1:9FJRdMdDm8rnT+zHVbvQT2RTSTLq0Ttd6q3Vl2fahjk=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
emperror.dev/errors v0.8.0 h1:4lycVEx0sdJkwDUfQ9pdu6SR0x7rgympt5f4+ok8jDk=
|
||||
emperror.dev/errors v0.8.0/go.mod h1:YcRvLPh626Ubn2xqtoprejnA5nFha+TJ+2vew48kWuE=
|
||||
github.com/AlecAivazis/survey/v2 v2.2.7 h1:5NbxkF4RSKmpywYdcRgUmos1o+roJY8duCLZXbVjoig=
|
||||
github.com/AlecAivazis/survey/v2 v2.2.7/go.mod h1:9DYvHgXtiXm6nCn+jXnOXLKbH+Yo9u8fAS/SduGdoPk=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Jeffail/gabs/v2 v2.5.1 h1:ANfZYjpMlfTTKebycu4X1AgkVWumFVDYQl7JwOr4mDk=
|
||||
github.com/Jeffail/gabs/v2 v2.5.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Jeffail/gabs/v2 v2.6.0 h1:WdCnGaDhNa4LSRTMwhLZzJ7SRDXjABNP13SOKvCpL5w=
|
||||
github.com/Jeffail/gabs/v2 v2.6.0/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI=
|
||||
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||
github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk=
|
||||
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||
github.com/Microsoft/hcsshim v0.8.14 h1:lbPVK25c1cu5xTLITwpUcxoA9vKrKErASPYygvouJns=
|
||||
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
|
||||
github.com/NYTimes/logrotate v1.0.0 h1:6jFGbon6jOtpy3t3kwZZKS4Gdmf1C/Wv5J4ll4Xn5yk=
|
||||
github.com/NYTimes/logrotate v1.0.0/go.mod h1:GxNz1cSw1c6t99PXoZlw+nm90H6cyQyrH66pjVv7x88=
|
||||
github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=
|
||||
@@ -25,29 +41,26 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6 h1:bZ28Hqta7TFAK3Q08CMvv8y3/8ATaEqv2nGoc6yff6c=
|
||||
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U=
|
||||
github.com/andybalholm/brotli v1.0.0 h1:7UCwP93aiSfvWpapti8g88vVVGp2qqtGyePsSuDafo4=
|
||||
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
||||
github.com/andybalholm/brotli v1.0.1 h1:KqhlKozYbRtJvsPrrEeXcO+N2l6NYT5A2QAFmSULpEc=
|
||||
github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/apex/log v1.8.0 h1:+W4j+dttibFvynPLlctdnYFUn1eLKT37BZWWW2iMfEM=
|
||||
github.com/apex/log v1.8.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
|
||||
github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0=
|
||||
github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
|
||||
github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
|
||||
github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
|
||||
github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg=
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
|
||||
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo=
|
||||
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
|
||||
github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
|
||||
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
|
||||
@@ -56,8 +69,9 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/buger/jsonparser v1.0.0 h1:etJTGF5ESxjI0Ic2UaLQs2LQQpa8G9ykQScukbh4L8A=
|
||||
github.com/buger/jsonparser v1.0.0/go.mod h1:tgcrVJ81GPSF0mz+0nu1Xaz0fazGPrmmJfJtxjbHhUQ=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/buger/jsonparser v1.1.0 h1:EPAGdKZgZCON4ZcMD+h4l/NN4ndr6ijSpj4INh8PbUY=
|
||||
github.com/buger/jsonparser v1.1.0/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@@ -65,40 +79,51 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249 h1:R0IDH8daQ3lODvu8YtxnIqqth5qMGCJyADoUQvmLx4o=
|
||||
github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249/go.mod h1:EHKW9yNEYSBpTKzuu7Y9oOrft/UlzH57rMIB03oev6M=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/containerd/containerd v1.3.7 h1:eFSOChY8TTcxvkzp8g+Ov1RL0MYww7XEeK0y+zqGpVc=
|
||||
github.com/containerd/containerd v1.3.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b h1:qUtCegLdOUVfVJOw+KDg6eJyE1TGvLlkGEd1091kSSQ=
|
||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.4.3 h1:ijQT13JedHSHrQGWFcGEwzcNKrAGIiZ+jSD5QQG07SY=
|
||||
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c h1:1c6xmkNiu6Jnr6AKGM91GGNsfU+nPNFvw9BZFSo0E+c=
|
||||
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/creasty/defaults v1.5.0 h1:DW6NAGGaKuNSKkntc8BCBrR2KOUAcXVnfcwu/LmJhaQ=
|
||||
github.com/creasty/defaults v1.5.0/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY=
|
||||
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creasty/defaults v1.5.1 h1:j8WexcS3d/t4ZmllX4GEkl4wIB/trOr035ajcLHCISM=
|
||||
github.com/creasty/defaults v1.5.1/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/docker/cli v17.12.1-ce-rc2+incompatible h1:ESUycEAqvFuLglAHkUW66rCc2djYtd3i1x231svLq9o=
|
||||
github.com/docker/cli v17.12.1-ce-rc2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:iWPIG7pWIsCwT6ZtHnTUpoVMnete7O/pzd9HFE3+tn8=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.1+incompatible h1:u0HIBLwOJdemyBdTCkoBX34u3lb5KyBo0rQE3a5Yg+E=
|
||||
github.com/docker/docker v20.10.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
|
||||
@@ -116,43 +141,37 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB
|
||||
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
|
||||
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
|
||||
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
|
||||
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
|
||||
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
|
||||
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd h1:b/30UOB56Rhfe185ZfgvZT0/HOql0OzxuiNOxRKXRXc=
|
||||
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk=
|
||||
github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gabriel-vasile/mimetype v1.1.1 h1:qbN9MPuRf3bstHu9zkI9jDWNfH//9+9kHxr9oRBBBOA=
|
||||
github.com/gabriel-vasile/mimetype v1.1.1/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
|
||||
github.com/gammazero/deque v0.0.0-20200227231300-1e9af0e52b46 h1:iX4+rD9Fjdx8SkmSO/O5WAIX/j79ll3kuqv5VdYt9J8=
|
||||
github.com/gammazero/deque v0.0.0-20200227231300-1e9af0e52b46/go.mod h1:D90+MBHVc9Sk1lJAbEVgws0eYEurY4mv2TDso3Nxh3w=
|
||||
github.com/gammazero/deque v0.0.0-20200721202602-07291166fe33 h1:UG4wNrJX9xSKnm/Gck5yTbxnOhpNleuE4MQRdmcGySo=
|
||||
github.com/gabriel-vasile/mimetype v1.1.2 h1:gaPnPcNor5aZSVCJVSGipcpbgMWiAAj9z182ocSGbHU=
|
||||
github.com/gabriel-vasile/mimetype v1.1.2/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To=
|
||||
github.com/gammazero/deque v0.0.0-20200721202602-07291166fe33/go.mod h1:D90+MBHVc9Sk1lJAbEVgws0eYEurY4mv2TDso3Nxh3w=
|
||||
github.com/gammazero/workerpool v1.0.0 h1:MfkJc6KL0tAmjrRDS203AZz3F+84Uod9YbL8KjpcQ00=
|
||||
github.com/gammazero/workerpool v1.0.0/go.mod h1:/XWO2YAUUpPi3smDlFBl0vpX0JHwUomDM/oRMwRmnSs=
|
||||
github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.2 h1:3t7jvTkeQfk1FdP0noXSNiM6AdBokLz7QmZDmnCHAAA=
|
||||
github.com/gbrlsnchs/jwt/v3 v3.0.0-rc.2/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/gammazero/deque v0.0.0-20201010052221-3932da5530cc h1:F7BbnLACph7UYiz9ZHi6npcROwKaZUyviDjsNERsoMM=
|
||||
github.com/gammazero/deque v0.0.0-20201010052221-3932da5530cc/go.mod h1:IlBLfYXnuw9sspy1XS6ctu5exGb6WHGKQsyo4s7bOEA=
|
||||
github.com/gammazero/workerpool v1.1.1 h1:MN29GcZtZZAgzTU+Zk54Y+J9XkE54MoXON/NCZvNulo=
|
||||
github.com/gammazero/workerpool v1.1.1/go.mod h1:5BN0IJVRjSFAypo9QTJCaWdijjNz9Jjl6VFS1PRjCeg=
|
||||
github.com/gbrlsnchs/jwt/v3 v3.0.0 h1:gtPjdT3gAbBLjVckJsgNf+a46sqrCBfRebg2r/NysIo=
|
||||
github.com/gbrlsnchs/jwt/v3 v3.0.0/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14=
|
||||
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
|
||||
@@ -161,27 +180,25 @@ github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8c
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
|
||||
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
|
||||
github.com/go-playground/validator/v10 v10.3.0 h1:nZU+7q+yJoFmwvNgv/LnPUkwPal62+b2xXj0AU1Es7o=
|
||||
github.com/go-playground/validator/v10 v10.3.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
|
||||
github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
|
||||
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721 h1:KRMr9A3qfbVM7iV/WcLY/rL5LICqwMHLhwRXKu99fXw=
|
||||
github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -192,27 +209,33 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
@@ -221,8 +244,6 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -230,7 +251,9 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -255,12 +278,12 @@ github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDG
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334 h1:VHgatEHNcBFEB7inlalqfNqw65aNkM1lGX2yt3NmbS8=
github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
github.com/iancoleman/strcase v0.1.2 h1:gnomlvw9tnV3ITTAxzKSgTF+8kFWcU/f+TgttpXGz1U=
github.com/iancoleman/strcase v0.1.2/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
github.com/icza/dyno v0.0.0-20200205103839-49cb13720835 h1:f1irK5f03uGGj+FjgQfZ5VhdKNVQVJ4skHsedzVohQ4=
github.com/icza/dyno v0.0.0-20200205103839-49cb13720835/go.mod h1:c1tRKs5Tx7E2+uHGSyyncziFjvGpgv4H2HrqXeUQ/Uk=
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
@@ -271,13 +294,14 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=
github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw=
@@ -288,70 +312,57 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.10 h1:a/y8CglcM7gLGYmlbP/stPE5sR3hbhFRUjCBfd/0B3I=
github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.4 h1:kz40R/YWls3iqT9zX9AHN3WoVsrAWVyui5sxuLqiXqU=
github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A=
github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.4 h1:5Myjjh3JY/NaAi4IsUbHADytDyl1VE1Y9PXDlL+P/VQ=
github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE=
github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g=
github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.4 h1:8KGKTcQQGm0Kv7vEbKFErAoAOFyyacLStRtQSeYtvkY=
github.com/magiconair/properties v1.8.4/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/mholt/archiver/v3 v3.3.0 h1:vWjhY8SQp5yzM9P6OJ/eZEkmi3UAbRrxCq48MxjAzig=
github.com/mholt/archiver/v3 v3.3.0/go.mod h1:YnQtqsp+94Rwd0D/rk5cnLrxusUBUXg+08Ebtr1Mqao=
github.com/mholt/archiver/v3 v3.5.0 h1:nE8gZIrw66cu4osS/U7UW7YDuGMHssxKutU8IfWxwWE=
github.com/mholt/archiver/v3 v3.5.0/go.mod h1:qqTTPUK/HZPFgFQ/TJ3BzvTpF/dPtFVJXdQbCmeMxwc=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
@@ -363,6 +374,8 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -382,8 +395,6 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nwaples/rardecode v1.0.0 h1:r7vGuS5akxOnR4JQSkko62RJ1ReCMXxQRPtxsiFMBOs=
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/nwaples/rardecode v1.1.0 h1:vSxaY8vQhOcVr4mm5e8XllHWTiM4JF507A0Katqw7MQ=
github.com/nwaples/rardecode v1.1.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
@@ -395,10 +406,13 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -417,19 +431,18 @@ github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.5.2+incompatible h1:WCjObylUIOlKy/+7Abdn34TLIkXiA4UWUMhxq9m9ZXI=
github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pierrec/lz4/v4 v4.0.3/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.2 h1:qvY3YFXRQE/XB8MlLzJH7mSzBs74eA2gg52YTk6jUPM=
github.com/pierrec/lz4/v4 v4.1.2/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/profile v1.5.0 h1:042Buzk+NhDI+DeSAA62RwJL8VAuZUMQZUjCsRz1Mug=
github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI=
github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.12.0 h1:/f3b24xrDhkhddlaobPe2JgBqfdt+gC/NYl0QY9IOuI=
github.com/pkg/sftp v1.12.0/go.mod h1:fUqqXB5vEgVCZ131L+9say31RAri6aF6KDViawhxKK8=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
@@ -437,14 +450,13 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -454,43 +466,40 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.11.1 h1:0ZISXCMRuCZcxF77aT1BXY5m74mX2vrGYl1dSwBI0Jo=
github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/remeh/sizedwaitgroup v1.0.0 h1:VNGGFwNo/R5+MJBf6yrsr110p0m4/OX4S3DCy7Kyl5E=
github.com/remeh/sizedwaitgroup v1.0.0/go.mod h1:3j2R4OIe/SeS6YDhICBy22RWjJC5eNCJ1V+9+NVNYlo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94 h1:G04eS0JkAIVZfaJLjla9dNxkJCPiKIGZlw9AfOhzOD0=
github.com/sabhiram/go-gitignore v0.0.0-20180611051255-d3107576ba94/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ=
github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f h1:8P2MkG70G76gnZBOPGwmMIgwBb/rESQuwsJ7K8ds4NE=
github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8=
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
@@ -504,15 +513,14 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@@ -521,50 +529,53 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc=
github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds=
github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go v1.2.2 h1:60ZHIOcsJlo3bJm9CbTVu7OSqT2mxaEmyQbK2NwCkn0=
github.com/ugorji/go v1.2.2/go.mod h1:bitgyERdV7L7Db/Z5gfd5v2NQMNhhiFiZwpgMw2SP7k=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
github.com/ugorji/go/codec v1.2.2 h1:08Gah8d+dXj4cZNUHhtuD/S4PXD5WpVbj5B8/ClELAQ=
github.com/ugorji/go/codec v1.2.2/go.mod h1:OM8g7OAy52uYl3Yk+RE/3AS1nXFn1Wh4PPLtupCxbuU=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I=
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
@@ -574,22 +585,32 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -603,26 +624,27 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -633,37 +655,52 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201223074533-0d417f636930 h1:vRgIt+nup/B/BwIS0g2oC0haq0iqbV3ZA+u6+0TlNCo=
golang.org/x/sys v0.0.0-20201223074533-0d417f636930/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -671,49 +708,67 @@ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28=
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d h1:HV9Z9qMhQEsdlvxNFELgQ11RkMzO3CMkjEySjCtuLes=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -722,13 +777,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
@@ -737,30 +790,34 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
|
||||
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
|
||||
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
package installer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/asaskevich/govalidator"
|
||||
"github.com/buger/jsonparser"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
)
|
||||
|
||||
@@ -14,10 +16,10 @@ type Installer struct {
|
||||
server *server.Server
|
||||
}
|
||||
|
||||
// Validates the received data to ensure that all of the required fields
|
||||
// New validates the received data to ensure that all of the required fields
|
||||
// have been passed along in the request. This should be manually run before
|
||||
// calling Execute().
|
||||
func New(data []byte) (*Installer, error) {
|
||||
func New(ctx context.Context, manager *server.Manager, data []byte) (*Installer, error) {
|
||||
if !govalidator.IsUUIDv4(getString(data, "uuid")) {
|
||||
return nil, NewValidationError("uuid provided was not in a valid format")
|
||||
}
|
||||
@@ -43,50 +45,49 @@ func New(data []byte) (*Installer, error) {
|
||||
|
||||
// Unmarshal the environment variables from the request into the server struct.
|
||||
if b, _, _, err := jsonparser.Get(data, "environment"); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, errors.WithStackIf(err)
|
||||
} else {
|
||||
cfg.EnvVars = make(environment.Variables)
|
||||
if err := json.Unmarshal(b, &cfg.EnvVars); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, errors.WrapIf(err, "installer: could not unmarshal environment variables for server")
|
||||
}
|
||||
}
|
||||
|
||||
// Unmarshal the allocation mappings from the request into the server struct.
|
||||
if b, _, _, err := jsonparser.Get(data, "allocations", "mappings"); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, errors.WithStackIf(err)
|
||||
} else {
|
||||
cfg.Allocations.Mappings = make(map[string][]int)
|
||||
if err := json.Unmarshal(b, &cfg.Allocations.Mappings); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, errors.Wrap(err, "installer: could not unmarshal allocation mappings")
|
||||
}
|
||||
}
|
||||
|
||||
cfg.Container.Image = getString(data, "container", "image")
|
||||
|
||||
c, rerr, err := api.NewRequester().GetServerConfiguration(cfg.Uuid)
|
||||
if err != nil || rerr != nil {
|
||||
c, err := manager.Client().GetServerConfiguration(ctx, cfg.Uuid)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
if !remote.IsRequestError(err) {
|
||||
return nil, errors.WithStackIf(err)
|
||||
}
|
||||
|
||||
return nil, errors.New(rerr.String())
|
||||
return nil, errors.WrapIf(err, "installer: could not get server configuration from remote API")
|
||||
}
|
||||
|
||||
// Create a new server instance using the configuration we wrote to the disk
|
||||
// so that everything gets instantiated correctly on the struct.
|
||||
s, err := server.FromConfiguration(c)
|
||||
|
||||
return &Installer{
|
||||
server: s,
|
||||
}, err
|
||||
s, err := manager.InitServer(c)
|
||||
if err != nil {
|
||||
return nil, errors.WrapIf(err, "installer: could not init server instance")
|
||||
}
|
||||
return &Installer{server: s}, nil
|
||||
}
|
||||
|
||||
// Returns the UUID associated with this installer instance.
|
||||
// Uuid returns the UUID associated with this installer instance.
|
||||
func (i *Installer) Uuid() string {
|
||||
return i.server.Id()
|
||||
}
|
||||
|
||||
// Return the server instance.
|
||||
// Server returns the server instance.
|
||||
func (i *Installer) Server() *server.Server {
|
||||
return i.server
|
||||
}
|
||||
|
||||
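As an aside on the pattern New() uses above: the nested objects are pulled out of the raw request body with jsonparser.Get and then decoded individually. A minimal, self-contained sketch of that flow, with an invented payload standing in for the Panel's install request:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/buger/jsonparser"
)

func main() {
	// Invented request body shaped like the installer payload described above;
	// only the fields needed for this sketch are included.
	data := []byte(`{
		"uuid": "d9e6ab8f-5a9c-4e3d-b20a-2a5f3f0f6a11",
		"environment": {"SERVER_JARFILE": "server.jar", "SERVER_MEMORY": "1024"}
	}`)

	// Grab the raw "environment" object without unmarshalling the whole body.
	b, _, _, err := jsonparser.Get(data, "environment")
	if err != nil {
		panic(err)
	}

	// Decode just that object into a simple map, mirroring how the installer
	// fills the environment variables on the server configuration.
	env := make(map[string]string)
	if err := json.Unmarshal(b, &env); err != nil {
		panic(err)
	}
	fmt.Println(env["SERVER_JARFILE"])
}
```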
@@ -1,21 +1,22 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"emperror.dev/errors"
|
||||
"fmt"
|
||||
"github.com/apex/log"
|
||||
"github.com/apex/log/handlers/cli"
|
||||
color2 "github.com/fatih/color"
|
||||
"github.com/mattn/go-colorable"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var Default = New(os.Stderr, true)
|
||||
|
||||
var bold = color2.New(color2.Bold)
|
||||
var boldred = color2.New(color2.Bold, color2.FgRed)
|
||||
|
||||
var Strings = [...]string{
|
||||
log.DebugLevel: "DEBUG",
|
||||
@@ -41,10 +42,6 @@ func New(w io.Writer, useColors bool) *Handler {
|
||||
return &Handler{Writer: colorable.NewNonColorable(w), Padding: 2}
|
||||
}
|
||||
|
||||
type tracer interface {
|
||||
StackTrace() errors.StackTrace
|
||||
}
|
||||
|
||||
// HandleLog implements log.Handler.
|
||||
func (h *Handler) HandleLog(e *log.Entry) error {
|
||||
color := cli.Colors[e.Level]
|
||||
@@ -60,7 +57,6 @@ func (h *Handler) HandleLog(e *log.Entry) error {
|
||||
if name == "source" {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Fprintf(h.Writer, " %s=%v", color.Sprint(name), e.Fields.Get(name))
|
||||
}
|
||||
|
||||
@@ -71,44 +67,63 @@ func (h *Handler) HandleLog(e *log.Entry) error {
|
||||
continue
|
||||
}
|
||||
|
||||
var br = color2.New(color2.Bold, color2.FgRed)
|
||||
if err, ok := e.Fields.Get("error").(error); ok {
|
||||
fmt.Fprintf(h.Writer, "\n%s%+v\n\n", br.Sprintf("Stacktrace:"), getErrorStack(err, false))
|
||||
} else {
|
||||
fmt.Fprintf(h.Writer, "\n%s%+v\n\n", br.Sprintf("Invalid Error:"), err)
|
||||
// Attach the stacktrace if it is missing at this point, but don't point
|
||||
// it specifically to this line since that is irrelevant.
|
||||
err = errors.WithStackDepthIf(err, 4)
|
||||
formatted := fmt.Sprintf("\n%s\n%+v\n\n", boldred.Sprintf("Stacktrace:"), err)
|
||||
|
||||
if !strings.Contains(formatted, "runtime.goexit") {
|
||||
_, _ = fmt.Fprint(h.Writer, formatted)
|
||||
break
|
||||
}
|
||||
|
||||
// Inserts a new-line between sections of a stack.
|
||||
// When wrapping errors, you get multiple separate stacks that start with their message,
|
||||
// this allows us to separate them with a new-line and view them more easily.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// Stacktrace:
|
||||
// readlink test: no such file or directory
|
||||
// failed to read symlink target for 'test'
|
||||
// github.com/pterodactyl/wings/server/filesystem.(*Archive).addToArchive
|
||||
// github.com/pterodactyl/wings/server/filesystem/archive.go:166
|
||||
// ... (Truncated the stack for easier reading)
|
||||
// runtime.goexit
|
||||
// runtime/asm_amd64.s:1374
|
||||
// **NEW LINE INSERTED HERE**
|
||||
// backup: error while generating server backup
|
||||
// github.com/pterodactyl/wings/server.(*Server).Backup
|
||||
// github.com/pterodactyl/wings/server/backup.go:84
|
||||
// ... (Truncated the stack for easier reading)
|
||||
// runtime.goexit
|
||||
// runtime/asm_amd64.s:1374
|
||||
//
|
||||
var b strings.Builder
|
||||
var endOfStack bool
|
||||
for _, s := range strings.Split(formatted, "\n") {
|
||||
b.WriteString(s + "\n")
|
||||
|
||||
if s == "runtime.goexit" {
|
||||
endOfStack = true
|
||||
continue
|
||||
}
|
||||
|
||||
if !endOfStack {
|
||||
continue
|
||||
}
|
||||
|
||||
b.WriteString("\n")
|
||||
endOfStack = false
|
||||
}
|
||||
|
||||
_, _ = fmt.Fprint(h.Writer, b.String())
|
||||
}
|
||||
|
||||
// Only one key with the name "error" can be in the map.
|
||||
break
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
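The newline-insertion loop is easy to exercise on its own. A small sketch that applies the same splitting rule to a hard-coded sample instead of a real wrapped error:

```go
package main

import (
	"fmt"
	"strings"
)

// splitStacks inserts a blank line after each "runtime.goexit" frame so the
// stacks of wrapped errors are visually separated, mirroring the loop in
// HandleLog above.
func splitStacks(formatted string) string {
	var b strings.Builder
	var endOfStack bool
	for _, s := range strings.Split(formatted, "\n") {
		b.WriteString(s + "\n")
		if s == "runtime.goexit" {
			endOfStack = true
			continue
		}
		if !endOfStack {
			continue
		}
		b.WriteString("\n")
		endOfStack = false
	}
	return b.String()
}

func main() {
	sample := "readlink test: no such file or directory\n" +
		"runtime.goexit\nruntime/asm_amd64.s:1374\n" +
		"backup: error while generating server backup\n" +
		"runtime.goexit\nruntime/asm_amd64.s:1374"
	fmt.Print(splitStacks(sample))
}
```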
|
||||
func getErrorStack(err error, i bool) errors.StackTrace {
|
||||
e, ok := err.(tracer)
|
||||
if !ok {
|
||||
if i {
|
||||
// Just abort out of this and return a stacktrace leading up to this point. It isn't perfect
|
||||
// but it'll at least include what function led to this being called, which we can then handle.
|
||||
return errors.Wrap(err, "failed to generate stacktrace for caught error").(tracer).StackTrace()
|
||||
}
|
||||
|
||||
return getErrorStack(errors.Wrap(err, err.Error()), true)
|
||||
}
|
||||
|
||||
st := e.StackTrace()
|
||||
|
||||
l := len(st)
|
||||
// If this was an internal stack generation we're going to skip over the top four items in the stack
|
||||
// trace since they'll point to the error that was generated by this function.
|
||||
f := 0
|
||||
if i {
|
||||
f = 4
|
||||
}
|
||||
|
||||
if i && l > 9 {
|
||||
l = 9
|
||||
} else if !i && l > 5 {
|
||||
l = 5
|
||||
}
|
||||
|
||||
return st[f:l]
|
||||
}
|
||||
|
||||
@@ -2,16 +2,17 @@ package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/Jeffail/gabs/v2"
|
||||
"github.com/apex/log"
|
||||
"github.com/buger/jsonparser"
|
||||
"github.com/iancoleman/strcase"
|
||||
"github.com/pkg/errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/Jeffail/gabs/v2"
|
||||
"github.com/apex/log"
|
||||
"github.com/buger/jsonparser"
|
||||
"github.com/iancoleman/strcase"
|
||||
)
|
||||
|
||||
// Regex to match anything that has a value matching the format of {{ config.$1 }} which
|
||||
@@ -76,13 +77,13 @@ func (cfr *ConfigurationFileReplacement) getKeyValue(value []byte) interface{} {
|
||||
func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error) {
|
||||
parsed, err := gabs.ParseJSON(data)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, v := range f.Replace {
|
||||
value, err := f.LookupConfigurationValue(v)
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check for a wildcard character, and if found split the key on that value to
|
||||
@@ -101,7 +102,7 @@ func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "failed to set config value of array child")
|
||||
return nil, errors.WithMessage(err, "failed to set config value of array child")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -110,7 +111,7 @@ func (f *ConfigurationFile) IterateOverJson(data []byte) (*gabs.Container, error
|
||||
continue
|
||||
}
|
||||
|
||||
return nil, errors.Wrap(err, "unable to set config value at pathway: "+v.Match)
|
||||
return nil, errors.WithMessage(err, "unable to set config value at pathway: "+v.Match)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -138,7 +139,7 @@ func setValueAtPath(c *gabs.Container, path string, value interface{}) error {
|
||||
_, err = c.SetP(value, path)
|
||||
}
|
||||
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
i, _ := strconv.Atoi(matches[2])
|
||||
@@ -147,10 +148,10 @@ func setValueAtPath(c *gabs.Container, path string, value interface{}) error {
|
||||
ct, err := c.ArrayElementP(i, matches[1])
|
||||
if err != nil {
|
||||
if i != 0 || (!errors.Is(err, gabs.ErrNotArray) && !errors.Is(err, gabs.ErrNotFound)) {
|
||||
return errors.Wrap(err, "error while parsing array element at path")
|
||||
return errors.WithMessage(err, "error while parsing array element at path")
|
||||
}
|
||||
|
||||
var t = make([]interface{}, 1)
|
||||
t := make([]interface{}, 1)
|
||||
// If the length of matches is 4 it means we're trying to access an object down in this array
|
||||
// key, so make sure we generate the array as an array of objects, and not just a generic nil
|
||||
// array.
|
||||
@@ -162,7 +163,7 @@ func setValueAtPath(c *gabs.Container, path string, value interface{}) error {
|
||||
// an empty object if we have additional things to set on the array, or just an empty array type
|
||||
// if there is not an object structure detected (no matches[3] available).
|
||||
if _, err = c.SetP(t, matches[1]); err != nil {
|
||||
return errors.Wrap(err, "failed to create empty array for missing element")
|
||||
return errors.WithMessage(err, "failed to create empty array for missing element")
|
||||
}
|
||||
|
||||
// Set our cursor to be the array element we expect, which in this case is just the first element
|
||||
@@ -170,7 +171,7 @@ func setValueAtPath(c *gabs.Container, path string, value interface{}) error {
|
||||
// to match additional elements. In those cases the server will just have to be rebooted or something.
|
||||
ct, err = c.ArrayElementP(0, matches[1])
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to find array element at path")
|
||||
return errors.WithMessage(err, "failed to find array element at path")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -187,7 +188,7 @@ func setValueAtPath(c *gabs.Container, path string, value interface{}) error {
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to set value at config path: "+path)
|
||||
return errors.WithMessage(err, "failed to set value at config path: "+path)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -253,7 +254,7 @@ func (f *ConfigurationFile) LookupConfigurationValue(cfr ConfigurationFileReplac
|
||||
match, _, _, err := jsonparser.Get(f.configuration, path...)
|
||||
if err != nil {
|
||||
if err != jsonparser.KeyPathNotFoundError {
|
||||
return string(match), errors.WithStack(err)
|
||||
return string(match), err
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{"path": path, "filename": f.FileName}).Debug("attempted to load a configuration value that does not exist")
|
||||
|
||||
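For reference, the gabs/v2 calls the iterator leans on can be reduced to a few lines; the document and path below are invented:

```go
package main

import (
	"fmt"

	"github.com/Jeffail/gabs/v2"
)

func main() {
	parsed, err := gabs.ParseJSON([]byte(`{"server": {"ip": "0.0.0.0", "port": 8080}}`))
	if err != nil {
		panic(err)
	}

	// SetP writes a value at a dot-notation path, creating intermediate
	// objects as needed; this is what setValueAtPath ultimately calls.
	if _, err := parsed.SetP(25565, "server.port"); err != nil {
		panic(err)
	}

	fmt.Println(parsed.String()) // {"server":{"ip":"0.0.0.0","port":25565}}
}
```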
@@ -3,21 +3,21 @@ package parser
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/beevik/etree"
|
||||
"github.com/buger/jsonparser"
|
||||
"github.com/icza/dyno"
|
||||
"github.com/magiconair/properties"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"gopkg.in/ini.v1"
|
||||
"gopkg.in/yaml.v2"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The file parsing options that are available for a server configuration file.
|
||||
@@ -77,11 +77,6 @@ func (f *ConfigurationFile) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Regex to match paths such as foo[1].bar[2] and convert them into a format that
|
||||
// gabs can work with, such as foo.1.bar.2 in this case. This is applied when creating
|
||||
// the struct for the configuration file replacements.
|
||||
var cfrMatchReplacement = regexp.MustCompile(`\[(\d+)]`)
|
||||
|
||||
// Defines a single find/replace instance for a given server configuration file.
|
||||
type ConfigurationFileReplacement struct {
|
||||
Match string `json:"match"`
|
||||
@@ -172,17 +167,17 @@ func (f *ConfigurationFile) Parse(path string, internal bool) error {
|
||||
|
||||
b := strings.TrimSuffix(path, filepath.Base(path))
|
||||
if err := os.MkdirAll(b, 0755); err != nil {
|
||||
return errors.Wrap(err, "failed to create base directory for missing configuration file")
|
||||
return errors.WithMessage(err, "failed to create base directory for missing configuration file")
|
||||
} else {
|
||||
if _, err := os.Create(path); err != nil {
|
||||
return errors.Wrap(err, "failed to create missing configuration file")
|
||||
return errors.WithMessage(err, "failed to create missing configuration file")
|
||||
}
|
||||
}
|
||||
|
||||
return f.Parse(path, true)
|
||||
}
|
||||
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Parses an xml file.
|
||||
@@ -225,7 +220,7 @@ func (f *ConfigurationFile) parseXmlFile(path string) error {
|
||||
parts := strings.Split(replacement.Match, ".")
|
||||
|
||||
// Set the initial element to be the root element, and then work from there.
|
||||
var element = doc.Root()
|
||||
element := doc.Root()
|
||||
|
||||
// Iterate over the path to create the required structure for the given element's path.
|
||||
// This does not set a value, only ensures that the base structure exists. We start at index
|
||||
@@ -354,12 +349,12 @@ func (f *ConfigurationFile) parseJsonFile(path string) error {
|
||||
func (f *ConfigurationFile) parseYamlFile(path string) error {
|
||||
b, err := readFileBytes(path)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
i := make(map[string]interface{})
|
||||
if err := yaml.Unmarshal(b, &i); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal the yaml data into a JSON interface such that we can work with
|
||||
@@ -367,20 +362,20 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
|
||||
// makes working with unknown JSON significantly easier.
|
||||
jsonBytes, err := json.Marshal(dyno.ConvertMapI2MapS(i))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Now that the data is converted, treat it just like JSON and pass it to the
|
||||
// iterator function to update values as necessary.
|
||||
data, err := f.IterateOverJson(jsonBytes)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remarshal the JSON into YAML format before saving it back to the disk.
|
||||
marshaled, err := yaml.Marshal(data.Data())
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return ioutil.WriteFile(path, marshaled, 0644)
|
||||
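parseYamlFile's round trip can be summarised in a short standalone sketch: unmarshal the YAML into a generic map, normalise the interface-keyed maps with dyno so encoding/json can handle them, then marshal back to YAML once the replacements are done. The input document here is made up:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/icza/dyno"
	"gopkg.in/yaml.v2"
)

func main() {
	in := []byte("server:\n  motd: Hello\n  max-players: 20\n")

	// yaml.v2 produces map[interface{}]interface{} for nested values...
	i := make(map[string]interface{})
	if err := yaml.Unmarshal(in, &i); err != nil {
		panic(err)
	}

	// ...which encoding/json cannot marshal, so convert the keys to strings first.
	jsonBytes, err := json.Marshal(dyno.ConvertMapI2MapS(i))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(jsonBytes))

	// After values have been replaced (omitted here), the data is marshalled
	// back into YAML before being written to disk.
	var out interface{}
	_ = json.Unmarshal(jsonBytes, &out)
	b, _ := yaml.Marshal(out)
	fmt.Print(string(b))
}
```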
@@ -392,7 +387,7 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
|
||||
func (f *ConfigurationFile) parseTextFile(path string) error {
|
||||
input, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
lines := strings.Split(string(input), "\n")
|
||||
@@ -409,7 +404,7 @@ func (f *ConfigurationFile) parseTextFile(path string) error {
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(path, []byte(strings.Join(lines, "\n")), 0644); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -421,7 +416,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
||||
// Open the file.
|
||||
f2, err := os.Open(path)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var s strings.Builder
|
||||
@@ -443,20 +438,20 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
||||
|
||||
// Handle any scanner errors.
|
||||
if err := scanner.Err(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode the properties file.
|
||||
p, err := properties.LoadFile(path, properties.UTF8)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Replace any values that need to be replaced.
|
||||
for _, replace := range f.Replace {
|
||||
data, err := f.LookupConfigurationValue(replace)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
v, ok := p.Get(replace.Match)
|
||||
@@ -468,7 +463,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
||||
}
|
||||
|
||||
if _, _, err := p.Set(replace.Match, data); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -488,7 +483,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
||||
// Open the file for writing.
|
||||
w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
|
||||
remote/errors.go (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type RequestErrors struct {
|
||||
Errors []RequestError `json:"errors"`
|
||||
}
|
||||
|
||||
type RequestError struct {
|
||||
response *http.Response
|
||||
Code string `json:"code"`
|
||||
Status string `json:"status"`
|
||||
Detail string `json:"detail"`
|
||||
}
|
||||
|
||||
func IsRequestError(err error) bool {
|
||||
_, ok := err.(*RequestError)
|
||||
|
||||
return ok
|
||||
}
|
||||
|
||||
// Returns the error response in a string form that can be more easily consumed.
|
||||
func (re *RequestError) Error() string {
|
||||
c := 0
|
||||
if re.response != nil {
|
||||
c = re.response.StatusCode
|
||||
}
|
||||
|
||||
return fmt.Sprintf("Error response from Panel: %s: %s (HTTP/%d)", re.Code, re.Detail, c)
|
||||
}
|
||||
|
||||
type SftpInvalidCredentialsError struct {
|
||||
}
|
||||
|
||||
func (ice SftpInvalidCredentialsError) Error() string {
|
||||
return "the credentials provided were invalid"
|
||||
}
|
||||
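A quick illustration of how these types behave from a caller's point of view; the literals below are invented, and the (HTTP/0) suffix appears because no HTTP response is attached to the hand-built error:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/pterodactyl/wings/remote"
)

func main() {
	// A plain transport failure is not a RequestError...
	plain := errors.New("dial tcp: connection refused")
	fmt.Println(remote.IsRequestError(plain)) // false

	// ...while an error decoded from a Panel response body is.
	var panelErr error = &remote.RequestError{
		Code:   "HttpNotFoundException",
		Detail: "The requested resource does not exist.",
	}
	fmt.Println(remote.IsRequestError(panelErr)) // true
	fmt.Println(panelErr.Error())                // Error response from Panel: ... (HTTP/0)
}
```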
remote/http.go (new file, 230 lines)
@@ -0,0 +1,230 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
GetBackupRemoteUploadURLs(ctx context.Context, backup string, size int64) (BackupRemoteUploadResponse, error)
|
||||
GetInstallationScript(ctx context.Context, uuid string) (InstallationScript, error)
|
||||
GetServerConfiguration(ctx context.Context, uuid string) (ServerConfigurationResponse, error)
|
||||
GetServers(context context.Context, perPage int) ([]RawServerData, error)
|
||||
ResetServersState(ctx context.Context) error
|
||||
SetArchiveStatus(ctx context.Context, uuid string, successful bool) error
|
||||
SetBackupStatus(ctx context.Context, backup string, data BackupRequest) error
|
||||
SendRestorationStatus(ctx context.Context, backup string, successful bool) error
|
||||
SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
|
||||
SetTransferStatus(ctx context.Context, uuid string, successful bool) error
|
||||
ValidateSftpCredentials(ctx context.Context, request SftpAuthRequest) (SftpAuthResponse, error)
|
||||
}
|
||||
|
||||
type client struct {
|
||||
httpClient *http.Client
|
||||
baseUrl string
|
||||
tokenId string
|
||||
token string
|
||||
attempts int
|
||||
}
|
||||
|
||||
// New returns a new HTTP request client that is used for making authenticated
|
||||
// requests to the Panel that this instance is running under.
|
||||
func New(base string, opts ...ClientOption) Client {
|
||||
c := client{
|
||||
baseUrl: strings.TrimSuffix(base, "/") + "/api/remote",
|
||||
httpClient: &http.Client{
|
||||
Timeout: time.Second * 15,
|
||||
},
|
||||
attempts: 1,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(&c)
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// WithCredentials sets the credentials to use when making request to the remote
|
||||
// API endpoint.
|
||||
func WithCredentials(id, token string) ClientOption {
|
||||
return func(c *client) {
|
||||
c.tokenId = id
|
||||
c.token = token
|
||||
}
|
||||
}
|
||||
|
||||
// WithHttpClient sets the underlying HTTP client instance to use when making
|
||||
// requests to the Panel API.
|
||||
func WithHttpClient(httpClient *http.Client) ClientOption {
|
||||
return func(c *client) {
|
||||
c.httpClient = httpClient
|
||||
}
|
||||
}
|
||||
|
||||
// requestOnce creates a http request and executes it once. Prefer request()
|
||||
// over this method when possible. It appends the path to the endpoint of the
|
||||
// client and adds the authentication token to the request.
|
||||
func (c *client) requestOnce(ctx context.Context, method, path string, body io.Reader, opts ...func(r *http.Request)) (*Response, error) {
|
||||
req, err := http.NewRequest(method, c.baseUrl+path, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Set("User-Agent", fmt.Sprintf("Pterodactyl Wings/v%s (id:%s)", system.Version, c.tokenId))
|
||||
req.Header.Set("Accept", "application/vnd.pterodactyl.v1+json")
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s.%s", c.tokenId, c.token))
|
||||
|
||||
// Call all opts functions to allow modifying the request
|
||||
for _, o := range opts {
|
||||
o(req)
|
||||
}
|
||||
|
||||
debugLogRequest(req)
|
||||
|
||||
res, err := c.httpClient.Do(req.WithContext(ctx))
|
||||
return &Response{res}, err
|
||||
}
|
||||
|
||||
// request executes a http request and retries it when errors occur.
|
||||
// It appends the path to the endpoint of the client and adds the authentication token to the request.
|
||||
func (c *client) request(ctx context.Context, method, path string, body io.Reader, opts ...func(r *http.Request)) (res *Response, err error) {
|
||||
for i := 0; i < c.attempts; i++ {
|
||||
res, err = c.requestOnce(ctx, method, path, body, opts...)
|
||||
if err == nil &&
|
||||
res.StatusCode < http.StatusInternalServerError &&
|
||||
res.StatusCode != http.StatusTooManyRequests {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// get executes a http get request.
|
||||
func (c *client) get(ctx context.Context, path string, query q) (*Response, error) {
|
||||
return c.request(ctx, http.MethodGet, path, nil, func(r *http.Request) {
|
||||
q := r.URL.Query()
|
||||
for k, v := range query {
|
||||
q.Set(k, v)
|
||||
}
|
||||
r.URL.RawQuery = q.Encode()
|
||||
})
|
||||
}
|
||||
|
||||
// post executes a http post request.
|
||||
func (c *client) post(ctx context.Context, path string, data interface{}) (*Response, error) {
|
||||
b, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.request(ctx, http.MethodPost, path, bytes.NewBuffer(b))
|
||||
}
|
||||
|
||||
// Response is a custom response type that allows for commonly used error
|
||||
// handling and response parsing from the Panel API. This just embeds the normal
|
||||
// HTTP response from Go and we attach a few helper functions to it.
|
||||
type Response struct {
|
||||
*http.Response
|
||||
}
|
||||
|
||||
// HasError determines if the API call encountered an error. If no request has
|
||||
// been made the response will be false. This function will evaluate to true if
|
||||
// the response code is anything 300 or higher.
|
||||
func (r *Response) HasError() bool {
|
||||
if r.Response == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return r.StatusCode >= 300 || r.StatusCode < 200
|
||||
}
|
||||
|
||||
// Reads the body from the response and returns it, then replaces it on the response
|
||||
// so that it can be read again later. This does not close the response body, so any
|
||||
// functions calling this should be sure to manually defer a Body.Close() call.
|
||||
func (r *Response) Read() ([]byte, error) {
|
||||
var b []byte
|
||||
if r.Response == nil {
|
||||
return nil, errors.New("http: attempting to read missing response")
|
||||
}
|
||||
|
||||
if r.Response.Body != nil {
|
||||
b, _ = ioutil.ReadAll(r.Response.Body)
|
||||
}
|
||||
|
||||
r.Response.Body = ioutil.NopCloser(bytes.NewBuffer(b))
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// BindJSON binds a given interface with the data returned in the response. This
|
||||
// is a shortcut for calling Read and then manually calling json.Unmarshal on
|
||||
// the raw bytes.
|
||||
func (r *Response) BindJSON(v interface{}) error {
|
||||
b, err := r.Read()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(b, &v); err != nil {
|
||||
return errors.Wrap(err, "http: could not unmarshal response")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the first error message from the API call as a string. The error
|
||||
// message will be formatted similar to the below example:
|
||||
//
|
||||
// HttpNotFoundException: The requested resource does not exist. (HTTP/404)
|
||||
func (r *Response) Error() error {
|
||||
if !r.HasError() {
|
||||
return nil
|
||||
}
|
||||
|
||||
var errs RequestErrors
|
||||
_ = r.BindJSON(&errs)
|
||||
|
||||
e := &RequestError{}
|
||||
if len(errs.Errors) > 0 {
|
||||
e = &errs.Errors[0]
|
||||
}
|
||||
|
||||
e.response = r.Response
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
// Logs the request into the debug log with all of the important request bits.
|
||||
// The authorization key will be cleaned up before being output.
|
||||
func debugLogRequest(req *http.Request) {
|
||||
if l, ok := log.Log.(*log.Logger); ok && l.Level != log.DebugLevel {
|
||||
return
|
||||
}
|
||||
headers := make(map[string][]string)
|
||||
for k, v := range req.Header {
|
||||
if k != "Authorization" || len(v) == 0 || len(v[0]) == 0 {
|
||||
headers[k] = v
|
||||
continue
|
||||
}
|
||||
|
||||
headers[k] = []string{"(redacted)"}
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"method": req.Method,
|
||||
"endpoint": req.URL.String(),
|
||||
"headers": headers,
|
||||
}).Debug("making request to external HTTP endpoint")
|
||||
}
|
||||
remote/http_test.go (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func createTestClient(h http.HandlerFunc) (*client, *httptest.Server) {
|
||||
s := httptest.NewServer(h)
|
||||
c := &client{
|
||||
httpClient: s.Client(),
|
||||
baseUrl: s.URL,
|
||||
|
||||
attempts: 1,
|
||||
tokenId: "testid",
|
||||
token: "testtoken",
|
||||
}
|
||||
return c, s
|
||||
}
|
||||
|
||||
func TestRequest(t *testing.T) {
|
||||
c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) {
|
||||
assert.Equal(t, "application/vnd.pterodactyl.v1+json", r.Header.Get("Accept"))
|
||||
assert.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
||||
assert.Equal(t, "Bearer testid.testtoken", r.Header.Get("Authorization"))
|
||||
assert.Equal(t, "/test", r.URL.Path)
|
||||
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
})
|
||||
r, err := c.requestOnce(context.Background(), "", "/test", nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
}
|
||||
|
||||
func TestRequestRetry(t *testing.T) {
|
||||
// Test if the client retries failed requests
|
||||
i := 0
|
||||
c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) {
|
||||
if i < 1 {
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
} else {
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
}
|
||||
i++
|
||||
})
|
||||
c.attempts = 2
|
||||
r, err := c.request(context.Background(), "", "", nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
assert.Equal(t, http.StatusOK, r.StatusCode)
|
||||
assert.Equal(t, 2, i)
|
||||
|
||||
// Test whether the client returns the last request after retry limit is reached
|
||||
i = 0
|
||||
c, _ = createTestClient(func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
i++
|
||||
})
|
||||
c.attempts = 2
|
||||
r, err = c.request(context.Background(), "get", "", nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
assert.Equal(t, http.StatusInternalServerError, r.StatusCode)
|
||||
assert.Equal(t, 2, i)
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) {
|
||||
assert.Equal(t, http.MethodGet, r.Method)
|
||||
assert.Len(t, r.URL.Query(), 1)
|
||||
assert.Equal(t, "world", r.URL.Query().Get("hello"))
|
||||
})
|
||||
r, err := c.get(context.Background(), "/test", q{"hello": "world"})
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
}
|
||||
|
||||
func TestPost(t *testing.T) {
|
||||
test := map[string]string{
|
||||
"hello": "world",
|
||||
}
|
||||
c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) {
|
||||
assert.Equal(t, http.MethodPost, r.Method)
|
||||
|
||||
})
|
||||
r, err := c.post(context.Background(), "/test", test)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, r)
|
||||
}
|
||||
remote/servers.go (new file, 225 lines)
@@ -0,0 +1,225 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
ProcessStopCommand = "command"
|
||||
ProcessStopSignal = "signal"
|
||||
ProcessStopNativeStop = "stop"
|
||||
)
|
||||
|
||||
// GetServers returns all of the servers that are present on the Panel making
|
||||
// parallel API calls to the endpoint if more than one page of servers is
|
||||
// returned.
|
||||
func (c *client) GetServers(ctx context.Context, limit int) ([]RawServerData, error) {
|
||||
servers, meta, err := c.getServersPaged(ctx, 0, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mu sync.Mutex
|
||||
if meta.LastPage > 1 {
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
for page := meta.CurrentPage + 1; page <= meta.LastPage; page++ {
|
||||
page := page
|
||||
g.Go(func() error {
|
||||
ps, _, err := c.getServersPaged(ctx, int(page), limit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mu.Lock()
|
||||
servers = append(servers, ps...)
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return servers, nil
|
||||
}
|
||||
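The fan-out above is a standard errgroup pattern. A stripped-down sketch with a fake fetch function in place of getServersPaged:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/errgroup"
)

// fetchPage stands in for getServersPaged; it just returns fake items.
func fetchPage(ctx context.Context, page int) ([]string, error) {
	return []string{fmt.Sprintf("server-from-page-%d", page)}, nil
}

func main() {
	ctx := context.Background()
	lastPage := 4

	// The first page is fetched up front to learn how many pages exist.
	items, _ := fetchPage(ctx, 1)

	var mu sync.Mutex
	g, ctx := errgroup.WithContext(ctx)
	for page := 2; page <= lastPage; page++ {
		page := page // capture the loop variable for the goroutine
		g.Go(func() error {
			ps, err := fetchPage(ctx, page)
			if err != nil {
				return err
			}
			// Guard the shared slice; goroutines append concurrently.
			mu.Lock()
			items = append(items, ps...)
			mu.Unlock()
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(len(items), "items fetched")
}
```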
|
||||
// ResetServersState updates the state of all servers on the node that are
|
||||
// currently marked as "installing" or "restoring from backup" to be marked as
|
||||
// a normal successful install state.
|
||||
//
|
||||
// This handles Wings exiting during either of these processes which will leave
|
||||
// things in a bad state within the Panel. This API call is executed once Wings
|
||||
// has fully booted all of the servers.
|
||||
func (c *client) ResetServersState(ctx context.Context) error {
|
||||
res, err := c.post(ctx, "/servers/reset", nil)
|
||||
if err != nil {
|
||||
return errors.WrapIf(err, "remote/servers: failed to reset server state on Panel")
|
||||
}
|
||||
res.Body.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *client) GetServerConfiguration(ctx context.Context, uuid string) (ServerConfigurationResponse, error) {
|
||||
var config ServerConfigurationResponse
|
||||
res, err := c.get(ctx, fmt.Sprintf("/servers/%s", uuid), nil)
|
||||
if err != nil {
|
||||
return config, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.HasError() {
|
||||
return config, res.Error()
|
||||
}
|
||||
|
||||
err = res.BindJSON(&config)
|
||||
return config, err
|
||||
}
|
||||
|
||||
func (c *client) GetInstallationScript(ctx context.Context, uuid string) (InstallationScript, error) {
|
||||
res, err := c.get(ctx, fmt.Sprintf("/servers/%s/install", uuid), nil)
|
||||
if err != nil {
|
||||
return InstallationScript{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.HasError() {
|
||||
return InstallationScript{}, res.Error()
|
||||
}
|
||||
|
||||
var config InstallationScript
|
||||
err = res.BindJSON(&config)
|
||||
return config, err
|
||||
}
|
||||
|
||||
func (c *client) SetInstallationStatus(ctx context.Context, uuid string, successful bool) error {
|
||||
resp, err := c.post(ctx, fmt.Sprintf("/servers/%s/install", uuid), d{"successful": successful})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.Error()
|
||||
}
|
||||
|
||||
func (c *client) SetArchiveStatus(ctx context.Context, uuid string, successful bool) error {
|
||||
resp, err := c.post(ctx, fmt.Sprintf("/servers/%s/archive", uuid), d{"successful": successful})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.Error()
|
||||
}
|
||||
|
||||
func (c *client) SetTransferStatus(ctx context.Context, uuid string, successful bool) error {
|
||||
state := "failure"
|
||||
if successful {
|
||||
state = "success"
|
||||
}
|
||||
resp, err := c.get(ctx, fmt.Sprintf("/servers/%s/transfer/%s", uuid, state), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.Error()
|
||||
}
|
||||
|
||||
// ValidateSftpCredentials makes a request to determine if the username and
|
||||
// password combination provided is associated with a valid server on the instance
|
||||
// using the Panel's authentication control mechanisms. This will get itself
|
||||
// throttled if too many requests are made, allowing us to completely offload
|
||||
// all of the authorization security logic to the Panel.
|
||||
func (c *client) ValidateSftpCredentials(ctx context.Context, request SftpAuthRequest) (SftpAuthResponse, error) {
|
||||
var auth SftpAuthResponse
|
||||
res, err := c.post(ctx, "/sftp/auth", request)
|
||||
if err != nil {
|
||||
return auth, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
e := res.Error()
|
||||
if e != nil {
|
||||
if res.StatusCode >= 400 && res.StatusCode < 500 {
|
||||
log.WithFields(log.Fields{
|
||||
"subsystem": "sftp",
|
||||
"username": request.User,
|
||||
"ip": request.IP,
|
||||
}).Warn(e.Error())
|
||||
|
||||
return auth, &SftpInvalidCredentialsError{}
|
||||
}
|
||||
|
||||
return auth, errors.New(e.Error())
|
||||
}
|
||||
|
||||
err = res.BindJSON(&auth)
|
||||
return auth, err
|
||||
}
|
||||
|
||||
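On the calling side, the SFTP subsystem is expected to treat SftpInvalidCredentialsError as a normal failed login rather than a Panel outage. The helper below is hypothetical; only the remote.Client methods and types it calls are taken from this package:

```go
package sftpauth

import (
	"context"

	"github.com/pterodactyl/wings/remote"
)

// validateLogin is a hypothetical wrapper: it returns (response, ok, err)
// where ok is false for bad credentials and err is reserved for transport
// or Panel failures.
func validateLogin(ctx context.Context, c remote.Client, user, pass, ip string) (remote.SftpAuthResponse, bool, error) {
	resp, err := c.ValidateSftpCredentials(ctx, remote.SftpAuthRequest{User: user, Pass: pass, IP: ip})
	if err != nil {
		// Bad credentials are an expected outcome, not a Panel failure.
		if _, ok := err.(*remote.SftpInvalidCredentialsError); ok {
			return resp, false, nil
		}
		return resp, false, err
	}
	return resp, true, nil
}
```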
func (c *client) GetBackupRemoteUploadURLs(ctx context.Context, backup string, size int64) (BackupRemoteUploadResponse, error) {
|
||||
var data BackupRemoteUploadResponse
|
||||
res, err := c.get(ctx, fmt.Sprintf("/backups/%s", backup), q{"size": strconv.FormatInt(size, 10)})
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.HasError() {
|
||||
return data, res.Error()
|
||||
}
|
||||
|
||||
err = res.BindJSON(&data)
|
||||
return data, err
|
||||
}
|
||||
|
||||
func (c *client) SetBackupStatus(ctx context.Context, backup string, data BackupRequest) error {
|
||||
resp, err := c.post(ctx, fmt.Sprintf("/backups/%s", backup), data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.Error()
|
||||
}
|
||||
|
||||
// SendRestorationStatus triggers a request to the Panel to notify it that a
|
||||
// restoration has been completed and the server should be marked as being
|
||||
// activated again.
|
||||
func (c *client) SendRestorationStatus(ctx context.Context, backup string, successful bool) error {
|
||||
resp, err := c.post(ctx, fmt.Sprintf("/backups/%s/restore", backup), d{"successful": successful})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return resp.Error()
|
||||
}
|
||||
|
||||
// getServersPaged returns a subset of servers from the Panel API using the
|
||||
// pagination query parameters.
|
||||
func (c *client) getServersPaged(ctx context.Context, page, limit int) ([]RawServerData, Pagination, error) {
|
||||
var r struct {
|
||||
Data []RawServerData `json:"data"`
|
||||
Meta Pagination `json:"meta"`
|
||||
}
|
||||
|
||||
res, err := c.get(ctx, "/servers", q{
|
||||
"page": strconv.Itoa(page),
|
||||
"per_page": strconv.Itoa(limit),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, r.Meta, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
if res.HasError() {
|
||||
return nil, r.Meta, res.Error()
|
||||
}
|
||||
if err := res.BindJSON(&r); err != nil {
|
||||
return nil, r.Meta, err
|
||||
}
|
||||
return r.Data, r.Meta, nil
|
||||
}
|
||||
remote/types.go (new file, 154 lines)
@@ -0,0 +1,154 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/apex/log"
|
||||
"github.com/pterodactyl/wings/parser"
|
||||
)
|
||||
|
||||
// A generic type allowing for easy binding use when making requests to API
|
||||
// endpoints that only expect a singular argument or something that would not
|
||||
// benefit from being a typed struct.
|
||||
//
|
||||
// Inspired by gin.H, same concept.
|
||||
type d map[string]interface{}
|
||||
|
||||
// Same concept as d, but a map of strings, used for querying GET requests.
|
||||
type q map[string]string
|
||||
|
||||
type ClientOption func(c *client)
|
||||
|
||||
type Pagination struct {
|
||||
CurrentPage uint `json:"current_page"`
|
||||
From uint `json:"from"`
|
||||
LastPage uint `json:"last_page"`
|
||||
PerPage uint `json:"per_page"`
|
||||
To uint `json:"to"`
|
||||
Total uint `json:"total"`
|
||||
}
|
||||
|
||||
// ServerConfigurationResponse holds the server configuration data returned from
|
||||
// the Panel. When a server process is started, Wings communicates with the
|
||||
// Panel to fetch the latest build information as well as get all of the details
|
||||
// needed to parse the given Egg.
|
||||
//
|
||||
// This means we do not need to hit Wings each time part of the server is
|
||||
// updated, and the Panel serves as the source of truth at all times. This also
|
||||
// means if a configuration is accidentally wiped on Wings we can self-recover
|
||||
// without too much hassle, so long as Wings is aware of what servers should
|
||||
// exist on it.
|
||||
type ServerConfigurationResponse struct {
|
||||
Settings json.RawMessage `json:"settings"`
|
||||
ProcessConfiguration *ProcessConfiguration `json:"process_configuration"`
|
||||
}
|
||||
|
||||
// InstallationScript defines installation script information for a server
|
||||
// process. This is used when a server is installed for the first time, and when
|
||||
// a server is marked for re-installation.
|
||||
type InstallationScript struct {
|
||||
ContainerImage string `json:"container_image"`
|
||||
Entrypoint string `json:"entrypoint"`
|
||||
Script string `json:"script"`
|
||||
}
|
||||
|
||||
// RawServerData is a raw response from the API for a server.
|
||||
type RawServerData struct {
|
||||
Uuid string `json:"uuid"`
|
||||
Settings json.RawMessage `json:"settings"`
|
||||
ProcessConfiguration json.RawMessage `json:"process_configuration"`
|
||||
}
|
||||
|
||||
// SftpAuthRequest defines the request details that are passed along to the Panel
|
||||
// when determining if the credentials provided to Wings are valid.
|
||||
type SftpAuthRequest struct {
|
||||
User string `json:"username"`
|
||||
Pass string `json:"password"`
|
||||
IP string `json:"ip"`
|
||||
SessionID []byte `json:"session_id"`
|
||||
ClientVersion []byte `json:"client_version"`
|
||||
}
|
||||
|
||||
// SftpAuthResponse is returned by the Panel when a pair of SFTP credentials
|
||||
// is successfully validated. This will include the specific server that was
|
||||
// matched as well as the permissions that are assigned to the authenticated
|
||||
// user for the SFTP subsystem.
|
||||
type SftpAuthResponse struct {
|
||||
Server string `json:"server"`
|
||||
Token string `json:"token"`
|
||||
Permissions []string `json:"permissions"`
|
||||
}
|
||||
|
||||
type OutputLineMatcher struct {
|
||||
// The raw string to match against. This may or may not be prefixed with
|
||||
// regex: which indicates we want to match against the regex expression.
|
||||
raw string
|
||||
reg *regexp.Regexp
|
||||
}
|
||||
|
||||
// Matches determines if a given string "s" matches the given line.
|
||||
func (olm *OutputLineMatcher) Matches(s string) bool {
|
||||
if olm.reg == nil {
|
||||
return strings.Contains(s, olm.raw)
|
||||
}
|
||||
|
||||
return olm.reg.MatchString(s)
|
||||
}
|
||||
|
||||
// String returns the matcher's raw comparison string.
|
||||
func (olm *OutputLineMatcher) String() string {
|
||||
return olm.raw
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals the startup lines into individual structs for easier
|
||||
// matching abilities.
|
||||
func (olm *OutputLineMatcher) UnmarshalJSON(data []byte) error {
|
||||
if err := json.Unmarshal(data, &olm.raw); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(olm.raw, "regex:") && len(olm.raw) > 6 {
|
||||
r, err := regexp.Compile(strings.TrimPrefix(olm.raw, "regex:"))
|
||||
if err != nil {
|
||||
log.WithField("error", err).WithField("raw", olm.raw).Warn("failed to compile output line marked as being regex")
|
||||
}
|
||||
|
||||
olm.reg = r
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
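The "regex:" prefix handling is easiest to see in isolation; this sketch re-implements the same decision without importing the type:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matches mirrors OutputLineMatcher: values prefixed with "regex:" are
// compiled and matched as regular expressions, anything else is a plain
// substring check against the console line.
func matches(raw, line string) bool {
	if strings.HasPrefix(raw, "regex:") && len(raw) > 6 {
		if r, err := regexp.Compile(strings.TrimPrefix(raw, "regex:")); err == nil {
			return r.MatchString(line)
		}
	}
	return strings.Contains(line, raw)
}

func main() {
	line := `[12:34:56] [Server thread/INFO]: Done (3.402s)! For help, type "help"`
	fmt.Println(matches(")! For help", line))               // true: substring match
	fmt.Println(matches(`regex:Done \(\d+\.\d+s\)!`, line)) // true: regex match
}
```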
// ProcessStopConfiguration defines what is used when stopping an instance.
|
||||
type ProcessStopConfiguration struct {
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// ProcessConfiguration defines the process configuration for a given server
|
||||
// instance. This sets what Wings is looking for to mark a server as done starting
|
||||
// what to do when stopping, and what changes to make to the configuration file
|
||||
// for a server.
|
||||
type ProcessConfiguration struct {
|
||||
Startup struct {
|
||||
Done []*OutputLineMatcher `json:"done"`
|
||||
UserInteraction []string `json:"user_interaction"`
|
||||
StripAnsi bool `json:"strip_ansi"`
|
||||
} `json:"startup"`
|
||||
Stop ProcessStopConfiguration `json:"stop"`
|
||||
ConfigurationFiles []parser.ConfigurationFile `json:"configs"`
|
||||
}
|
||||
|
||||
type BackupRemoteUploadResponse struct {
|
||||
Parts []string `json:"parts"`
|
||||
PartSize int64 `json:"part_size"`
|
||||
}
|
||||
|
||||
type BackupRequest struct {
|
||||
Checksum string `json:"checksum"`
|
||||
ChecksumType string `json:"checksum_type"`
|
||||
Size int64 `json:"size"`
|
||||
Successful bool `json:"successful"`
|
||||
}
|
||||
router/downloader/downloader.go (new file, 327 lines)
@@ -0,0 +1,327 @@
|
||||
package downloader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"emperror.dev/errors"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var client = &http.Client{
|
||||
Timeout: time.Hour * 12,
|
||||
// Disallow any redirect on a HTTP call. This is a security requirement: do not modify
|
||||
// this logic without first ensuring that the new target location IS NOT within the current
|
||||
// instance's local network.
|
||||
//
|
||||
// This specific error response just causes the client to not follow the redirect and
|
||||
// returns the actual redirect response to the caller. Not perfect, but simple and most
|
||||
// people won't be using URLs that redirect anyways hopefully?
|
||||
//
|
||||
// We'll re-evaluate this down the road if needed.
|
||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
},
|
||||
}
|
||||
|
||||
var instance = &Downloader{
|
||||
// Tracks all of the active downloads.
|
||||
downloadCache: make(map[string]*Download),
|
||||
// Tracks all of the downloads active for a given server instance. This is
|
||||
// primarily used to make things quicker and keep the code a little more
|
||||
// legible throughout here.
|
||||
serverCache: make(map[string][]string),
|
||||
}
|
||||
|
||||
// Regex to match the end of an IPv4/IPv6 address. This allows the port to be removed
|
||||
// so that we are just working with the raw IP address in question.
|
||||
var ipMatchRegex = regexp.MustCompile(`(:\d+)$`)
|
||||
|
||||
// Internal IP ranges that should be blocked if the resource requested resolves within.
|
||||
var internalRanges = []*net.IPNet{
|
||||
mustParseCIDR("127.0.0.1/8"),
|
||||
mustParseCIDR("10.0.0.0/8"),
|
||||
mustParseCIDR("172.16.0.0/12"),
|
||||
mustParseCIDR("192.168.0.0/16"),
|
||||
mustParseCIDR("169.254.0.0/16"),
|
||||
mustParseCIDR("::1/128"),
|
||||
mustParseCIDR("fe80::/10"),
|
||||
mustParseCIDR("fc00::/7"),
|
||||
}
|
||||
|
||||
const ErrInternalResolution = errors.Sentinel("downloader: destination resolves to internal network location")
|
||||
const ErrInvalidIPAddress = errors.Sentinel("downloader: invalid IP address")
|
||||
const ErrDownloadFailed = errors.Sentinel("downloader: download request failed")
|
||||
|
||||
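To make the SSRF guard concrete, here is a standalone sketch of checking a resolved address against these ranges. The body of mustParseCIDR is not part of this hunk, so the version below is an assumption: a thin wrapper around net.ParseCIDR that panics on bad input.

```go
package main

import (
	"fmt"
	"net"
)

// mustParseCIDR is assumed here: the ranges are fixed at compile time, so a
// parse failure is a programming error and panicking is acceptable.
func mustParseCIDR(s string) *net.IPNet {
	_, block, err := net.ParseCIDR(s)
	if err != nil {
		panic(err)
	}
	return block
}

var internalRanges = []*net.IPNet{
	mustParseCIDR("127.0.0.1/8"),
	mustParseCIDR("10.0.0.0/8"),
	mustParseCIDR("172.16.0.0/12"),
	mustParseCIDR("192.168.0.0/16"),
	mustParseCIDR("169.254.0.0/16"),
	mustParseCIDR("::1/128"),
	mustParseCIDR("fe80::/10"),
	mustParseCIDR("fc00::/7"),
}

// isInternal reports whether ip falls inside any blocked range.
func isInternal(ip net.IP) bool {
	for _, block := range internalRanges {
		if block.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isInternal(net.ParseIP("192.168.1.10")))  // true
	fmt.Println(isInternal(net.ParseIP("93.184.216.34"))) // false
}
```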
type Counter struct {
|
||||
total int
|
||||
onWrite func(total int)
|
||||
}
|
||||
|
||||
func (c *Counter) Write(p []byte) (int, error) {
|
||||
n := len(p)
|
||||
c.total += n
|
||||
c.onWrite(c.total)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
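The Counter is wired up through io.TeeReader further down in Execute(): every chunk copied to disk also passes through the counter, which reports the running total to a callback. A minimal, file-free sketch of that wiring:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// counter mirrors the Counter above: it only counts the bytes flowing through
// it and reports the running total to a callback.
type counter struct {
	total   int
	onWrite func(total int)
}

func (c *counter) Write(p []byte) (int, error) {
	n := len(p)
	c.total += n
	c.onWrite(c.total)
	return n, nil
}

func main() {
	body := strings.NewReader("pretend this is a large remote file body")
	cw := &counter{onWrite: func(total int) {
		fmt.Printf("downloaded %d bytes so far\n", total)
	}}

	// Everything read from r is also written to cw, which is how Execute()
	// tracks progress while the filesystem consumes the stream.
	r := io.TeeReader(body, cw)
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		panic(err)
	}
}
```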
type DownloadRequest struct {
|
||||
URL *url.URL
|
||||
Directory string
|
||||
}
|
||||
|
||||
type Download struct {
|
||||
Identifier string
|
||||
mu sync.RWMutex
|
||||
req DownloadRequest
|
||||
server *server.Server
|
||||
progress float64
|
||||
cancelFunc *context.CancelFunc
|
||||
}
|
||||
|
||||
// Starts a new tracked download which allows for cancellation later on by calling
|
||||
// the Downloader.Cancel function.
|
||||
func New(s *server.Server, r DownloadRequest) *Download {
|
||||
dl := Download{
|
||||
Identifier: uuid.Must(uuid.NewRandom()).String(),
|
||||
req: r,
|
||||
server: s,
|
||||
}
|
||||
instance.track(&dl)
|
||||
return &dl
|
||||
}
|
||||
|
||||
// Returns all of the tracked downloads for a given server instance.
|
||||
func ByServer(sid string) []*Download {
|
||||
instance.mu.Lock()
|
||||
defer instance.mu.Unlock()
|
||||
var downloads []*Download
|
||||
if v, ok := instance.serverCache[sid]; ok {
|
||||
for _, id := range v {
|
||||
if dl, dlok := instance.downloadCache[id]; dlok {
|
||||
downloads = append(downloads, dl)
|
||||
}
|
||||
}
|
||||
}
|
||||
return downloads
|
||||
}
|
||||
|
||||
// Returns a single Download matching a given identifier, or nil if no
// tracked download exists for that identifier.
|
||||
func ByID(dlid string) *Download {
|
||||
return instance.find(dlid)
|
||||
}
|
||||
|
||||
//goland:noinspection GoVetCopyLock
|
||||
func (dl Download) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Identifier string
|
||||
Progress float64
|
||||
}{
|
||||
Identifier: dl.Identifier,
|
||||
Progress: dl.Progress(),
|
||||
})
|
||||
}
|
||||
|
||||
// Executes a given download for the server and begins writing the file to the disk. Once
|
||||
// completed the download will be removed from the cache.
|
||||
func (dl *Download) Execute() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Hour*12)
|
||||
dl.cancelFunc = &cancel
|
||||
defer dl.Cancel()
|
||||
|
||||
// Always ensure that we're checking the destination for the download to avoid a malicious
|
||||
// user from accessing internal network resources.
|
||||
if err := dl.isExternalNetwork(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// At this point we have verified the destination is not within the local network, so we can
|
||||
// now make a request to that URL and pull down the file, saving it to the server's data
|
||||
// directory.
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, dl.req.URL.String(), nil)
|
||||
if err != nil {
|
||||
return errors.WrapIf(err, "downloader: failed to create request")
|
||||
}
|
||||
|
||||
req.Header.Set("User-Agent", "Pterodactyl Panel (https://pterodactyl.io)")
|
||||
res, err := client.Do(req)
|
||||
if err != nil {
|
||||
return ErrDownloadFailed
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return errors.New("downloader: got bad response status from endpoint: " + res.Status)
|
||||
}
|
||||
|
||||
// If there is a Content-Length header on this request go ahead and check that we can
|
||||
// even write the whole file before beginning this process. If there is no header present
|
||||
// we'll just have to give it a spin and see how it goes.
|
||||
if res.ContentLength > 0 {
|
||||
if err := dl.server.Filesystem().HasSpaceFor(res.ContentLength); err != nil {
|
||||
return errors.WrapIf(err, "downloader: failed to write file: not enough space")
|
||||
}
|
||||
}
|
||||
|
||||
fnameparts := strings.Split(dl.req.URL.Path, "/")
|
||||
p := filepath.Join(dl.req.Directory, fnameparts[len(fnameparts)-1])
|
||||
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
|
||||
|
||||
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
|
||||
if err := dl.server.Filesystem().Writefile(p, r); err != nil {
|
||||
return errors.WrapIf(err, "downloader: failed to write file to server directory")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cancels a running download and frees up the associated resources. If a file is being
|
||||
// written, a partial file will remain present on the disk.
|
||||
func (dl *Download) Cancel() {
|
||||
if dl.cancelFunc != nil {
|
||||
(*dl.cancelFunc)()
|
||||
}
|
||||
instance.remove(dl.Identifier)
|
||||
}
|
||||
|
||||
// Checks if the given download belongs to the provided server.
|
||||
func (dl *Download) BelongsTo(s *server.Server) bool {
|
||||
return dl.server.Id() == s.Id()
|
||||
}
|
||||
|
||||
// Returns the current progress of the download as a float value between 0 and 1 where
|
||||
// 1 indicates that the download is completed.
|
||||
func (dl *Download) Progress() float64 {
|
||||
dl.mu.RLock()
|
||||
defer dl.mu.RUnlock()
|
||||
return dl.progress
|
||||
}
|
||||
|
||||
// Handles a write event by updating the progress completed percentage and firing off
|
||||
// events to the server websocket as needed.
|
||||
func (dl *Download) counter(contentLength int64) *Counter {
|
||||
onWrite := func(t int) {
|
||||
dl.mu.Lock()
|
||||
defer dl.mu.Unlock()
|
||||
dl.progress = float64(t) / float64(contentLength)
|
||||
}
|
||||
return &Counter{
|
||||
onWrite: onWrite,
|
||||
}
|
||||
}
|
||||
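A small sketch of the Counter/TeeReader pattern used above, in isolation: every read from src is mirrored into the Counter, which reports the running byte total. Here src is an assumed io.Reader and the print is a stand-in for the progress update:

cnt := &Counter{onWrite: func(total int) {
	fmt.Printf("copied %d bytes\n", total)
}}
r := io.TeeReader(src, cnt) // src is an assumed io.Reader
if _, err := io.Copy(ioutil.Discard, r); err != nil {
	// handle the copy error as appropriate
}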
|
||||
// Verifies that a given download resolves to a location not within the current local
|
||||
// network for the machine. If the final destination of a resource is within the local
|
||||
// network an ErrInternalResolution error is returned.
|
||||
func (dl *Download) isExternalNetwork(ctx context.Context) error {
|
||||
dialer := &net.Dialer{
|
||||
LocalAddr: nil,
|
||||
}
|
||||
|
||||
host := dl.req.URL.Host
|
||||
|
||||
// This bit of integer math converts a decimal-encoded IP into a proper dotted IPv4 address.
|
||||
// For example: 16843009 would become 1.1.1.1
|
||||
if i, err := strconv.ParseInt(host, 10, 64); err == nil {
|
||||
host = strconv.FormatInt((i>>24)&0xFF, 10) + "." + strconv.FormatInt((i>>16)&0xFF, 10) + "." + strconv.FormatInt((i>>8)&0xFF, 10) + "." + strconv.FormatInt(i&0xFF, 10)
|
||||
}
|
||||
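As a quick sanity check of the shift-and-mask conversion above, the same expression evaluated on the example value from the comment:

i := int64(16843009) // 0x01010101
host := strconv.FormatInt((i>>24)&0xFF, 10) + "." + strconv.FormatInt((i>>16)&0xFF, 10) + "." + strconv.FormatInt((i>>8)&0xFF, 10) + "." + strconv.FormatInt(i&0xFF, 10)
fmt.Println(host) // prints "1.1.1.1"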
|
||||
if !ipMatchRegex.MatchString(host) {
|
||||
if dl.req.URL.Scheme == "https" {
|
||||
host = host + ":443"
|
||||
} else {
|
||||
host = host + ":80"
|
||||
}
|
||||
}
|
||||
|
||||
c, err := dialer.DialContext(ctx, "tcp", host)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
_ = c.Close()
|
||||
|
||||
ip := net.ParseIP(ipMatchRegex.ReplaceAllString(c.RemoteAddr().String(), ""))
|
||||
if ip == nil {
|
||||
return errors.WithStack(ErrInvalidIPAddress)
|
||||
}
|
||||
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
|
||||
return errors.WithStack(ErrInternalResolution)
|
||||
}
|
||||
for _, block := range internalRanges {
|
||||
if block.Contains(ip) {
|
||||
return errors.WithStack(ErrInternalResolution)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
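The core of the SSRF guard above is the block.Contains check against internalRanges. A standalone sketch of just that check follows; note the real function dials the destination to learn the actual remote address rather than doing a bare DNS lookup, and the hostname here is an assumption for illustration:

addrs, err := net.LookupIP("files.example.com") // assumed hostname
if err == nil && len(addrs) > 0 {
	for _, block := range internalRanges {
		if block.Contains(addrs[0]) {
			fmt.Println("destination resolves to an internal network location")
		}
	}
}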
|
||||
// Defines a global downloader struct that keeps track of all currently processing downloads
|
||||
// for the machine.
|
||||
type Downloader struct {
|
||||
mu sync.RWMutex
|
||||
downloadCache map[string]*Download
|
||||
serverCache map[string][]string
|
||||
}
|
||||
|
||||
// Tracks a download in the internal cache for this instance.
|
||||
func (d *Downloader) track(dl *Download) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
sid := dl.server.Id()
|
||||
if _, ok := d.downloadCache[dl.Identifier]; !ok {
|
||||
d.downloadCache[dl.Identifier] = dl
|
||||
if _, ok := d.serverCache[sid]; !ok {
|
||||
d.serverCache[sid] = []string{}
|
||||
}
|
||||
d.serverCache[sid] = append(d.serverCache[sid], dl.Identifier)
|
||||
}
|
||||
}
|
||||
|
||||
// Finds a given download entry using the provided ID and returns it.
|
||||
func (d *Downloader) find(dlid string) *Download {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
if entry, ok := d.downloadCache[dlid]; ok {
|
||||
return entry
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the given download reference from the cache storing them. This also updates
|
||||
// the slice of active downloads for a given server to not include this download.
|
||||
func (d *Downloader) remove(dlid string) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
if _, ok := d.downloadCache[dlid]; !ok {
|
||||
return
|
||||
}
|
||||
sid := d.downloadCache[dlid].server.Id()
|
||||
delete(d.downloadCache, dlid)
|
||||
if tracked, ok := d.serverCache[sid]; ok {
|
||||
var out []string
|
||||
for _, k := range tracked {
|
||||
if k != dlid {
|
||||
out = append(out, k)
|
||||
}
|
||||
}
|
||||
d.serverCache[sid] = out
|
||||
}
|
||||
}
|
||||
|
||||
func mustParseCIDR(ip string) *net.IPNet {
|
||||
_, block, err := net.ParseCIDR(ip)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("downloader: failed to parse CIDR: %s", err))
|
||||
}
|
||||
return block
|
||||
}
|
||||
router/error.go
@@ -2,57 +2,64 @@ package router
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
)
|
||||
|
||||
type RequestError struct {
|
||||
Err error
|
||||
Uuid string
|
||||
Message string
|
||||
err error
|
||||
uuid string
|
||||
message string
|
||||
server *server.Server
|
||||
}
|
||||
|
||||
// Attaches an error to the gin.Context object for the request and ensures that it
|
||||
// has a proper stacktrace associated with it when doing so.
|
||||
//
|
||||
// If you just call c.Error(err) without using this function you'll likely end up
|
||||
// with an error that has no annotated stack on it.
|
||||
func WithError(c *gin.Context, err error) error {
|
||||
return c.Error(errors.WithStackDepthIf(err, 1))
|
||||
}
|
||||
|
||||
// Generates a new tracked error, which simply tracks the specific error that
|
||||
// is being passed in, and also assigned a UUID to the error so that it can be
|
||||
// cross referenced in the logs.
|
||||
func TrackedError(err error) *RequestError {
|
||||
func NewTrackedError(err error) *RequestError {
|
||||
return &RequestError{
|
||||
Err: err,
|
||||
Uuid: uuid.Must(uuid.NewRandom()).String(),
|
||||
Message: "",
|
||||
err: err,
|
||||
uuid: uuid.Must(uuid.NewRandom()).String(),
|
||||
}
|
||||
}
|
||||
|
||||
// Same as TrackedError, except this will also attach the server instance that
|
||||
// Same as NewTrackedError, except this will also attach the server instance that
|
||||
// generated this error, for the purposes of logging.
|
||||
func TrackedServerError(err error, s *server.Server) *RequestError {
|
||||
func NewServerError(err error, s *server.Server) *RequestError {
|
||||
return &RequestError{
|
||||
Err: err,
|
||||
Uuid: uuid.Must(uuid.NewRandom()).String(),
|
||||
Message: "",
|
||||
err: err,
|
||||
uuid: uuid.Must(uuid.NewRandom()).String(),
|
||||
server: s,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *RequestError) logger() *log.Entry {
|
||||
if e.server != nil {
|
||||
return e.server.Log().WithField("error_id", e.Uuid)
|
||||
return e.server.Log().WithField("error_id", e.uuid).WithField("error", e.err)
|
||||
}
|
||||
|
||||
return log.WithField("error_id", e.Uuid)
|
||||
return log.WithField("error_id", e.uuid).WithField("error", e.err)
|
||||
}
|
||||
|
||||
// Sets the output message to display to the user in the error.
|
||||
func (e *RequestError) SetMessage(msg string) *RequestError {
|
||||
e.Message = msg
|
||||
e.message = msg
|
||||
return e
|
||||
}
|
||||
|
||||
@@ -60,84 +67,90 @@ func (e *RequestError) SetMessage(msg string) *RequestError {
|
||||
// will also include the error UUID in the output so that the user can report that
|
||||
// and link the response to a specific error in the logs.
|
||||
func (e *RequestError) AbortWithStatus(status int, c *gin.Context) {
|
||||
// In instances where the status has already been set just use that existing status
|
||||
// since we cannot change it at this point, and trying to do so will emit a gin warning
|
||||
// into the program output.
|
||||
if c.Writer.Status() != 200 {
|
||||
status = c.Writer.Status()
|
||||
}
|
||||
|
||||
// If this error is because the resource does not exist, we likely do not need to log
|
||||
// the error anywhere, just return a 404 and move on with our lives.
|
||||
if os.IsNotExist(e.Err) {
|
||||
e.logger().WithField("error", e.Err).Debug("encountered os.IsNotExist error while handling request")
|
||||
|
||||
if errors.Is(e.err, os.ErrNotExist) {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The requested resource was not found on the system.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(e.err.Error(), "invalid URL escape") {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "Some of the data provided in the request appears to be escaped improperly.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// If this is a Filesystem error just return it without all of the tracking code nonsense
|
||||
// since we don't need to be logging it into the logs or anything, its just a normal error
|
||||
// that the user can solve on their end.
|
||||
if st, msg := e.getAsFilesystemError(); st != 0 {
|
||||
c.AbortWithStatusJSON(st, gin.H{"error": msg})
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, log the error to zap, and then report the error back to the user.
|
||||
if status >= 500 {
|
||||
e.logger().WithField("error", e.Err).Error("encountered HTTP/500 error while handling request")
|
||||
|
||||
c.Error(errors.WithStack(e))
|
||||
e.logger().Error("unexpected error while handling HTTP request")
|
||||
} else {
|
||||
e.logger().WithField("error", e.Err).Debug("encountered non-HTTP/500 error while handling request")
|
||||
e.logger().Debug("non-server error encountered while handling HTTP request")
|
||||
}
|
||||
|
||||
msg := "An unexpected error was encountered while processing this request."
|
||||
if e.Message != "" {
|
||||
msg = e.Message
|
||||
if e.message == "" {
|
||||
e.message = "An unexpected error was encountered while processing this request."
|
||||
}
|
||||
|
||||
c.AbortWithStatusJSON(status, gin.H{
|
||||
"error": msg,
|
||||
"error_id": e.Uuid,
|
||||
})
|
||||
c.AbortWithStatusJSON(status, gin.H{"error": e.message, "error_id": e.uuid})
|
||||
}
|
||||
|
||||
// Helper function to just abort with an internal server error. This is generally the response
|
||||
// from most errors encountered by the API.
|
||||
func (e *RequestError) AbortWithServerError(c *gin.Context) {
|
||||
func (e *RequestError) Abort(c *gin.Context) {
|
||||
e.AbortWithStatus(http.StatusInternalServerError, c)
|
||||
}
|
||||
|
||||
// Looks at the given RequestError and determines if it is a specific filesystem error that
|
||||
// we can process and return differently for the user.
|
||||
func (e *RequestError) getAsFilesystemError() (int, string) {
|
||||
// Some external things end up calling fmt.Errorf() on our filesystem errors
|
||||
// which ends up just unleashing chaos on the system. For the sake of this
|
||||
// fallback to using text checks...
|
||||
if filesystem.IsErrorCode(e.err, filesystem.ErrCodeDenylistFile) || strings.Contains(e.err.Error(), "filesystem: file access prohibited") {
|
||||
return http.StatusForbidden, "This file cannot be modified: present in egg denylist."
|
||||
}
|
||||
if filesystem.IsErrorCode(e.err, filesystem.ErrCodePathResolution) || strings.Contains(e.err.Error(), "resolves to a location outside the server root") {
|
||||
return http.StatusNotFound, "The requested resource was not found on the system."
|
||||
}
|
||||
if filesystem.IsErrorCode(e.err, filesystem.ErrCodeIsDirectory) || strings.Contains(e.err.Error(), "filesystem: is a directory") {
|
||||
return http.StatusBadRequest, "Cannot perform that action: file is a directory."
|
||||
}
|
||||
if filesystem.IsErrorCode(e.err, filesystem.ErrCodeDiskSpace) || strings.Contains(e.err.Error(), "filesystem: not enough disk space") {
|
||||
return http.StatusBadRequest, "Cannot perform that action: file is a directory."
|
||||
}
|
||||
if strings.HasSuffix(e.err.Error(), "file name too long") {
|
||||
return http.StatusBadRequest, "Cannot perform that action: file name is too long."
|
||||
}
|
||||
if e, ok := e.err.(*os.SyscallError); ok && e.Syscall == "readdirent" {
|
||||
return http.StatusNotFound, "The requested directory does not exist."
|
||||
}
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
// Handle specific filesystem errors for a server.
|
||||
func (e *RequestError) AbortFilesystemError(c *gin.Context) {
|
||||
if errors.Is(e.Err, os.ErrNotExist) {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The requested resource was not found.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if errors.Is(e.Err, filesystem.ErrNotEnoughDiskSpace) {
|
||||
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
|
||||
"error": "There is not enough disk space available to perform that action.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasSuffix(e.Err.Error(), "file name too long") {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "File name is too long.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if e, ok := e.Err.(*os.SyscallError); ok && e.Syscall == "readdirent" {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The requested directory does not exist.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasSuffix(e.Err.Error(), "file name too long") {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "Cannot perform that action: file name is too long.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
e.AbortWithServerError(c)
|
||||
e.Abort(c)
|
||||
}
|
||||
|
||||
// Format the error to a string and include the UUID.
|
||||
func (e *RequestError) Error() string {
|
||||
return fmt.Sprintf("%v (uuid: %s)", e.Err, e.Uuid)
|
||||
return fmt.Sprintf("%v (uuid: %s)", e.err, e.uuid)
|
||||
}
|
||||
|
||||
@@ -2,80 +2,15 @@ package router
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Set the access request control headers on all of the requests.
|
||||
func SetAccessControlHeaders(c *gin.Context) {
|
||||
c.Header("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
|
||||
|
||||
o := c.GetHeader("Origin")
|
||||
if o != config.Get().PanelLocation {
|
||||
for _, origin := range config.Get().AllowedOrigins {
|
||||
if origin != "*" && o != origin {
|
||||
continue
|
||||
}
|
||||
|
||||
c.Header("Access-Control-Allow-Origin", origin)
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.Header("Access-Control-Allow-Origin", config.Get().PanelLocation)
|
||||
c.Next()
|
||||
}
|
||||
|
||||
// Authenticates the request token against the given permission string, ensuring that
|
||||
// if it is a server permission, the token has control over that server. If it is a global
|
||||
// token, this will ensure that the request is using a properly signed global token.
|
||||
func AuthorizationMiddleware(c *gin.Context) {
|
||||
auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2)
|
||||
|
||||
if len(auth) != 2 || auth[0] != "Bearer" {
|
||||
c.Header("WWW-Authenticate", "Bearer")
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{
|
||||
"error": "The required authorization heads were not present in the request.",
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Try to match the request against the global token for the Daemon, regardless
|
||||
// of the permission type. If nothing is matched we will fall through to the Panel
|
||||
// API to try and validate permissions for a server.
|
||||
if auth[1] == config.Get().AuthenticationToken {
|
||||
c.Next()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
|
||||
"error": "You are not authorized to access this endpoint.",
|
||||
})
|
||||
}
|
||||
|
||||
// Helper function to fetch a server out of the servers collection stored in memory.
|
||||
func GetServer(uuid string) *server.Server {
|
||||
return server.GetServers().Find(func(s *server.Server) bool {
|
||||
return uuid == s.Id()
|
||||
})
|
||||
}
|
||||
|
||||
// Ensure that the requested server exists in this setup. Returns a 404 if we cannot
|
||||
// locate it.
|
||||
func ServerExists(c *gin.Context) {
|
||||
u, err := uuid.Parse(c.Param("server"))
|
||||
if err != nil || GetServer(u.String()) == nil {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The resource you requested does not exist.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
// ExtractServer returns the server instance from the gin context. If there is
|
||||
// no server set in the context (e.g. calling from a controller not protected
|
||||
// by ServerExists) this function will panic.
|
||||
//
|
||||
// This function is deprecated. Use middleware.ExtractServer.
|
||||
func ExtractServer(c *gin.Context) *server.Server {
|
||||
return middleware.ExtractServer(c)
|
||||
}
|
||||
|
||||
router/middleware/middleware.go
Normal file
@@ -0,0 +1,354 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/subtle"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
)
|
||||
|
||||
// RequestError is a custom error type returned when something goes wrong with
|
||||
// any of the HTTP endpoints.
|
||||
type RequestError struct {
|
||||
err error
|
||||
status int
|
||||
msg string
|
||||
}
|
||||
|
||||
// NewError returns a new RequestError for the provided error.
|
||||
func NewError(err error) *RequestError {
|
||||
return &RequestError{
|
||||
// Attach a stacktrace to the error if it is missing at this point and mark it
|
||||
// as originating from the location where NewError was called, rather than this
|
||||
// specific point in the code.
|
||||
err: errors.WithStackDepthIf(err, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// SetMessage allows for a custom error message to be set on an existing
|
||||
// RequestError instance.
|
||||
func (re *RequestError) SetMessage(m string) {
|
||||
re.msg = m
|
||||
}
|
||||
|
||||
// SetStatus sets the HTTP status code for the error response. By default this
|
||||
// is a HTTP-500 error.
|
||||
func (re *RequestError) SetStatus(s int) {
|
||||
re.status = s
|
||||
}
|
||||
|
||||
// Abort aborts the given HTTP request with the specified status code and then
|
||||
// logs the event into the logs. The error that is output will include the unique
|
||||
// request ID if it is present.
|
||||
func (re *RequestError) Abort(c *gin.Context, status int) {
|
||||
reqId := c.Writer.Header().Get("X-Request-Id")
|
||||
|
||||
// Generate the base logger instance, attaching the unique request ID and
|
||||
// the URL that was requested.
|
||||
event := log.WithField("request_id", reqId).WithField("url", c.Request.URL.String())
|
||||
// If there is a server present in the gin.Context stack go ahead and pull it
|
||||
// and attach that server UUID to the logs as well so that we can see what specific
|
||||
// server triggered this error.
|
||||
if s, ok := c.Get("server"); ok {
|
||||
if s, ok := s.(*server.Server); ok {
|
||||
event = event.WithField("server_id", s.Id())
|
||||
}
|
||||
}
|
||||
|
||||
if c.Writer.Status() == 200 {
|
||||
// Handle context deadlines being exceeded a little differently since we want
|
||||
// to report a more user-friendly error and a proper error code. The "context
|
||||
// canceled" error is generally when a request is terminated before all of the
|
||||
// logic is finished running.
|
||||
if errors.Is(re.err, context.DeadlineExceeded) {
|
||||
re.SetStatus(http.StatusGatewayTimeout)
|
||||
re.SetMessage("The server could not process this request in time, please try again.")
|
||||
} else if strings.Contains(re.Cause().Error(), "context canceled") {
|
||||
re.SetStatus(http.StatusBadRequest)
|
||||
re.SetMessage("Request aborted by client.")
|
||||
}
|
||||
}
|
||||
|
||||
// c.Writer.Status() will be a non-200 value if the headers have already been sent
|
||||
// to the requester but an error is encountered. This can happen if there is an issue
|
||||
// marshaling a struct placed into a c.JSON() call (or c.AbortWithJSON() call).
|
||||
if status >= 500 || c.Writer.Status() != 200 {
|
||||
event.WithField("status", status).WithField("error", re.err).Error("error while handling HTTP request")
|
||||
} else {
|
||||
event.WithField("status", status).WithField("error", re.err).Debug("error handling HTTP request (not a server error)")
|
||||
}
|
||||
if re.msg == "" {
|
||||
re.msg = "An unexpected error was encountered while processing this request"
|
||||
}
|
||||
// Now abort the request with the error message and include the unique request
|
||||
// ID that was present to make things super easy on people who don't know how
|
||||
// or cannot view the response headers (where X-Request-Id would be present).
|
||||
c.AbortWithStatusJSON(status, gin.H{"error": re.msg, "request_id": reqId})
|
||||
}
|
||||
|
||||
// Cause returns the underlying error.
|
||||
func (re *RequestError) Cause() error {
|
||||
return re.err
|
||||
}
|
||||
|
||||
// Error returns the underlying error message for this request.
|
||||
func (re *RequestError) Error() string {
|
||||
return re.err.Error()
|
||||
}
|
||||
|
||||
// Looks at the given RequestError and determines if it is a specific filesystem
|
||||
// error that we can process and return differently for the user.
|
||||
//
|
||||
// Some external things end up calling fmt.Errorf() on our filesystem errors
|
||||
// which ends up just unleashing chaos on the system. For the sake of this,
|
||||
// fallback to using text checks.
|
||||
//
|
||||
// If the error passed into this call is nil, or does not match any known filesystem error, empty values will
|
||||
// be returned to the caller.
|
||||
func (re *RequestError) asFilesystemError() (int, string) {
|
||||
err := re.Cause()
|
||||
if err == nil {
|
||||
return 0, ""
|
||||
}
|
||||
if filesystem.IsErrorCode(err, filesystem.ErrCodeDenylistFile) || strings.Contains(err.Error(), "filesystem: file access prohibited") {
|
||||
return http.StatusForbidden, "This file cannot be modified: present in egg denylist."
|
||||
}
|
||||
if filesystem.IsErrorCode(err, filesystem.ErrCodePathResolution) || strings.Contains(err.Error(), "resolves to a location outside the server root") {
|
||||
return http.StatusNotFound, "The requested resource was not found on the system."
|
||||
}
|
||||
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) || strings.Contains(err.Error(), "filesystem: is a directory") {
|
||||
return http.StatusBadRequest, "Cannot perform that action: file is a directory."
|
||||
}
|
||||
if filesystem.IsErrorCode(err, filesystem.ErrCodeDiskSpace) || strings.Contains(err.Error(), "filesystem: not enough disk space") {
|
||||
return http.StatusBadRequest, "There is not enough disk space available to perform that action."
|
||||
}
|
||||
if strings.HasSuffix(err.Error(), "file name too long") {
|
||||
return http.StatusBadRequest, "Cannot perform that action: file name is too long."
|
||||
}
|
||||
if e, ok := err.(*os.SyscallError); ok && e.Syscall == "readdirent" {
|
||||
return http.StatusNotFound, "The requested directory does not exist."
|
||||
}
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
// AttachRequestID attaches a unique ID to the incoming HTTP request so that any
|
||||
// errors that are generated or returned to the client will include this reference
|
||||
// allowing for an easier time identifying the specific request that failed for
|
||||
// the user.
|
||||
//
|
||||
// If you are using a tool such as Sentry or Bugsnag for error reporting this is
|
||||
// a great location to also attach this request ID to your error handling logic
|
||||
// so that you can easily cross-reference the errors.
|
||||
func AttachRequestID() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
id := uuid.New().String()
|
||||
c.Set("request_id", id)
|
||||
c.Set("logger", log.WithField("request_id", id))
|
||||
c.Header("X-Request-Id", id)
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
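A sketch of how these middleware constructors might be chained onto a fresh gin engine, mirroring what Configure() does later in this diff (CaptureErrors and SetAccessControlHeaders are defined further down in this file); the route and handler are illustrative:

router := gin.New()
router.Use(AttachRequestID(), CaptureErrors(), SetAccessControlHeaders())
router.GET("/api/system", func(c *gin.Context) {
	// The logger set by AttachRequestID is available to every downstream handler.
	ExtractLogger(c).Info("handling system information request")
	c.Status(http.StatusOK)
})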
|
||||
// AttachServerManager attaches the server manager to the request context which
|
||||
// allows routes to access the underlying server collection.
|
||||
func AttachServerManager(m *server.Manager) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
c.Set("manager", m)
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// AttachApiClient attaches the application API client which allows routes to
|
||||
// access server resources from the Panel easily.
|
||||
func AttachApiClient(client remote.Client) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
c.Set("api_client", client)
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// CaptureAndAbort aborts the request and attaches the provided error to the gin
|
||||
// context so it can be reported properly. If the error is missing a stacktrace
|
||||
// at the time it is called the stack will be attached.
|
||||
func CaptureAndAbort(c *gin.Context, err error) {
|
||||
c.Abort()
|
||||
c.Error(errors.WithStackDepthIf(err, 1))
|
||||
}
|
||||
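A hypothetical handler showing the intended call pattern for CaptureAndAbort: hand the error off to the middleware layer and return, rather than writing an error response directly. The struct and validation rule here are made up for illustration:

func postExample(c *gin.Context) {
	var data struct {
		Name string `json:"name"`
	}
	if err := c.BindJSON(&data); err != nil {
		return // gin has already written a 400 response for the bind failure
	}
	if data.Name == "" {
		CaptureAndAbort(c, errors.New("example: name must not be empty"))
		return
	}
	c.Status(http.StatusNoContent)
}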
|
||||
// CaptureErrors is a custom handler function allowing for errors bubbled up by
|
||||
// c.Error() to be returned in a standardized format with tracking UUIDs on them
|
||||
// for easier log searching.
|
||||
func CaptureErrors() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
c.Next()
|
||||
err := c.Errors.Last()
|
||||
if err == nil || err.Err == nil {
|
||||
return
|
||||
}
|
||||
|
||||
status := http.StatusInternalServerError
|
||||
if c.Writer.Status() != 200 {
|
||||
status = c.Writer.Status()
|
||||
}
|
||||
if err.Error() == io.EOF.Error() {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The data passed in the request was not in a parsable format. Please try again."})
|
||||
return
|
||||
}
|
||||
captured := NewError(err.Err)
|
||||
if status, msg := captured.asFilesystemError(); msg != "" {
|
||||
c.AbortWithStatusJSON(status, gin.H{"error": msg, "request_id": c.Writer.Header().Get("X-Request-Id")})
|
||||
return
|
||||
}
|
||||
captured.Abort(c, status)
|
||||
}
|
||||
}
|
||||
|
||||
// SetAccessControlHeaders sets the access request control headers on all of
|
||||
// the requests.
|
||||
func SetAccessControlHeaders() gin.HandlerFunc {
|
||||
origins := config.Get().AllowedOrigins
|
||||
location := config.Get().PanelLocation
|
||||
|
||||
return func(c *gin.Context) {
|
||||
c.Header("Access-Control-Allow-Credentials", "true")
|
||||
c.Header("Access-Control-Allow-Methods", "GET, POST, PATCH, PUT, DELETE, OPTIONS")
|
||||
// Maximum age allowable under Chromium v76 is 2 hours, so just use that since
|
||||
// anything higher will be ignored (even if other browsers do allow higher values).
|
||||
//
|
||||
// @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#Directives
|
||||
c.Header("Access-Control-Max-Age", "7200")
|
||||
c.Header("Access-Control-Allow-Origin", location)
|
||||
c.Header("Access-Control-Allow-Headers", "Accept, Accept-Encoding, Authorization, Cache-Control, Content-Type, Content-Length, Origin, X-Real-IP, X-CSRF-Token")
|
||||
// Validate that the request origin is coming from an allowed origin. Because you
|
||||
// cannot set multiple values here we need to see if the origin is one of the ones
|
||||
// that we allow, and if so return it explicitly. Otherwise, just return the default
|
||||
// origin which is the same URL that the Panel is located at.
|
||||
origin := c.GetHeader("Origin")
|
||||
if origin != location {
|
||||
for _, o := range origins {
|
||||
if o != "*" && o != origin {
|
||||
continue
|
||||
}
|
||||
c.Header("Access-Control-Allow-Origin", o)
|
||||
break
|
||||
}
|
||||
}
|
||||
if c.Request.Method == http.MethodOptions {
|
||||
c.AbortWithStatus(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// ServerExists will ensure that the requested server exists in this setup.
|
||||
// Returns a 404 if we cannot locate it. If the server is found it is set into
|
||||
// the request context, and the logger for the context is also updated to include
|
||||
// the server ID in the fields list.
|
||||
func ServerExists() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
var s *server.Server
|
||||
if c.Param("server") != "" {
|
||||
manager := ExtractManager(c)
|
||||
s = manager.Find(func(s *server.Server) bool {
|
||||
return c.Param("server") == s.Id()
|
||||
})
|
||||
}
|
||||
if s == nil {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{"error": "The requested resource does not exist on this instance."})
|
||||
return
|
||||
}
|
||||
c.Set("logger", ExtractLogger(c).WithField("server_id", s.Id()))
|
||||
c.Set("server", s)
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// RequireAuthorization authenticates the request token against the given
|
||||
// permission string, ensuring that if it is a server permission, the token has
|
||||
// control over that server. If it is a global token, this will ensure that the
|
||||
// request is using a properly signed global token.
|
||||
func RequireAuthorization() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// We don't put this value outside this function since the node's authentication
|
||||
// token can be changed on the fly and the config.Get() call returns a copy, so
|
||||
// if it is rotated this value will never properly get updated.
|
||||
token := config.Get().AuthenticationToken
|
||||
auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2)
|
||||
if len(auth) != 2 || auth[0] != "Bearer" {
|
||||
c.Header("WWW-Authenticate", "Bearer")
|
||||
c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "The required authorization headers were not present in the request."})
|
||||
return
|
||||
}
|
||||
|
||||
// All requests to Wings must be authorized with the authentication token present in
|
||||
// the Wings configuration file. Remember, all requests to Wings come from the Panel
|
||||
// backend, or using a signed JWT for temporary authentication.
|
||||
if subtle.ConstantTimeCompare([]byte(auth[1]), []byte(token)) != 1 {
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "You are not authorized to access this endpoint."})
|
||||
return
|
||||
}
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
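For completeness, the client side of this check is simply a Bearer header carrying the node token; a hypothetical request from Go might look like the following (the token value and address are placeholders):

token := "example-node-token" // placeholder; the real value comes from the wings config
req, _ := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/api/system", nil) // placeholder address
req.Header.Set("Authorization", "Bearer "+token)
res, err := http.DefaultClient.Do(req)
if err == nil {
	defer res.Body.Close()
}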
|
||||
// RemoteDownloadEnabled checks if remote downloads are enabled for this instance
|
||||
// and if not aborts the request.
|
||||
func RemoteDownloadEnabled() gin.HandlerFunc {
|
||||
disabled := config.Get().Api.DisableRemoteDownload
|
||||
return func(c *gin.Context) {
|
||||
if disabled {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "This functionality is not currently enabled on this instance."})
|
||||
return
|
||||
}
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// ExtractLogger pulls the logger out of the request context and returns it. By
|
||||
// default this will include the request ID, but may also include the server ID
|
||||
// if that middleware has been used in the chain by the time it is called.
|
||||
func ExtractLogger(c *gin.Context) *log.Entry {
|
||||
v, ok := c.Get("logger")
|
||||
if !ok {
|
||||
panic("middleware/middleware: cannot extract logger: not present in request context")
|
||||
}
|
||||
return v.(*log.Entry)
|
||||
}
|
||||
|
||||
// ExtractServer will return the server from the gin.Context or panic if it is
|
||||
// not present.
|
||||
func ExtractServer(c *gin.Context) *server.Server {
|
||||
v, ok := c.Get("server")
|
||||
if !ok {
|
||||
panic("middleware/middleware: cannot extract server: not present in request context")
|
||||
}
|
||||
return v.(*server.Server)
|
||||
}
|
||||
|
||||
// ExtractApiClient returns the API client defined for the routes.
|
||||
func ExtractApiClient(c *gin.Context) remote.Client {
|
||||
if v, ok := c.Get("api_client"); ok {
|
||||
return v.(remote.Client)
|
||||
}
|
||||
panic("middleware/middlware: cannot extract api clinet: not present in context")
|
||||
}
|
||||
|
||||
// ExtractManager returns the server manager instance set on the request context.
|
||||
func ExtractManager(c *gin.Context) *server.Manager {
|
||||
if v, ok := c.Get("manager"); ok {
|
||||
return v.(*server.Manager)
|
||||
}
|
||||
panic("middleware/middleware: cannot extract server manager: not present in context")
|
||||
}
|
||||
@@ -3,16 +3,19 @@ package router
|
||||
import (
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
)
|
||||
|
||||
// Configures the routing infrastructure for this daemon instance.
|
||||
func Configure() *gin.Engine {
|
||||
// Configure configures the routing infrastructure for this daemon instance.
|
||||
func Configure(m *server.Manager, client remote.Client) *gin.Engine {
|
||||
gin.SetMode("release")
|
||||
|
||||
router := gin.New()
|
||||
|
||||
router.Use(gin.Recovery())
|
||||
router.Use(SetAccessControlHeaders)
|
||||
router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders())
|
||||
router.Use(middleware.AttachServerManager(m), middleware.AttachApiClient(client))
|
||||
// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
|
||||
// This should still dump requests in debug mode since it does help with understanding the request
|
||||
// lifecycle and quickly seeing what was called leading to the logs. However, it isn't feasible to mix
|
||||
@@ -23,15 +26,12 @@ func Configure() *gin.Engine {
|
||||
"client_ip": params.ClientIP,
|
||||
"status": params.StatusCode,
|
||||
"latency": params.Latency,
|
||||
"request_id": params.Keys["request_id"],
|
||||
}).Debugf("%s %s", params.MethodColor()+params.Method+params.ResetColor(), params.Path)
|
||||
|
||||
return ""
|
||||
}))
|
||||
|
||||
router.OPTIONS("/api/system", func(c *gin.Context) {
|
||||
c.Status(200)
|
||||
})
|
||||
|
||||
// These routes use signed URLs to validate access to the resource being requested.
|
||||
router.GET("/download/backup", getDownloadBackup)
|
||||
router.GET("/download/file", getDownloadFile)
|
||||
@@ -40,16 +40,16 @@ func Configure() *gin.Engine {
|
||||
// This route is special: it sits above all of the other requests because we are
|
||||
// using a JWT to authorize access to it, therefore it needs to be publicly
|
||||
// accessible.
|
||||
router.GET("/api/servers/:server/ws", ServerExists, getServerWebsocket)
|
||||
router.GET("/api/servers/:server/ws", middleware.ServerExists(), getServerWebsocket)
|
||||
|
||||
// This request is called by another daemon when a server is going to be transferred out.
|
||||
// This request does not need the AuthorizationMiddleware as the panel should never call it
|
||||
// and requests are authenticated through a JWT the panel issues to the other daemon.
|
||||
router.GET("/api/servers/:server/archive", ServerExists, getServerArchive)
|
||||
router.GET("/api/servers/:server/archive", middleware.ServerExists(), getServerArchive)
|
||||
|
||||
// All of the routes beyond this mount will use an authorization middleware
|
||||
// and will not be accessible without the correct Authorization header provided.
|
||||
protected := router.Use(AuthorizationMiddleware)
|
||||
protected := router.Use(middleware.RequireAuthorization())
|
||||
protected.POST("/api/update", postUpdateConfiguration)
|
||||
protected.GET("/api/system", getSystemInformation)
|
||||
protected.GET("/api/servers", getAllServers)
|
||||
@@ -59,7 +59,7 @@ func Configure() *gin.Engine {
|
||||
// These are server specific routes, and require that the request be authorized, and
|
||||
// that the server exist on the Daemon.
|
||||
server := router.Group("/api/servers/:server")
|
||||
server.Use(AuthorizationMiddleware, ServerExists)
|
||||
server.Use(middleware.RequireAuthorization(), middleware.ServerExists())
|
||||
{
|
||||
server.GET("", getServer)
|
||||
server.PATCH("", patchServer)
|
||||
@@ -70,6 +70,7 @@ func Configure() *gin.Engine {
|
||||
server.POST("/commands", postServerCommands)
|
||||
server.POST("/install", postServerInstall)
|
||||
server.POST("/reinstall", postServerReinstall)
|
||||
server.POST("/ws/deny", postServerDenyWSTokens)
|
||||
|
||||
// This archive request causes the archive to start being created
|
||||
// this should only be triggered by the panel.
|
||||
@@ -86,11 +87,17 @@ func Configure() *gin.Engine {
|
||||
files.POST("/delete", postServerDeleteFiles)
|
||||
files.POST("/compress", postServerCompressFiles)
|
||||
files.POST("/decompress", postServerDecompressFiles)
|
||||
files.POST("/chmod", postServerChmodFile)
|
||||
|
||||
files.GET("/pull", middleware.RemoteDownloadEnabled(), getServerPullingFiles)
|
||||
files.POST("/pull", middleware.RemoteDownloadEnabled(), postServerPullRemoteFile)
|
||||
files.DELETE("/pull/:download", middleware.RemoteDownloadEnabled(), deleteServerPullRemoteFile)
|
||||
}
|
||||
|
||||
backup := server.Group("/backup")
|
||||
{
|
||||
backup.POST("", postServerBackup)
|
||||
backup.POST("/:backup/restore", postServerRestoreBackup)
|
||||
backup.DELETE("/:backup", deleteServerBackup)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,31 +3,36 @@ package router
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server/backup"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server/backup"
|
||||
)
|
||||
|
||||
// Handle a download request for a server backup.
|
||||
func getDownloadBackup(c *gin.Context) {
|
||||
client := middleware.ExtractApiClient(c)
|
||||
manager := middleware.ExtractManager(c)
|
||||
|
||||
token := tokens.BackupPayload{}
|
||||
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
|
||||
TrackedError(err).AbortWithServerError(c)
|
||||
NewTrackedError(err).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
s := GetServer(token.ServerUuid)
|
||||
if s == nil || !token.IsUniqueRequest() {
|
||||
s, ok := manager.Get(token.ServerUuid)
|
||||
if !ok || !token.IsUniqueRequest() {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The requested resource was not found on this server.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
b, st, err := backup.LocateLocal(token.BackupUuid)
|
||||
b, st, err := backup.LocateLocal(client, token.BackupUuid)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
@@ -36,19 +41,19 @@ func getDownloadBackup(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
f, err := os.Open(b.Path())
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
||||
c.Header("Content-Disposition", "attachment; filename="+st.Name())
|
||||
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
|
||||
c.Header("Content-Type", "application/octet-stream")
|
||||
|
||||
bufio.NewReader(f).WriteTo(c.Writer)
|
||||
@@ -56,14 +61,15 @@ func getDownloadBackup(c *gin.Context) {
|
||||
|
||||
// Handles downloading a specific file for a server.
|
||||
func getDownloadFile(c *gin.Context) {
|
||||
manager := middleware.ExtractManager(c)
|
||||
token := tokens.FilePayload{}
|
||||
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
|
||||
TrackedError(err).AbortWithServerError(c)
|
||||
NewTrackedError(err).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
s := GetServer(token.ServerUuid)
|
||||
if s == nil || !token.IsUniqueRequest() {
|
||||
s, ok := manager.Get(token.ServerUuid)
|
||||
if !ok || !token.IsUniqueRequest() {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The requested resource was not found on this server.",
|
||||
})
|
||||
@@ -75,7 +81,7 @@ func getDownloadFile(c *gin.Context) {
|
||||
// If there is an error or we're somehow trying to download a directory, just
|
||||
// respond with the appropriate error.
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
} else if st.IsDir() {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
@@ -86,12 +92,12 @@ func getDownloadFile(c *gin.Context) {
|
||||
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
||||
c.Header("Content-Disposition", "attachment; filename="+st.Name())
|
||||
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
|
||||
c.Header("Content-Type", "application/octet-stream")
|
||||
|
||||
bufio.NewReader(f).WriteTo(c.Writer)
|
||||
|
||||
@@ -3,13 +3,17 @@ package router
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/router/downloader"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
)
|
||||
|
||||
type serverProcData struct {
|
||||
@@ -19,17 +23,17 @@ type serverProcData struct {
|
||||
|
||||
// Returns a single server from the collection of servers.
|
||||
func getServer(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
c.JSON(http.StatusOK, serverProcData{
|
||||
ResourceUsage: *s.Proc(),
|
||||
ResourceUsage: s.Proc(),
|
||||
Suspended: s.IsSuspended(),
|
||||
})
|
||||
}
|
||||
|
||||
// Returns the logs for a given server instance.
|
||||
func getServerLogs(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
l, _ := strconv.Atoi(c.DefaultQuery("size", "100"))
|
||||
if l <= 0 {
|
||||
@@ -40,7 +44,7 @@ func getServerLogs(c *gin.Context) {
|
||||
|
||||
out, err := s.ReadLogfile(l)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -56,7 +60,7 @@ func getServerLogs(c *gin.Context) {
|
||||
// things are happening, so there's no reason to sit and wait for a request to finish. We'll
|
||||
// just see over the socket if something isn't working correctly.
|
||||
func postServerPower(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
var data struct {
|
||||
Action server.PowerAction `json:"action"`
|
||||
@@ -106,10 +110,10 @@ func postServerPower(c *gin.Context) {
|
||||
|
||||
// Sends an array of commands to a running server instance.
|
||||
func postServerCommands(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
if running, err := s.Environment.IsRunning(); err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
} else if !running {
|
||||
c.AbortWithStatusJSON(http.StatusBadGateway, gin.H{
|
||||
@@ -137,13 +141,13 @@ func postServerCommands(c *gin.Context) {
|
||||
|
||||
// Updates information about a server internally.
|
||||
func patchServer(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
buf.ReadFrom(c.Request.Body)
|
||||
|
||||
if err := s.UpdateDataStructure(buf.Bytes()); err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -154,7 +158,7 @@ func patchServer(c *gin.Context) {
|
||||
|
||||
// Performs a server installation in a background thread.
|
||||
func postServerInstall(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
go func(serv *server.Server) {
|
||||
if err := serv.Install(true); err != nil {
|
||||
@@ -167,7 +171,7 @@ func postServerInstall(c *gin.Context) {
|
||||
|
||||
// Reinstalls a server.
|
||||
func postServerReinstall(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
if s.ExecutingPowerAction() {
|
||||
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
|
||||
@@ -187,33 +191,30 @@ func postServerReinstall(c *gin.Context) {
|
||||
|
||||
// Deletes a server from the wings daemon and dissociates its objects.
|
||||
func deleteServer(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := middleware.ExtractServer(c)
|
||||
|
||||
// Immediately suspend the server to prevent a user from attempting
|
||||
// to start it while this process is running.
|
||||
s.Config().SetSuspended(true)
|
||||
|
||||
// If the server is currently installing, abort it.
|
||||
if s.IsInstalling() {
|
||||
s.AbortInstallation()
|
||||
}
|
||||
|
||||
// Delete the server's archive if it exists. We intentionally don't return
|
||||
// here, if the archive fails to delete, the server can still be removed.
|
||||
if err := s.Archiver.DeleteIfExists(); err != nil {
|
||||
s.Log().WithField("error", err).Warn("failed to delete server archive during deletion process")
|
||||
}
|
||||
|
||||
// Unsubscribe all of the event listeners.
|
||||
// Stop all running background tasks for this server that are using the context on
|
||||
// the server struct. This will cancel any running install processes for the server
|
||||
// as well.
|
||||
s.CtxCancel()
|
||||
s.Events().Destroy()
|
||||
s.Throttler().StopTimer()
|
||||
s.Websockets().CancelAll()
|
||||
|
||||
// Remove any pending remote file downloads for the server.
|
||||
for _, dl := range downloader.ByServer(s.Id()) {
|
||||
dl.Cancel()
|
||||
}
|
||||
|
||||
// Destroy the environment; in Docker this will handle a running container and
|
||||
// forcibly terminate it before removing the container, so we do not need to handle
|
||||
// that here.
|
||||
if err := s.Environment.Destroy(); err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
WithError(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Once the environment is terminated, remove the server files from the system. This is
|
||||
@@ -224,16 +225,12 @@ func deleteServer(c *gin.Context) {
|
||||
// so we don't want to block the HTTP call while waiting on this.
|
||||
go func(p string) {
|
||||
if err := os.RemoveAll(p); err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"path": p,
|
||||
"error": errors.WithStack(err),
|
||||
}).Warn("failed to remove server files during deletion process")
|
||||
log.WithFields(log.Fields{"path": p, "error": err}).Warn("failed to remove server files during deletion process")
|
||||
}
|
||||
}(s.Filesystem().Path())
|
||||
|
||||
var uuid = s.Id()
|
||||
server.GetServers().Remove(func(s2 *server.Server) bool {
|
||||
return s2.Id() == uuid
|
||||
middleware.ExtractManager(c).Remove(func(server *server.Server) bool {
|
||||
return server.Id() == s.Id()
|
||||
})
|
||||
|
||||
// Deallocate the reference to this server.
|
||||
@@ -241,3 +238,22 @@ func deleteServer(c *gin.Context) {
|
||||
|
||||
c.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// Adds any of the JTIs passed through in the body to the deny list for the websocket
|
||||
// preventing any JWT generated before the current time from being used to connect to
|
||||
// the socket or send along commands.
|
||||
func postServerDenyWSTokens(c *gin.Context) {
|
||||
var data struct {
|
||||
JTIs []string `json:"jtis"`
|
||||
}
|
||||
|
||||
if err := c.BindJSON(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, jti := range data.JTIs {
|
||||
tokens.DenyJTI(jti)
|
||||
}
|
||||
|
||||
c.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
@@ -1,59 +1,167 @@
|
||||
package router
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/backup"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/backup"
|
||||
)
|
||||
|
||||
// Backs up a server.
|
||||
// postServerBackup performs a backup against a given server instance using the
|
||||
// provided backup adapter.
|
||||
func postServerBackup(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
|
||||
data := &backup.Request{}
|
||||
// BindJSON sends 400 if the request fails, all we need to do is return
|
||||
s := middleware.ExtractServer(c)
|
||||
client := middleware.ExtractApiClient(c)
|
||||
logger := middleware.ExtractLogger(c)
|
||||
var data struct {
|
||||
Adapter backup.AdapterType `json:"adapter"`
|
||||
Uuid string `json:"uuid"`
|
||||
Ignore string `json:"ignore"`
|
||||
}
|
||||
if err := c.BindJSON(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var adapter backup.BackupInterface
|
||||
var err error
|
||||
|
||||
switch data.Adapter {
|
||||
case backup.LocalBackupAdapter:
|
||||
adapter, err = data.NewLocalBackup()
|
||||
adapter = backup.NewLocal(client, data.Uuid, data.Ignore)
|
||||
case backup.S3BackupAdapter:
|
||||
adapter, err = data.NewS3Backup()
|
||||
adapter = backup.NewS3(client, data.Uuid, data.Ignore)
|
||||
default:
|
||||
err = errors.New(fmt.Sprintf("unknown backup adapter [%s] provided", data.Adapter))
|
||||
middleware.CaptureAndAbort(c, errors.New("router/backups: provided adapter is not valid: "+string(data.Adapter)))
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
return
|
||||
}
|
||||
// Attach the server ID and the request ID to the adapter log context for easier
|
||||
// parsing in the logs.
|
||||
adapter.WithLogContext(map[string]interface{}{
|
||||
"server": s.Id(),
|
||||
"request_id": c.GetString("request_id"),
|
||||
})
|
||||
|
||||
go func(b backup.BackupInterface, serv *server.Server) {
|
||||
if err := serv.Backup(b); err != nil {
|
||||
serv.Log().WithField("error", err).Error("failed to generate backup for server")
|
||||
go func(b backup.BackupInterface, s *server.Server, logger *log.Entry) {
|
||||
if err := s.Backup(b); err != nil {
|
||||
logger.WithField("error", errors.WithStackIf(err)).Error("router: failed to generate server backup")
|
||||
}
|
||||
}(adapter, s)
|
||||
}(adapter, s, logger)
|
||||
|
||||
c.Status(http.StatusAccepted)
|
||||
}
|
||||
|
||||
// Deletes a local backup of a server. If the backup is not found on the machine just return
|
||||
// a 404 error. The service calling this endpoint can make its own decisions as to how it wants
|
||||
// to handle that response.
|
||||
func deleteServerBackup(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
// postServerRestoreBackup handles restoring a backup for a server by downloading
|
||||
// or finding the given backup on the system and then unpacking the archive into
|
||||
// the server's data directory. If the TruncateDirectory field is provided and
|
||||
// is true all of the files will be deleted for the server.
|
||||
//
|
||||
// This endpoint will block until the backup is fully restored allowing for a
|
||||
// spinner to be displayed in the Panel UI effectively.
|
||||
//
|
||||
// TODO: stop the server if it is running; internally mark it as suspended
|
||||
func postServerRestoreBackup(c *gin.Context) {
|
||||
s := middleware.ExtractServer(c)
|
||||
client := middleware.ExtractApiClient(c)
|
||||
logger := middleware.ExtractLogger(c)
|
||||
|
||||
b, _, err := backup.LocateLocal(c.Param("backup"))
|
||||
var data struct {
|
||||
Adapter backup.AdapterType `binding:"required,oneof=wings s3" json:"adapter"`
|
||||
TruncateDirectory bool `json:"truncate_directory"`
|
||||
// A UUID is always required for this endpoint, however the download URL
|
||||
// is only present when the given adapter type is s3.
|
||||
DownloadUrl string `json:"download_url"`
|
||||
}
|
||||
if err := c.BindJSON(&data); err != nil {
|
||||
return
|
||||
}
|
||||
if data.Adapter == backup.S3BackupAdapter && data.DownloadUrl == "" {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The download_url field is required when the backup adapter is set to S3."})
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("processing server backup restore request")
|
||||
if data.TruncateDirectory {
|
||||
logger.Info(`received "truncate_directory" flag in request: deleting server files`)
|
||||
if err := s.Filesystem().TruncateRootDirectory(); err != nil {
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Now that we've cleaned up the data directory if necessary, grab the backup file
|
||||
// and attempt to restore it into the server directory.
|
||||
if data.Adapter == backup.LocalBackupAdapter {
|
||||
b, _, err := backup.LocateLocal(client, c.Param("backup"))
|
||||
if err != nil {
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
go func(s *server.Server, b backup.BackupInterface, logger *log.Entry) {
|
||||
logger.Info("starting restoration process for server backup using local driver")
|
||||
if err := s.RestoreBackup(b, nil); err != nil {
|
||||
logger.WithField("error", err).Error("failed to restore local backup to server")
|
||||
}
|
||||
s.Events().Publish(server.DaemonMessageEvent, "Completed server restoration from local backup.")
|
||||
s.Events().Publish(server.BackupRestoreCompletedEvent, "")
|
||||
logger.Info("completed server restoration from local backup")
|
||||
}(s, b, logger)
|
||||
c.Status(http.StatusAccepted)
|
||||
return
|
||||
}
|
||||
|
||||
// Since this is not a local backup we need to stream the archive and then
|
||||
// parse over the contents as we go in order to restore it to the server.
|
||||
httpClient := http.Client{}
|
||||
logger.Info("downloading backup from remote location...")
|
||||
// TODO: this will hang if there is an issue. We can't use c.Request.Context() (or really any)
|
||||
// since it will be canceled when the request is closed which happens quickly since we push
|
||||
// this into the background.
|
||||
//
|
||||
// For now I'm just using the server context so at least the request is canceled if
|
||||
// the server gets deleted.
|
||||
req, err := http.NewRequestWithContext(s.Context(), http.MethodGet, data.DownloadUrl, nil)
|
||||
if err != nil {
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
res, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
// Don't allow content types that we know are going to give us problems.
|
||||
if res.Header.Get("Content-Type") == "" || !strings.Contains("application/x-gzip application/gzip", res.Header.Get("Content-Type")) {
|
||||
res.Body.Close()
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "The provided backup link is not a supported content type. \"" + res.Header.Get("Content-Type") + "\" is not application/x-gzip.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
go func(s *server.Server, uuid string, logger *log.Entry) {
|
||||
logger.Info("starting restoration process for server backup using S3 driver")
|
||||
if err := s.RestoreBackup(backup.NewS3(client, uuid, ""), res.Body); err != nil {
|
||||
logger.WithField("error", errors.WithStack(err)).Error("failed to restore remote S3 backup to server")
|
||||
}
|
||||
s.Events().Publish(server.DaemonMessageEvent, "Completed server restoration from S3 backup.")
|
||||
s.Events().Publish(server.BackupRestoreCompletedEvent, "")
|
||||
logger.Info("completed server restoration from S3 backup")
|
||||
}(s, c.Param("backup"), logger)
|
||||
|
||||
c.Status(http.StatusAccepted)
|
||||
}
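For reference, the body this restore endpoint binds has three fields: "adapter" (one of "wings" or "s3", per the binding tag), "truncate_directory", and "download_url", which is only required for the S3 adapter. A hedged client-side sketch; the URL below is a placeholder, not the real Wings route:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Field names come from the struct tags bound by postServerRestoreBackup.
	body, _ := json.Marshal(map[string]interface{}{
		"adapter":            "s3",
		"truncate_directory": true,
		// Only needed when the adapter is "s3"; local ("wings") backups are
		// located on disk instead.
		"download_url": "https://example.com/presigned-backup-archive",
	})

	// Placeholder endpoint; the real route is registered by the Wings router.
	res, err := http.Post("http://localhost:8080/example/restore", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println(err)
		return
	}
	defer res.Body.Close()
	// The handler responds 202 Accepted and performs the restore in the background.
	fmt.Println(res.Status)
}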
|
||||
|
||||
// deleteServerBackup deletes a local backup of a server. If the backup is not
|
||||
// found on the machine just return a 404 error. The service calling this
|
||||
// endpoint can make its own decisions as to how it wants to handle that
|
||||
// response.
|
||||
func deleteServerBackup(c *gin.Context) {
|
||||
b, _, err := backup.LocateLocal(middleware.ExtractApiClient(c), c.Param("backup"))
|
||||
if err != nil {
|
||||
// Just return from the function at this point if the backup was not located.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
@@ -62,20 +170,15 @@ func deleteServerBackup(c *gin.Context) {
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := b.Remove(); err != nil {
|
||||
// I'm not entirely sure how likely this is to happen, however if we did manage to locate
|
||||
// the backup previously and it is now missing when we go to delete, just treat it as having
|
||||
// been successful, rather than returning a 404.
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
// I'm not entirely sure how likely this is to happen, however if we did manage to
|
||||
// locate the backup previously and it is now missing when we go to delete, just
|
||||
// treat it as having been successful, rather than returning a 404.
|
||||
if err := b.Remove(); err != nil && !errors.Is(err, os.ErrNotExist) {
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
@@ -1,13 +1,8 @@
|
||||
package router
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -16,59 +11,58 @@ import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/router/downloader"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Returns the contents of a file on the server.
|
||||
// getServerFileContents returns the contents of a file on the server.
|
||||
func getServerFileContents(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
|
||||
p, err := url.QueryUnescape(c.Query("file"))
|
||||
s := middleware.ExtractServer(c)
|
||||
p := "/" + strings.TrimLeft(c.Query("file"), "/")
|
||||
f, st, err := s.Filesystem().File(p)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
return
|
||||
}
|
||||
p = "/" + strings.TrimLeft(p, "/")
|
||||
|
||||
st, err := s.Filesystem().Stat(p)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
c.Header("X-Mime-Type", st.Mimetype)
|
||||
c.Header("Content-Length", strconv.Itoa(int(st.Info.Size())))
|
||||
|
||||
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
||||
// If a download parameter is included in the URL go ahead and attach the necessary headers
|
||||
// so that the file can be downloaded.
|
||||
if c.Query("download") != "" {
|
||||
c.Header("Content-Disposition", "attachment; filename="+st.Info.Name())
|
||||
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
|
||||
c.Header("Content-Type", "application/octet-stream")
|
||||
}
|
||||
|
||||
if err := s.Filesystem().Readfile(p, c.Writer); err != nil {
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
defer c.Writer.Flush()
|
||||
_, err = bufio.NewReader(f).WriteTo(c.Writer)
|
||||
if err != nil {
|
||||
// Pretty sure this will unleash chaos on the response, but it's a risk we can
|
||||
// take since a panic will at least be recovered and this should be incredibly
|
||||
// rare?
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the contents of a directory for a server.
|
||||
func getServerListDirectory(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
|
||||
d, err := url.QueryUnescape(c.Query("directory"))
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
return
|
||||
}
|
||||
|
||||
stats, err := s.Filesystem().ListDirectory(d)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
return
|
||||
}
|
||||
|
||||
s := ExtractServer(c)
|
||||
dir := c.Query("directory")
|
||||
if stats, err := s.Filesystem().ListDirectory(dir); err != nil {
|
||||
WithError(c, err)
|
||||
} else {
|
||||
c.JSON(http.StatusOK, stats)
|
||||
}
|
||||
}
|
||||
|
||||
type renameFile struct {
|
||||
To string `json:"to"`
|
||||
@@ -77,7 +71,7 @@ type renameFile struct {
|
||||
|
||||
// Renames (or moves) files for a server.
|
||||
func putServerRenameFiles(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
var data struct {
|
||||
Root string `json:"root"`
|
||||
@@ -95,8 +89,7 @@ func putServerRenameFiles(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
g, ctx := errgroup.WithContext(context.Background())
|
||||
|
||||
g, ctx := errgroup.WithContext(c.Request.Context())
|
||||
// Loop over the array of files passed in and perform the move or rename action against each.
|
||||
for _, p := range data.Files {
|
||||
pf := path.Join(data.Root, p.From)
|
||||
@@ -107,16 +100,20 @@ func putServerRenameFiles(c *gin.Context) {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
if err := s.Filesystem().Rename(pf, pt); err != nil {
|
||||
fs := s.Filesystem()
|
||||
// Ignore renames on a file that is on the denylist (both as the rename from or
|
||||
// the rename to value).
|
||||
if err := fs.IsIgnored(pf, pt); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := fs.Rename(pf, pt); err != nil {
|
||||
// Return nil if the error is a not-exists error.
|
||||
// NOTE: os.IsNotExist() does not work if the error is wrapped.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
})
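The NOTE inside the closure above is why errors.Is is used here: os.IsNotExist does not unwrap errors created with %w, while errors.Is walks the wrap chain. A small self-contained illustration:

package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	_, err := os.Open("/path/that/does/not/exist")
	wrapped := fmt.Errorf("rename failed: %w", err)

	fmt.Println(os.IsNotExist(wrapped))             // false: the helper does not unwrap
	fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // true: errors.Is follows the %w chain
}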
|
||||
@@ -130,7 +127,7 @@ func putServerRenameFiles(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
NewServerError(err, s).AbortFilesystemError(c)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -139,7 +136,7 @@ func putServerRenameFiles(c *gin.Context) {
|
||||
|
||||
// Copies a server file.
|
||||
func postServerCopyFile(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
var data struct {
|
||||
Location string `json:"location"`
|
||||
@@ -149,8 +146,12 @@ func postServerCopyFile(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.Filesystem().IsIgnored(data.Location); err != nil {
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
if err := s.Filesystem().Copy(data.Location); err != nil {
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
NewServerError(err, s).AbortFilesystemError(c)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -159,7 +160,7 @@ func postServerCopyFile(c *gin.Context) {
|
||||
|
||||
// Deletes files from a server.
|
||||
func postServerDeleteFiles(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
var data struct {
|
||||
Root string `json:"root"`
|
||||
@@ -195,7 +196,7 @@ func postServerDeleteFiles(c *gin.Context) {
|
||||
}
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -204,33 +205,108 @@ func postServerDeleteFiles(c *gin.Context) {
|
||||
|
||||
// Writes the contents of the request to a file on a server.
|
||||
func postServerWriteFile(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
f, err := url.QueryUnescape(c.Query("file"))
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
return
|
||||
}
|
||||
f := c.Query("file")
|
||||
f = "/" + strings.TrimLeft(f, "/")
|
||||
|
||||
if err := s.Filesystem().IsIgnored(f); err != nil {
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil {
|
||||
if errors.Is(err, filesystem.ErrIsDirectory) {
|
||||
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
NewServerError(err, s).AbortFilesystemError(c)
|
||||
return
|
||||
}
|
||||
|
||||
c.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// Returns all of the currently in-progress file downloads and their current download
|
||||
// progress. The progress is also pushed out via a websocket event allowing you to just
|
||||
// call this once to get current downloads, and then listen to targeted websocket events
|
||||
// with the current progress for everything.
|
||||
func getServerPullingFiles(c *gin.Context) {
|
||||
s := ExtractServer(c)
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"downloads": downloader.ByServer(s.Id()),
|
||||
})
|
||||
}
|
||||
|
||||
// Writes the contents of the remote URL to a file on a server.
|
||||
func postServerPullRemoteFile(c *gin.Context) {
|
||||
s := ExtractServer(c)
|
||||
var data struct {
|
||||
URL string `binding:"required" json:"url"`
|
||||
Directory string `binding:"required,omitempty" json:"directory"`
|
||||
}
|
||||
if err := c.BindJSON(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
u, err := url.Parse(data.URL)
|
||||
if err != nil {
|
||||
if e, ok := err.(*url.Error); ok {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "An error occurred while parsing that URL: " + e.Err.Error(),
|
||||
})
|
||||
return
|
||||
}
|
||||
WithError(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.Filesystem().HasSpaceErr(true); err != nil {
|
||||
WithError(c, err)
|
||||
return
|
||||
}
|
||||
// Do not allow more than three simultaneous remote file downloads at one time.
|
||||
if len(downloader.ByServer(s.Id())) >= 3 {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "This server has reached its limit of 3 simultaneous remote file downloads at once. Please wait for one to complete before trying again.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
dl := downloader.New(s, downloader.DownloadRequest{
|
||||
URL: u,
|
||||
Directory: data.Directory,
|
||||
})
|
||||
|
||||
// Execute this pull in a separate thread since it may take a long time to complete.
|
||||
go func() {
|
||||
s.Log().WithField("download_id", dl.Identifier).WithField("url", u.String()).Info("starting pull of remote file to disk")
|
||||
if err := dl.Execute(); err != nil {
|
||||
s.Log().WithField("download_id", dl.Identifier).WithField("error", err).Error("failed to pull remote file")
|
||||
} else {
|
||||
s.Log().WithField("download_id", dl.Identifier).Info("completed pull of remote file")
|
||||
}
|
||||
}()
|
||||
|
||||
c.JSON(http.StatusAccepted, gin.H{
|
||||
"identifier": dl.Identifier,
|
||||
})
|
||||
}
|
||||
|
||||
// Stops a remote file download if it exists and belongs to this server.
|
||||
func deleteServerPullRemoteFile(c *gin.Context) {
|
||||
s := ExtractServer(c)
|
||||
if dl := downloader.ByID(c.Param("download")); dl != nil && dl.BelongsTo(s) {
|
||||
dl.Cancel()
|
||||
}
|
||||
c.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// Create a directory on a server.
|
||||
func postServerCreateDirectory(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
var data struct {
|
||||
Name string `json:"name"`
|
||||
@@ -249,7 +325,7 @@ func postServerCreateDirectory(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -257,7 +333,7 @@ func postServerCreateDirectory(c *gin.Context) {
|
||||
}
|
||||
|
||||
func postServerCompressFiles(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := ExtractServer(c)
|
||||
|
||||
var data struct {
|
||||
RootPath string `json:"root"`
|
||||
@@ -284,72 +360,126 @@ func postServerCompressFiles(c *gin.Context) {
|
||||
|
||||
f, err := s.Filesystem().CompressFiles(data.RootPath, data.Files)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
NewServerError(err, s).AbortFilesystemError(c)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, &filesystem.Stat{
|
||||
Info: f,
|
||||
FileInfo: f,
|
||||
Mimetype: "application/tar+gzip",
|
||||
})
|
||||
}
|
||||
|
||||
// postServerDecompressFiles receives the HTTP request and starts the process
|
||||
// of unpacking an archive that exists on the server into the provided RootPath
|
||||
// for the server.
|
||||
func postServerDecompressFiles(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
|
||||
s := middleware.ExtractServer(c)
|
||||
lg := middleware.ExtractLogger(c)
|
||||
var data struct {
|
||||
RootPath string `json:"root"`
|
||||
File string `json:"file"`
|
||||
}
|
||||
|
||||
if err := c.BindJSON(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hasSpace, err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File)
|
||||
lg = lg.WithFields(log.Fields{"root_path": data.RootPath, "file": data.File})
|
||||
lg.Debug("checking if space is available for file decompression")
|
||||
err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File)
|
||||
if err != nil {
|
||||
// Handle an unknown format error.
|
||||
if errors.Is(err, filesystem.ErrUnknownArchiveFormat) {
|
||||
s.Log().WithField("error", err).Warn("failed to decompress file due to unknown format")
|
||||
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "unknown archive format",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
return
|
||||
}
|
||||
|
||||
if !hasSpace {
|
||||
c.AbortWithStatusJSON(http.StatusConflict, gin.H{
|
||||
"error": "This server does not have enough available disk space to decompress this archive.",
|
||||
})
|
||||
if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) {
|
||||
lg.WithField("error", err).Warn("failed to decompress file: unknown archive format")
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The archive provided is in a format Wings does not understand."})
|
||||
return
|
||||
}
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
lg.Info("starting file decompression")
|
||||
if err := s.Filesystem().DecompressFile(data.RootPath, data.File); err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The requested archive was not found.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// If the file is busy for some reason just return a nicer error to the user since there is not
|
||||
// much we specifically can do. They'll need to stop the running server process in order to overwrite
|
||||
// a file like this.
|
||||
if strings.Contains(err.Error(), "text file busy") {
|
||||
s.Log().WithField("error", err).Warn("failed to decompress file due to busy text file")
|
||||
|
||||
lg.WithField("error", err).Warn("failed to decompress file: text file busy")
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "One or more files this archive is attempting to overwrite are currently in use by another process. Please try again.",
|
||||
})
|
||||
return
|
||||
}
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
c.Status(http.StatusNoContent)
|
||||
}
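These hunks also show the move from sentinel errors (errors.Is with filesystem.ErrUnknownArchiveFormat) to coded errors checked with filesystem.IsErrorCode. A generic sketch of that coded-error pattern, using hypothetical types rather than the Wings filesystem package:

package main

import (
	"errors"
	"fmt"
)

// codedError carries a machine-readable code that survives wrapping.
type codedError struct {
	code string
	msg  string
}

func (e *codedError) Error() string { return e.msg }

// isErrorCode is a hypothetical stand-in for filesystem.IsErrorCode.
func isErrorCode(err error, code string) bool {
	var ce *codedError
	if errors.As(err, &ce) {
		return ce.code == code
	}
	return false
}

func main() {
	base := &codedError{code: "unknown_archive", msg: "unsupported archive format"}
	wrapped := fmt.Errorf("decompress: %w", base)
	fmt.Println(isErrorCode(wrapped, "unknown_archive")) // true, even through wrapping
}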
|
||||
|
||||
TrackedServerError(err, s).AbortFilesystemError(c)
|
||||
type chmodFile struct {
File string `json:"file"`
Mode string `json:"mode"`
}

var errInvalidFileMode = errors.New("invalid file mode")

func postServerChmodFile(c *gin.Context) {
s := ExtractServer(c)

var data struct {
Root string `json:"root"`
Files []chmodFile `json:"files"`
}
|
||||
|
||||
if err := c.BindJSON(&data); err != nil {
|
||||
log.Debug(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if len(data.Files) == 0 {
|
||||
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
|
||||
"error": "No files to chmod were provided.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
g, ctx := errgroup.WithContext(context.Background())
|
||||
|
||||
// Loop over the array of files passed in and perform the move or rename action against each.
|
||||
for _, p := range data.Files {
|
||||
g.Go(func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
mode, err := strconv.ParseUint(p.Mode, 8, 32)
|
||||
if err != nil {
|
||||
return errInvalidFileMode
|
||||
}
|
||||
|
||||
if err := s.Filesystem().Chmod(path.Join(data.Root, p.File), os.FileMode(mode)); err != nil {
|
||||
// Return nil if the error is a not-exists error.
|
||||
// NOTE: os.IsNotExist() does not work if the error is wrapped.
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
})
|
||||
}
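The chmod request sends the mode as an octal string, which is why the closure above parses it with base 8 before converting to os.FileMode. For example:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// "755" is parsed as octal, yielding 0o755 (decimal 493).
	mode, err := strconv.ParseUint("755", 8, 32)
	if err != nil {
		fmt.Println("invalid file mode")
		return
	}
	fmt.Printf("%o -> %v\n", mode, os.FileMode(mode)) // 755 -> -rwxr-xr-x
}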
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
if errors.Is(err, errInvalidFileMode) {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||
"error": "Invalid file mode.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
NewServerError(err, s).AbortFilesystemError(c)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -357,14 +487,16 @@ func postServerDecompressFiles(c *gin.Context) {
|
||||
}
|
||||
|
||||
func postServerUploadFiles(c *gin.Context) {
|
||||
manager := middleware.ExtractManager(c)
|
||||
|
||||
token := tokens.UploadPayload{}
|
||||
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
|
||||
TrackedError(err).AbortWithServerError(c)
|
||||
NewTrackedError(err).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
s := GetServer(token.ServerUuid)
|
||||
if s == nil || !token.IsUniqueRequest() {
|
||||
s, ok := manager.Get(token.ServerUuid)
|
||||
if !ok || !token.IsUniqueRequest() {
|
||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||
"error": "The requested resource was not found on this server.",
|
||||
})
|
||||
@@ -397,14 +529,14 @@ func postServerUploadFiles(c *gin.Context) {
|
||||
for _, header := range headers {
|
||||
p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename))
|
||||
if err != nil {
|
||||
c.AbortWithError(http.StatusInternalServerError, err)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
// We run this in a different method so I can use defer without any of
|
||||
// the consequences caused by calling it in a loop.
|
||||
if err := handleFileUpload(p, s, header); err != nil {
|
||||
c.AbortWithError(http.StatusInternalServerError, err)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -413,12 +545,15 @@ func postServerUploadFiles(c *gin.Context) {
|
||||
func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader) error {
|
||||
file, err := header.Open()
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if err := s.Filesystem().IsIgnored(p); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.Filesystem().Writefile(p, file); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -3,18 +3,21 @@ package router
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
ws "github.com/gorilla/websocket"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/router/websocket"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Upgrades a connection to a websocket and passes events along between.
|
||||
func getServerWebsocket(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
manager := middleware.ExtractManager(c)
|
||||
s, _ := manager.Get(c.Param("server"))
|
||||
handler, err := websocket.GetHandler(s, c.Writer, c.Request)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).AbortWithServerError(c)
|
||||
NewServerError(err, s).Abort(c)
|
||||
return
|
||||
}
|
||||
defer handler.Connection.Close()
|
||||
@@ -24,14 +27,14 @@ func getServerWebsocket(c *gin.Context) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Track this open connection on the server so that we can close them all programtically
|
||||
// Track this open connection on the server so that we can close them all programmatically
|
||||
// if the server is deleted.
|
||||
s.Websockets().Push(handler.Uuid(), &cancel)
|
||||
defer s.Websockets().Remove(handler.Uuid())
|
||||
|
||||
// Listen for the context being canceled and then close the websocket connection. This normally
|
||||
// just happens because you're disconnecting from the socket in the browser, however in some
|
||||
// cases we close the connections programatically (e.g. deleting the server) and need to send
|
||||
// cases we close the connections programmatically (e.g. deleting the server) and need to send
|
||||
// a close message to the websocket so it disconnects.
|
||||
go func(ctx context.Context, c *ws.Conn) {
|
||||
ListenerLoop:
|
||||
|
||||
@@ -2,21 +2,22 @@ package router
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/installer"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Returns information about the system that wings is running on.
|
||||
func getSystemInformation(c *gin.Context) {
|
||||
i, err := system.GetSystemInformation()
|
||||
if err != nil {
|
||||
TrackedError(err).AbortWithServerError(c)
|
||||
NewTrackedError(err).Abort(c)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -27,16 +28,17 @@ func getSystemInformation(c *gin.Context) {
|
||||
// Returns all of the servers that are registered and configured correctly on
|
||||
// this wings instance.
|
||||
func getAllServers(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, server.GetServers().All())
|
||||
c.JSON(http.StatusOK, middleware.ExtractManager(c).All())
|
||||
}
|
||||
|
||||
// Creates a new server on the wings daemon and begins the installation process
|
||||
// for it.
|
||||
func postCreateServer(c *gin.Context) {
|
||||
manager := middleware.ExtractManager(c)
|
||||
buf := bytes.Buffer{}
|
||||
buf.ReadFrom(c.Request.Body)
|
||||
|
||||
install, err := installer.New(buf.Bytes())
|
||||
install, err := installer.New(c.Request.Context(), manager, buf.Bytes())
|
||||
if err != nil {
|
||||
if installer.IsValidationError(err) {
|
||||
c.AbortWithStatusJSON(http.StatusUnprocessableEntity, gin.H{
|
||||
@@ -45,13 +47,13 @@ func postCreateServer(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
TrackedError(err).AbortWithServerError(c)
|
||||
middleware.CaptureAndAbort(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Plop that server instance onto the request so that it can be referenced in
|
||||
// requests from here-on out.
|
||||
server.GetServers().Add(install.Server())
|
||||
manager.Add(install.Server())
|
||||
|
||||
// Begin the installation process in the background to not block the request
|
||||
// cycle. If there are any errors they will be logged and communicated back
|
||||
@@ -71,37 +73,29 @@ func postCreateServer(c *gin.Context) {
|
||||
c.Status(http.StatusAccepted)
|
||||
}
|
||||
|
||||
// Updates the running configuration for this daemon instance.
|
||||
// Updates the running configuration for this Wings instance.
|
||||
func postUpdateConfiguration(c *gin.Context) {
|
||||
// A backup of the configuration for error purposes.
|
||||
ccopy := *config.Get()
|
||||
// A copy of the configuration we're using to bind the data received into.
|
||||
cfg := *config.Get()
|
||||
|
||||
// BindJSON sends a 400 if the request fails; all we need to do is return.
|
||||
cfg := config.Get()
|
||||
if err := c.BindJSON(&cfg); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Keep the SSL certificates the same since the Panel will send through Lets Encrypt
|
||||
// default locations. However, if we picked a different location manually we don't
|
||||
// want to override that.
|
||||
//
|
||||
// If you pass through manual locations in the API call this logic will be skipped.
|
||||
if strings.HasPrefix(cfg.Api.Ssl.KeyFile, "/etc/letsencrypt/live/") {
|
||||
cfg.Api.Ssl.KeyFile = ccopy.Api.Ssl.KeyFile
|
||||
cfg.Api.Ssl.CertificateFile = ccopy.Api.Ssl.CertificateFile
|
||||
cfg.Api.Ssl.KeyFile = strings.ToLower(config.Get().Api.Ssl.KeyFile)
|
||||
cfg.Api.Ssl.CertificateFile = strings.ToLower(config.Get().Api.Ssl.CertificateFile)
|
||||
}
|
||||
|
||||
config.Set(&cfg)
|
||||
if err := config.Get().WriteToDisk(); err != nil {
|
||||
// If there was an error writing to the disk, revert back to the configuration we had
|
||||
// before this code was run.
|
||||
config.Set(&ccopy)
|
||||
|
||||
TrackedError(err).AbortWithServerError(c)
|
||||
// Try to write this new configuration to the disk before updating our global
|
||||
// state with it.
|
||||
if err := config.WriteToDisk(cfg); err != nil {
|
||||
WithError(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Since we wrote it to the disk successfully now update the global configuration
|
||||
// state to use this new configuration struct.
|
||||
config.Set(cfg)
|
||||
c.Status(http.StatusNoContent)
|
||||
}
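The updated handler persists the new configuration before swapping the in-memory state, so a failed write never leaves memory and disk out of sync. A minimal sketch of that ordering, assuming a hypothetical settings type rather than the Wings config package:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

type settings struct {
	Debug bool   `json:"debug"`
	Uuid  string `json:"uuid"`
}

var (
	mu      sync.RWMutex
	current = settings{Uuid: "node-1"}
)

// update writes the candidate configuration to disk first and only replaces
// the global state once the write has succeeded.
func update(path string, next settings) error {
	b, err := json.MarshalIndent(next, "", "  ")
	if err != nil {
		return err
	}
	if err := os.WriteFile(path, b, 0o600); err != nil {
		return err
	}
	mu.Lock()
	current = next
	mu.Unlock()
	return nil
}

func main() {
	path := filepath.Join(os.TempDir(), "config.json")
	fmt.Println(update(path, settings{Debug: true, Uuid: "node-1"}))
	mu.RLock()
	fmt.Println(current.Debug)
	mu.RUnlock()
}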
|
||||
|
||||
@@ -2,28 +2,61 @@ package router
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"github.com/apex/log"
|
||||
"github.com/buger/jsonparser"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/installer"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/mitchellh/colorstring"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/installer"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/router/middleware"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
// Number of ticks in the progress bar
const ticks = 25

// 100% / number of ticks = percentage represented by each tick
const tickPercentage = 100 / ticks

type downloadProgress struct {
size int64
progress int64
}

// Data passed over to initiate a server transfer.
type serverTransferRequest struct {
ServerID string `binding:"required" json:"server_id"`
URL string `binding:"required" json:"url"`
Token string `binding:"required" json:"token"`
Server json.RawMessage `json:"server"`
}
|
||||
|
||||
func getArchivePath(sID string) string {
|
||||
return filepath.Join(config.Get().System.ArchiveDirectory, sID+".tar.gz")
|
||||
}
|
||||
|
||||
// Returns the archive for a server so that it can be transferred to a new node.
|
||||
func getServerArchive(c *gin.Context) {
|
||||
auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2)
|
||||
|
||||
@@ -37,101 +70,81 @@ func getServerArchive(c *gin.Context) {
|
||||
|
||||
token := tokens.TransferPayload{}
|
||||
if err := tokens.ParseToken([]byte(auth[1]), &token); err != nil {
|
||||
TrackedError(err).AbortWithServerError(c)
|
||||
NewTrackedError(err).Abort(c)
|
||||
return
|
||||
}
|
||||
|
||||
if token.Subject != c.Param("server") {
|
||||
s := ExtractServer(c)
|
||||
if token.Subject != s.Id() {
|
||||
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
|
||||
"error": "( .. •˘___˘• .. )",
|
||||
"error": "Missing required token subject, or subject is not valid for the requested server.",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
s := GetServer(c.Param("server"))
|
||||
archivePath := getArchivePath(s.Id())
|
||||
|
||||
st, err := s.Archiver.Stat()
|
||||
// Stat the archive file.
|
||||
st, err := os.Lstat(archivePath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
TrackedServerError(err, s).SetMessage("failed to stat archive").AbortWithServerError(c)
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
_ = WithError(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
c.AbortWithStatus(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
checksum, err := s.Archiver.Checksum()
|
||||
// Compute sha1 checksum.
|
||||
h := sha256.New()
|
||||
f, err := os.Open(archivePath)
|
||||
if err != nil {
|
||||
TrackedServerError(err, s).SetMessage("failed to calculate checksum").AbortWithServerError(c)
|
||||
return
|
||||
}
|
||||
|
||||
file, err := os.Open(s.Archiver.Path())
|
||||
if err != nil {
|
||||
tserr := TrackedServerError(err, s)
|
||||
if !os.IsNotExist(err) {
|
||||
tserr.SetMessage("failed to open archive for reading")
|
||||
} else {
|
||||
tserr.SetMessage("failed to open archive")
|
||||
}
|
||||
|
||||
tserr.AbortWithServerError(c)
|
||||
if _, err := io.Copy(h, bufio.NewReader(f)); err != nil {
|
||||
_ = f.Close()
|
||||
_ = WithError(c, err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
if err := f.Close(); err != nil {
|
||||
_ = WithError(c, err)
|
||||
return
|
||||
}
|
||||
checksum := hex.EncodeToString(h.Sum(nil))
|
||||
|
||||
// Stream the file to the client.
|
||||
f, err = os.Open(archivePath)
|
||||
if err != nil {
|
||||
_ = WithError(c, err)
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
c.Header("X-Checksum", checksum)
|
||||
c.Header("X-Mime-Type", st.Mimetype)
|
||||
c.Header("Content-Length", strconv.Itoa(int(st.Info.Size())))
|
||||
c.Header("Content-Disposition", "attachment; filename="+s.Archiver.Name())
|
||||
c.Header("X-Mime-Type", "application/tar+gzip")
|
||||
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
||||
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(s.Id()+".tar.gz"))
|
||||
c.Header("Content-Type", "application/octet-stream")
|
||||
|
||||
bufio.NewReader(file).WriteTo(c.Writer)
|
||||
_, _ = bufio.NewReader(f).WriteTo(c.Writer)
|
||||
}
|
||||
|
||||
func postServerArchive(c *gin.Context) {
|
||||
s := GetServer(c.Param("server"))
|
||||
s := middleware.ExtractServer(c)
|
||||
manager := middleware.ExtractManager(c)
|
||||
|
||||
go func(s *server.Server) {
|
||||
if err := s.Archiver.Archive(); err != nil {
|
||||
s.Log().WithField("error", err).Error("failed to get archive for server")
|
||||
return
|
||||
l := log.WithField("server", s.Id())
|
||||
|
||||
// This function automatically adds the Source Node prefix and Timestamp to the log
|
||||
// output before sending it over the websocket.
|
||||
sendTransferLog := func(data string) {
|
||||
output := colorstring.Color(fmt.Sprintf("[yellow][bold]%s [Pterodactyl Transfer System] [Source Node]:[default] %s", time.Now().Format(time.RFC1123), data))
|
||||
s.Events().Publish(server.TransferLogsEvent, output)
|
||||
}
|
||||
|
||||
s.Log().Debug("successfully created server archive, notifying panel")
|
||||
|
||||
r := api.NewRequester()
|
||||
rerr, err := r.SendArchiveStatus(s.Id(), true)
|
||||
if rerr != nil || err != nil {
|
||||
if err != nil {
|
||||
s.Log().WithField("error", err).Error("failed to notify panel of archive status")
|
||||
return
|
||||
}
|
||||
|
||||
s.Log().WithField("error", rerr.String()).Error("panel returned an error when sending the archive status")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
s.Log().Debug("successfully notified panel of archive status")
|
||||
}(s)
|
||||
|
||||
c.Status(http.StatusAccepted)
|
||||
}
|
||||
|
||||
func postTransfer(c *gin.Context) {
|
||||
buf := bytes.Buffer{}
|
||||
buf.ReadFrom(c.Request.Body)
|
||||
|
||||
go func(data []byte) {
|
||||
serverID, _ := jsonparser.GetString(data, "server_id")
|
||||
url, _ := jsonparser.GetString(data, "url")
|
||||
token, _ := jsonparser.GetString(data, "token")
|
||||
|
||||
l := log.WithField("server", serverID)
|
||||
// Create an http client with no timeout.
|
||||
client := &http.Client{Timeout: 0}
|
||||
s.Events().Publish(server.TransferStatusEvent, "starting")
|
||||
sendTransferLog("Attempting to archive server...")
|
||||
|
||||
hasError := true
|
||||
defer func() {
|
||||
@@ -139,152 +152,346 @@ func postTransfer(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
l.Info("server transfer failed, notifying panel")
|
||||
rerr, err := api.NewRequester().SendTransferFailure(serverID)
|
||||
if rerr != nil || err != nil {
|
||||
if err != nil {
|
||||
l.WithField("error", err).Error("failed to notify panel with transfer failure")
|
||||
// Mark the server as not being transferred so it can actually be used.
|
||||
s.SetTransferring(false)
|
||||
s.Events().Publish(server.TransferStatusEvent, "failure")
|
||||
|
||||
sendTransferLog("Attempting to notify panel of archive failure..")
|
||||
if err := manager.Client().SetArchiveStatus(s.Context(), s.Id(), false); err != nil {
|
||||
if !remote.IsRequestError(err) {
|
||||
sendTransferLog("Failed to notify panel of archive failure: " + err.Error())
|
||||
l.WithField("error", err).Error("failed to notify panel of failed archive status")
|
||||
return
|
||||
}
|
||||
|
||||
l.WithField("error", errors.WithStack(rerr)).Error("received error response from panel while notifying of transfer failure")
|
||||
sendTransferLog("Panel returned an error while notifying it of a failed archive: " + err.Error())
|
||||
l.WithField("error", err.Error()).Error("panel returned an error when notifying it of a failed archive status")
|
||||
return
|
||||
}
|
||||
|
||||
l.Debug("notified panel of transfer failure")
|
||||
sendTransferLog("Successfully notified panel of failed archive status")
|
||||
l.Info("successfully notified panel of failed archive status")
|
||||
}()
|
||||
|
||||
// Make a new GET request to the URL the panel gave us.
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
log.WithField("error", errors.WithStack(err)).Error("failed to create http request for archive transfer")
|
||||
// Mark the server as transferring to prevent problems.
|
||||
s.SetTransferring(true)
|
||||
|
||||
// Ensure the server is offline. Sometimes a "No such container" error gets through
|
||||
// which means the server is already stopped. We can ignore that.
|
||||
if err := s.Environment.WaitForStop(60, false); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
|
||||
sendTransferLog("Failed to stop server, aborting transfer..")
|
||||
l.WithField("error", err).Error("failed to stop server")
|
||||
return
|
||||
}
|
||||
|
||||
// Add the authorization header.
|
||||
req.Header.Set("Authorization", token)
|
||||
// Create an archive of the entire server's data directory.
|
||||
a := &filesystem.Archive{
|
||||
BasePath: s.Filesystem().Path(),
|
||||
}
|
||||
|
||||
// Execute the http request.
|
||||
res, err := client.Do(req)
|
||||
// Attempt to get an archive of the server.
|
||||
if err := a.Create(getArchivePath(s.Id())); err != nil {
|
||||
sendTransferLog("An error occurred while archiving the server: " + err.Error())
|
||||
l.WithField("error", err).Error("failed to get transfer archive for server")
|
||||
return
|
||||
}
|
||||
|
||||
sendTransferLog("Successfully created archive, attempting to notify panel..")
|
||||
l.Info("successfully created server transfer archive, notifying panel..")
|
||||
|
||||
if err := manager.Client().SetArchiveStatus(s.Context(), s.Id(), true); err != nil {
|
||||
if !remote.IsRequestError(err) {
|
||||
sendTransferLog("Failed to notify panel of archive success: " + err.Error())
|
||||
l.WithField("error", err).Error("failed to notify panel of successful archive status")
|
||||
return
|
||||
}
|
||||
|
||||
sendTransferLog("Panel returned an error while notifying it of a successful archive: " + err.Error())
|
||||
l.WithField("error", err.Error()).Error("panel returned an error when notifying it of a successful archive status")
|
||||
return
|
||||
}
|
||||
|
||||
hasError = false
|
||||
|
||||
// This log may not be displayed by the client due to the status event being sent before or at the same time.
|
||||
sendTransferLog("Successfully notified panel of successful archive status")
|
||||
|
||||
l.Info("successfully notified panel of successful transfer archive status")
|
||||
s.Events().Publish(server.TransferStatusEvent, "archived")
|
||||
}(s)
|
||||
|
||||
c.Status(http.StatusAccepted)
|
||||
}
|
||||
|
||||
func (w *downloadProgress) Write(v []byte) (int, error) {
|
||||
n := len(v)
|
||||
atomic.AddInt64(&w.progress, int64(n))
|
||||
return n, nil
|
||||
}
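downloadProgress.Write never stores the bytes, it only counts them atomically; the copy later in this file feeds it through io.TeeReader so the ticker goroutine can report a running total. A self-contained sketch of that idea:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// counter mirrors downloadProgress: it records how many bytes passed through.
type counter struct{ n int64 }

func (c *counter) Write(p []byte) (int, error) {
	atomic.AddInt64(&c.n, int64(len(p)))
	return len(p), nil
}

func main() {
	src := strings.NewReader("pretend this is a large archive")
	var dst bytes.Buffer
	progress := &counter{}

	// Every byte read from src is also written to progress, so the count
	// tracks the copy without touching the destination.
	if _, err := io.Copy(&dst, io.TeeReader(src, progress)); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(atomic.LoadInt64(&progress.n), "bytes copied")
}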
|
||||
|
||||
// Log helper function to attach all errors and info output to a consistently formatted
|
||||
// log string for easier querying.
|
||||
func (str serverTransferRequest) log() *log.Entry {
|
||||
return log.WithField("subsystem", "transfers").WithField("server_id", str.ServerID)
|
||||
}
|
||||
|
||||
// Downloads an archive from the machine that the server currently lives on.
|
||||
func (str serverTransferRequest) downloadArchive() (*http.Response, error) {
|
||||
client := http.Client{Timeout: 0}
|
||||
req, err := http.NewRequest(http.MethodGet, str.URL, nil)
|
||||
if err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to send archive http request")
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Authorization", str.Token)
|
||||
res, err := client.Do(req) // lgtm [go/request-forgery]
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Returns the path to the local archive on the system.
|
||||
func (str serverTransferRequest) path() string {
|
||||
return getArchivePath(str.ServerID)
|
||||
}
|
||||
|
||||
// Creates the archive location on this machine by first checking that the required file
|
||||
// does not already exist. If it does exist, the file is deleted and then re-created as
|
||||
// an empty file.
|
||||
func (str serverTransferRequest) createArchiveFile() (*os.File, error) {
|
||||
p := str.path()
|
||||
if _, err := os.Stat(p); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
}
|
||||
} else if err := os.Remove(p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return os.Create(p)
|
||||
}
|
||||
|
||||
// Deletes the archive from the local filesystem. This is executed as a deferred function.
|
||||
func (str serverTransferRequest) removeArchivePath() {
|
||||
p := str.path()
|
||||
str.log().Debug("deleting temporary transfer archive")
|
||||
if err := os.Remove(p); err != nil && !os.IsNotExist(err) {
|
||||
str.log().WithField("path", p).WithField("error", err).Error("failed to delete temporary transfer archive file")
|
||||
return
|
||||
}
|
||||
str.log().Debug("deleted temporary transfer archive successfully")
|
||||
}
|
||||
|
||||
// Verifies that the SHA-256 checksum of the file on the local filesystem matches the
|
||||
// expected value from the transfer request. The string value returned is the computed
|
||||
// checksum on the system.
|
||||
func (str serverTransferRequest) verifyChecksum(matches string) (bool, string, error) {
|
||||
f, err := os.Open(str.path())
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
defer f.Close()
|
||||
h := sha256.New()
|
||||
if _, err := io.Copy(h, bufio.NewReader(f)); err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
checksum := hex.EncodeToString(h.Sum(nil))
|
||||
return checksum == matches, checksum, nil
|
||||
}
|
||||
|
||||
// Sends a notification to the Panel letting it know what the status of this transfer is.
|
||||
func (str serverTransferRequest) sendTransferStatus(client remote.Client, successful bool) error {
|
||||
lg := str.log().WithField("transfer_successful", successful)
|
||||
lg.Info("notifying Panel of server transfer state")
|
||||
if err := client.SetTransferStatus(context.Background(), str.ServerID, successful); err != nil {
|
||||
lg.WithField("error", err).Error("error notifying panel of transfer state")
|
||||
return err
|
||||
}
|
||||
lg.Debug("notified panel of transfer state")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Initiates a transfer between two nodes for a server by downloading an archive from the
|
||||
// remote node and then applying the server details to this machine.
|
||||
func postTransfer(c *gin.Context) {
|
||||
var data serverTransferRequest
|
||||
if err := c.BindJSON(&data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
manager := middleware.ExtractManager(c)
|
||||
u, err := uuid.Parse(data.ServerID)
|
||||
if err != nil {
|
||||
WithError(c, err)
|
||||
return
|
||||
}
|
||||
// Force the server ID to be a valid UUID string at this point. If it is not an error
|
||||
// is returned to the caller. This limits injection vulnerabilities that would cause
|
||||
// the str.path() function to return a location not within the server archive directory.
|
||||
data.ServerID = u.String()
|
||||
|
||||
data.log().Info("handling incoming server transfer request")
|
||||
go func(data *serverTransferRequest) {
|
||||
hasError := true
|
||||
|
||||
// Create a new server installer. This will only configure the environment and not
|
||||
// run the installer scripts.
|
||||
i, err := installer.New(context.Background(), manager, data.Server)
|
||||
if err != nil {
|
||||
_ = data.sendTransferStatus(manager.Client(), false)
|
||||
data.log().WithField("error", err).Error("failed to validate received server data")
|
||||
return
|
||||
}
|
||||
|
||||
// This function automatically adds the Target Node prefix and Timestamp to the log output before sending it
|
||||
// over the websocket.
|
||||
sendTransferLog := func(data string) {
|
||||
output := colorstring.Color(fmt.Sprintf("[yellow][bold]%s [Pterodactyl Transfer System] [Target Node]:[default] %s", time.Now().Format(time.RFC1123), data))
|
||||
i.Server().Events().Publish(server.TransferLogsEvent, output)
|
||||
}
|
||||
|
||||
// Mark the server as transferring to prevent problems later on during the process and
|
||||
// then push the server into the global server collection for this instance.
|
||||
i.Server().SetTransferring(true)
|
||||
manager.Add(i.Server())
|
||||
defer func(s *server.Server) {
|
||||
// In the event that this transfer call fails, remove the server from the global
|
||||
// server tracking so that we don't have a dangling instance.
|
||||
if err := data.sendTransferStatus(manager.Client(), !hasError); hasError || err != nil {
|
||||
sendTransferLog("Server transfer failed, check Wings logs for additional information.")
|
||||
s.Events().Publish(server.TransferStatusEvent, "failure")
|
||||
manager.Remove(func(match *server.Server) bool {
|
||||
return match.Id() == s.Id()
|
||||
})
|
||||
|
||||
// If the transfer status was successful but the request failed, act like the transfer failed.
|
||||
if !hasError && err != nil {
|
||||
// Delete all extracted files.
|
||||
if err := os.RemoveAll(s.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
|
||||
data.log().WithField("error", err).Warn("failed to delete local server files directory")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s.SetTransferring(false)
|
||||
s.Events().Publish(server.TransferStatusEvent, "success")
|
||||
sendTransferLog("Transfer completed.")
|
||||
}
|
||||
}(i.Server())
|
||||
|
||||
data.log().Info("downloading server archive from current server node")
|
||||
sendTransferLog("Received incoming transfer from Panel, attempting to download archive from source node...")
|
||||
res, err := data.downloadArchive()
|
||||
if err != nil {
|
||||
sendTransferLog("Failed to retrieve server archive from remote node: " + err.Error())
|
||||
data.log().WithField("error", err).Error("failed to download archive for server transfer")
|
||||
return
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
data.log().WithField("error", err).WithField("status", res.StatusCode).Error("unexpected error response from transfer endpoint")
|
||||
return
|
||||
}
|
||||
|
||||
// Handle non-200 status codes.
|
||||
if res.StatusCode != 200 {
|
||||
_, err := ioutil.ReadAll(res.Body)
|
||||
size := res.ContentLength
|
||||
if size == 0 {
|
||||
data.log().WithField("error", err).Error("received an archive response with Content-Length of 0")
|
||||
return
|
||||
}
|
||||
sendTransferLog("Got server archive response from remote node. (Content-Length: " + strconv.Itoa(int(size)) + ")")
|
||||
sendTransferLog("Creating local archive file...")
|
||||
file, err := data.createArchiveFile()
|
||||
if err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).WithField("status", res.StatusCode).Error("failed to read transfer response body")
|
||||
|
||||
data.log().WithField("error", err).Error("failed to create archive file on local filesystem")
|
||||
return
|
||||
}
|
||||
|
||||
l.WithField("error", errors.WithStack(err)).WithField("status", res.StatusCode).Error("failed to request server archive")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Get the path to the archive.
|
||||
archivePath := filepath.Join(config.Get().System.ArchiveDirectory, serverID+".tar.gz")
|
||||
|
||||
// Check if the archive already exists and delete it if it does.
|
||||
_, err = os.Stat(archivePath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to stat archive file")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if err := os.Remove(archivePath); err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Warn("failed to remove old archive file")
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Create the file.
|
||||
file, err := os.Create(archivePath)
|
||||
if err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to open archive on disk")
|
||||
|
||||
return
|
||||
}
|
||||
sendTransferLog("Writing archive to disk...")
|
||||
data.log().Info("writing transfer archive to disk...")
|
||||
|
||||
// Copy the file.
|
||||
progress := &downloadProgress{size: size}
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
go func(progress *downloadProgress, t *time.Ticker) {
|
||||
for range ticker.C {
|
||||
// p = 100 (Downloaded)
|
||||
// size = 1000 (Content-Length)
|
||||
// p / size = 0.1
|
||||
// * 100 = 10% (Multiply by 100 to get a percentage of the download)
|
||||
// 10% / tickPercentage = (10% / (100 / 25)) (Divide by tick percentage to get the number of ticks)
|
||||
// 2.5 (Number of ticks as a float64)
|
||||
// 2 (convert to an integer)
|
||||
p := atomic.LoadInt64(&progress.progress)
|
||||
// We have to cast these numbers to float in order to get a float result from the division.
|
||||
width := ((float64(p) / float64(size)) * 100) / tickPercentage
|
||||
bar := strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width))
|
||||
sendTransferLog("Downloading [" + bar + "] " + system.FormatBytes(p) + " / " + system.FormatBytes(progress.size))
|
||||
}
|
||||
}(progress, ticker)
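Restated on its own, the arithmetic in the comments above turns downloaded bytes into a fixed-width bar of 25 ticks, each tick worth 4% of the download:

package main

import (
	"fmt"
	"strings"
)

const ticks = 25
const tickPercentage = 100 / ticks // 4% of the download per tick

func bar(progress, size int64) string {
	// e.g. 400 of 1000 bytes -> 40% -> 40 / 4 = 10 filled ticks out of 25.
	width := ((float64(progress) / float64(size)) * 100) / tickPercentage
	return "[" + strings.Repeat("=", int(width)) + strings.Repeat(" ", ticks-int(width)) + "]"
}

func main() {
	fmt.Println(bar(400, 1000)) // [==========               ]
}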
|
||||
|
||||
var reader io.Reader
|
||||
downloadLimit := float64(config.Get().System.Transfers.DownloadLimit) * 1024 * 1024
|
||||
if downloadLimit > 0 {
|
||||
// Wrap the body with a reader that is limited to the defined download limit speed.
|
||||
reader = ratelimit.Reader(res.Body, ratelimit.NewBucketWithRate(downloadLimit, int64(downloadLimit)))
|
||||
} else {
|
||||
reader = res.Body
|
||||
}
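When System.Transfers.DownloadLimit is set, the response body is wrapped in a token-bucket reader from github.com/juju/ratelimit, the same two calls used above. A small standalone sketch of the throttling; the rate below is arbitrary:

package main

import (
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 64))

	// Allow roughly 32 bytes per second with a burst capacity of 32 bytes.
	bucket := ratelimit.NewBucketWithRate(32, 32)
	limited := ratelimit.Reader(src, bucket)

	start := time.Now()
	n, _ := io.Copy(io.Discard, limited)
	fmt.Printf("copied %d bytes in %s\n", n, time.Since(start).Round(time.Second))
}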
|
||||
|
||||
buf := make([]byte, 1024*4)
|
||||
_, err = io.CopyBuffer(file, res.Body, buf)
|
||||
if err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to copy archive file to disk")
|
||||
if _, err := io.CopyBuffer(file, io.TeeReader(reader, progress), buf); err != nil {
|
||||
ticker.Stop()
|
||||
_ = file.Close()
|
||||
|
||||
sendTransferLog("Failed while writing archive file to disk: " + err.Error())
|
||||
data.log().WithField("error", err).Error("failed to copy archive file to disk")
|
||||
return
|
||||
}
|
||||
ticker.Stop()
|
||||
|
||||
// Show 100% completion.
|
||||
humanSize := system.FormatBytes(progress.size)
|
||||
sendTransferLog("Downloading [" + strings.Repeat("=", ticks) + "] " + humanSize + " / " + humanSize)
|
||||
|
||||
// Close the file so it can be opened to verify the checksum.
|
||||
if err := file.Close(); err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to close archive file")
|
||||
data.log().WithField("error", err).Error("unable to close archive file on local filesystem")
|
||||
return
|
||||
}
|
||||
data.log().Info("finished writing transfer archive to disk")
|
||||
sendTransferLog("Successfully wrote archive to disk.")
|
||||
|
||||
// Whenever the transfer fails or succeeds, delete the temporary transfer archive that
|
||||
// was created on the disk.
|
||||
defer data.removeArchivePath()
|
||||
|
||||
sendTransferLog("Verifying checksum of downloaded archive...")
|
||||
data.log().Info("computing checksum of downloaded archive file")
|
||||
expected := res.Header.Get("X-Checksum")
|
||||
if matches, computed, err := data.verifyChecksum(expected); err != nil {
|
||||
data.log().WithField("error", err).Error("encountered an error while calculating local filesystem archive checksum")
|
||||
return
|
||||
} else if !matches {
|
||||
sendTransferLog("@@@@@ CHECKSUM VERIFICATION FAILED @@@@@")
|
||||
sendTransferLog(" - Source Checksum: " + expected)
|
||||
sendTransferLog(" - Computed Checksum: " + computed)
|
||||
data.log().WithField("expected_sum", expected).WithField("computed_checksum", computed).Error("checksum mismatch when verifying integrity of local archive")
|
||||
return
|
||||
}
|
||||
|
||||
l.WithField("server", serverID).Debug("server archive downloaded, computing checksum...")
|
||||
|
||||
// Open the archive file for computing a checksum.
|
||||
file, err = os.Open(archivePath)
|
||||
if err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to open archive on disk")
|
||||
return
|
||||
}
|
||||
|
||||
// Compute the sha256 checksum of the file.
|
||||
hash := sha256.New()
|
||||
buf = make([]byte, 1024*4)
|
||||
if _, err := io.CopyBuffer(hash, file, buf); err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to copy archive file for checksum verification")
|
||||
return
|
||||
}
|
||||
|
||||
// Verify the two checksums.
|
||||
if hex.EncodeToString(hash.Sum(nil)) != res.Header.Get("X-Checksum") {
|
||||
l.Error("checksum verification failed for archive")
|
||||
return
|
||||
}
|
||||
|
||||
// Close the file.
|
||||
if err := file.Close(); err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to close archive file after calculating checksum")
|
||||
return
|
||||
}
|
||||
|
||||
l.Info("server archive transfer was successful")
|
||||
|
||||
// Get the server data from the request.
|
||||
serverData, t, _, _ := jsonparser.Get(data, "server")
|
||||
if t != jsonparser.Object {
|
||||
l.Error("invalid server data passed in request")
|
||||
return
|
||||
}
|
||||
|
||||
// Create a new server installer (note this does not execute the install script)
|
||||
i, err := installer.New(serverData)
|
||||
if err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to validate received server data")
|
||||
return
|
||||
}
|
||||
|
||||
// Add the server to the collection.
|
||||
server.GetServers().Add(i.Server())
|
||||
|
||||
// Create the server's environment (note this does not execute the install script)
|
||||
// Create the server's environment.
|
||||
sendTransferLog("Creating server environment, this could take a while..")
|
||||
data.log().Info("creating server environment")
|
||||
if err := i.Server().CreateEnvironment(); err != nil {
|
||||
l.WithField("error", err).Error("failed to create server environment")
|
||||
data.log().WithField("error", err).Error("failed to create server environment")
|
||||
return
|
||||
}
|
||||
|
||||
// Un-archive the archive. That sounds weird..
|
||||
if err := archiver.NewTarGz().Unarchive(archivePath, i.Server().Filesystem().Path()); err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to extract server archive")
|
||||
sendTransferLog("Server environment has been created, extracting transfer archive..")
|
||||
data.log().Info("server environment configured, extracting transfer archive")
|
||||
if err := archiver.NewTarGz().Unarchive(data.path(), i.Server().Filesystem().Path()); err != nil {
|
||||
// Un-archiving failed, delete the server's data directory.
|
||||
if err := os.RemoveAll(i.Server().Filesystem().Path()); err != nil && !os.IsNotExist(err) {
|
||||
data.log().WithField("error", err).Warn("failed to delete local server files directory")
|
||||
}
|
||||
data.log().WithField("error", err).Error("failed to extract server archive")
|
||||
return
|
||||
}
|
||||
|
||||
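For reference, the integrity check in this handler amounts to streaming the downloaded archive through SHA-256 and comparing the digest against the checksum the source node reported in the X-Checksum header. A minimal standalone sketch of that comparison, with an illustrative file path and header value rather than anything taken from Wings:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// fileSHA256 streams a file through a SHA-256 hash using a small reusable
// buffer, mirroring the io.CopyBuffer approach used above.
func fileSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := sha256.New()
	buf := make([]byte, 4*1024)
	if _, err := io.CopyBuffer(h, f, buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// "expected" would come from the X-Checksum response header in practice.
	expected := "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"
	computed, err := fileSHA256("/tmp/transfer-archive.tar.gz")
	if err != nil {
		fmt.Println("failed to hash archive:", err)
		return
	}
	if !strings.EqualFold(computed, expected) {
		fmt.Println("checksum mismatch: refusing to extract archive")
		return
	}
	fmt.Println("checksum verified")
}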
@@ -295,21 +502,9 @@ func postTransfer(c *gin.Context) {
|
||||
// hiccup or the fix of whatever error causing the success request to fail.
|
||||
hasError = false
|
||||
|
||||
// Notify the panel that the transfer succeeded.
|
||||
rerr, err := api.NewRequester().SendTransferSuccess(serverID)
|
||||
if rerr != nil || err != nil {
|
||||
if err != nil {
|
||||
l.WithField("error", errors.WithStack(err)).Error("failed to notify panel of transfer success")
|
||||
return
|
||||
}
|
||||
|
||||
l.WithField("error", errors.WithStack(rerr)).Error("panel responded with error after transfer success")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
l.Info("successfully notified panel of transfer success")
|
||||
}(buf.Bytes())
|
||||
data.log().Info("archive extracted successfully, notifying Panel of status")
|
||||
sendTransferLog("Archive extracted successfully.")
|
||||
}(&data)
|
||||
|
||||
c.Status(http.StatusAccepted)
|
||||
}
|
||||
|
||||
@@ -2,11 +2,40 @@ package tokens
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/apex/log"
|
||||
"github.com/gbrlsnchs/jwt/v3"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The time at which Wings was booted. No JWT's created before this time are allowed to
|
||||
// connect to the socket since they may have been marked as denied already and therefore
|
||||
// could be invalid at this point.
|
||||
//
|
||||
// By doing this we make it so that a user who gets disconnected from Wings due to a Wings
|
||||
// reboot just needs to request a new token as if their old token had expired naturally.
|
||||
var wingsBootTime = time.Now()
|
||||
|
||||
// A map that contains any JTI's that have been denied by the Panel and the time at which
|
||||
// they were marked as denied. Therefore any JWT with the same JTI and an IssuedTime that
|
||||
// is the same as or before this time should be considered invalid.
|
||||
//
|
||||
// This is used to allow the Panel to revoke tokens en-masse for a given user & server
|
||||
// combination since the JTI for tokens is just MD5(user.id + server.uuid). When a server
|
||||
// is booted this listing is fetched from the panel and the Websocket is dynamically updated.
|
||||
var denylist sync.Map
|
||||
|
||||
// Adds a JTI to the denylist by marking any JWTs generated before the current time as
|
||||
// being invalid if they use the same JTI.
|
||||
func DenyJTI(jti string) {
|
||||
log.WithField("jti", jti).Debugf("adding \"%s\" to JTI denylist", jti)
|
||||
|
||||
denylist.Store(jti, time.Now())
|
||||
}
|
||||
|
||||
// A JWT payload for Websocket connections. This JWT is passed along to the Websocket after
|
||||
// it has been connected to by sending an "auth" event.
|
||||
type WebsocketPayload struct {
|
||||
jwt.Payload
|
||||
sync.RWMutex
|
||||
@@ -24,6 +53,7 @@ func (p *WebsocketPayload) GetPayload() *jwt.Payload {
|
||||
return &p.Payload
|
||||
}
|
||||
|
||||
// Returns the UUID of the server associated with this JWT.
|
||||
func (p *WebsocketPayload) GetServerUuid() string {
|
||||
p.RLock()
|
||||
defer p.RUnlock()
|
||||
@@ -31,6 +61,33 @@ func (p *WebsocketPayload) GetServerUuid() string {
|
||||
return p.ServerUUID
|
||||
}
|
||||
|
||||
// Check if the JWT has been marked as denied by the instance due to either being issued
|
||||
// before Wings was booted, or because we have denied all tokens with the same JTI
|
||||
// occurring before a set time.
|
||||
func (p *WebsocketPayload) Denylisted() bool {
|
||||
// If there is no IssuedAt present for the token, we cannot validate the token so
|
||||
// just immediately mark it as not valid.
|
||||
if p.IssuedAt == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the time that the token was issued is before the time at which Wings was booted
|
||||
// then the token is invalid for our purposes, even if the token "has permission".
|
||||
if p.IssuedAt.Time.Before(wingsBootTime) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Finally, if the token was issued before a time that is currently denied for this
|
||||
// token instance, ignore the permissions response.
|
||||
if t, ok := denylist.Load(p.JWTID); ok {
|
||||
if p.IssuedAt.Time.Before(t.(time.Time)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if the given token payload has a permission string.
|
||||
func (p *WebsocketPayload) HasPermission(permission string) bool {
|
||||
p.RLock()
|
||||
@@ -38,7 +95,7 @@ func (p *WebsocketPayload) HasPermission(permission string) bool {
|
||||
|
||||
for _, k := range p.Permissions {
|
||||
if k == permission || (!strings.HasPrefix(permission, "admin") && k == "*") {
|
||||
return true
|
||||
return !p.Denylisted()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
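In short, revocation works by recording a timestamp per JTI and rejecting any token issued before that timestamp or before the process booted. A rough self-contained sketch of the same idea (the payload fields here are simplified stand-ins, not the real WebsocketPayload):

package main

import (
	"fmt"
	"sync"
	"time"
)

var bootTime = time.Now()
var denied sync.Map // jti -> time.Time at which it was revoked

// denyJTI marks every token sharing this JTI and issued before "now" as invalid.
func denyJTI(jti string) {
	denied.Store(jti, time.Now())
}

// denylisted reports whether a token with the given JTI and issue time should
// be rejected: missing issue time, issued before boot, or issued before the
// recorded revocation time for its JTI.
func denylisted(jti string, issuedAt *time.Time) bool {
	if issuedAt == nil || issuedAt.Before(bootTime) {
		return true
	}
	if t, ok := denied.Load(jti); ok && issuedAt.Before(t.(time.Time)) {
		return true
	}
	return false
}

func main() {
	issued := time.Now()
	fmt.Println(denylisted("abc", &issued)) // false: issued after boot, not revoked
	denyJTI("abc")
	fmt.Println(denylisted("abc", &issued)) // true: revoked after it was issued
}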
@@ -45,6 +45,9 @@ var e = []string{
|
||||
server.InstallCompletedEvent,
|
||||
server.DaemonMessageEvent,
|
||||
server.BackupCompletedEvent,
|
||||
server.BackupRestoreCompletedEvent,
|
||||
server.TransferLogsEvent,
|
||||
server.TransferStatusEvent,
|
||||
}
|
||||
|
||||
// Listens for different events happening on a server and sends them along
|
||||
|
||||
@@ -2,19 +2,18 @@ package websocket
|
||||
|
||||
import (
|
||||
"context"
|
||||
"emperror.dev/errors"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/apex/log"
|
||||
"github.com/gbrlsnchs/jwt/v3"
|
||||
"github.com/google/uuid"
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/environment/docker"
|
||||
"github.com/pterodactyl/wings/router/tokens"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -29,14 +28,14 @@ const (
|
||||
PermissionSendPowerRestart = "control.restart"
|
||||
PermissionReceiveErrors = "admin.websocket.errors"
|
||||
PermissionReceiveInstall = "admin.websocket.install"
|
||||
PermissionReceiveTransfer = "admin.websocket.transfer"
|
||||
PermissionReceiveBackups = "backup.read"
|
||||
)
|
||||
|
||||
type Handler struct {
|
||||
sync.RWMutex
|
||||
|
||||
Connection *websocket.Conn
|
||||
jwt *tokens.WebsocketPayload `json:"-"`
|
||||
sync.RWMutex `json:"-"`
|
||||
Connection *websocket.Conn `json:"-"`
|
||||
jwt *tokens.WebsocketPayload
|
||||
server *server.Server
|
||||
uuid uuid.UUID
|
||||
}
|
||||
@@ -45,12 +44,14 @@ var (
|
||||
ErrJwtNotPresent = errors.New("jwt: no jwt present")
|
||||
ErrJwtNoConnectPerm = errors.New("jwt: missing connect permission")
|
||||
ErrJwtUuidMismatch = errors.New("jwt: server uuid mismatch")
|
||||
ErrJwtOnDenylist = errors.New("jwt: created too far in past (denylist)")
|
||||
)
|
||||
|
||||
func IsJwtError(err error) bool {
|
||||
return errors.Is(err, ErrJwtNotPresent) ||
|
||||
errors.Is(err, ErrJwtNoConnectPerm) ||
|
||||
errors.Is(err, ErrJwtUuidMismatch) ||
|
||||
errors.Is(err, ErrJwtOnDenylist) ||
|
||||
errors.Is(err, jwt.ErrExpValidation)
|
||||
}
|
||||
|
||||
@@ -62,8 +63,12 @@ func NewTokenPayload(token []byte) (*tokens.WebsocketPayload, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if payload.Denylisted() {
|
||||
return nil, ErrJwtOnDenylist
|
||||
}
|
||||
|
||||
if !payload.HasPermission(PermissionConnect) {
|
||||
return nil, errors.New("not authorized to connect to this socket")
|
||||
return nil, ErrJwtNoConnectPerm
|
||||
}
|
||||
|
||||
return &payload, nil
|
||||
@@ -79,19 +84,11 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
|
||||
if o == config.Get().PanelLocation {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, origin := range config.Get().AllowedOrigins {
|
||||
if origin == "*" {
|
||||
if origin == "*" || origin == o {
|
||||
return true
|
||||
}
|
||||
|
||||
if o != origin {
|
||||
continue
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
},
|
||||
}
|
||||
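The simplified origin check above accepts a connection when the Origin header equals the configured Panel location, matches an entry in AllowedOrigins, or a wildcard is configured. A small sketch of that decision in isolation (the config values are placeholders):

package main

import "fmt"

// originAllowed mirrors the upgrader's CheckOrigin logic: the Panel's own
// origin is always accepted, otherwise the origin must match a configured
// entry or a "*" wildcard.
func originAllowed(origin, panelLocation string, allowed []string) bool {
	if origin == panelLocation {
		return true
	}
	for _, o := range allowed {
		if o == "*" || o == origin {
			return true
		}
	}
	return false
}

func main() {
	allowed := []string{"https://admin.example.com"}
	fmt.Println(originAllowed("https://panel.example.com", "https://panel.example.com", allowed)) // true
	fmt.Println(originAllowed("https://admin.example.com", "https://panel.example.com", allowed)) // true
	fmt.Println(originAllowed("https://evil.example.com", "https://panel.example.com", allowed))  // false
}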
@@ -103,7 +100,7 @@ func GetHandler(s *server.Server, w http.ResponseWriter, r *http.Request) (*Hand
|
||||
|
||||
u, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Handler{
|
||||
@@ -125,7 +122,6 @@ func (h *Handler) SendJson(v *Message) error {
|
||||
Event: JwtErrorEvent,
|
||||
Args: []string{err.Error()},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -146,6 +142,13 @@ func (h *Handler) SendJson(v *Message) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// If we are sending transfer output, only send it to the user if they have the required permissions.
|
||||
if v.Event == server.TransferLogsEvent {
|
||||
if !j.HasPermission(PermissionReceiveTransfer) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.unsafeSendJson(v); err != nil {
|
||||
@@ -188,6 +191,10 @@ func (h *Handler) TokenValid() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if j.Denylisted() {
|
||||
return ErrJwtOnDenylist
|
||||
}
|
||||
|
||||
if !j.HasPermission(PermissionConnect) {
|
||||
return ErrJwtNoConnectPerm
|
||||
}
|
||||
@@ -204,25 +211,26 @@ func (h *Handler) TokenValid() error {
|
||||
// error message, otherwise we just send back a standard error message.
|
||||
func (h *Handler) SendErrorJson(msg Message, err error, shouldLog ...bool) error {
|
||||
j := h.GetJwt()
|
||||
expected := errors.Is(err, server.ErrSuspended) ||
|
||||
errors.Is(err, server.ErrIsRunning) ||
|
||||
errors.Is(err, filesystem.ErrNotEnoughDiskSpace)
|
||||
isJWTError := IsJwtError(err)
|
||||
|
||||
message := "an unexpected error was encountered while handling this request"
|
||||
if expected || (j != nil && j.HasPermission(PermissionReceiveErrors)) {
|
||||
message = err.Error()
|
||||
wsm := Message{
|
||||
Event: ErrorEvent,
|
||||
Args: []string{"an unexpected error was encountered while handling this request"},
|
||||
}
|
||||
|
||||
m, u := h.GetErrorMessage(message)
|
||||
if isJWTError || (j != nil && j.HasPermission(PermissionReceiveErrors)) {
|
||||
if isJWTError {
|
||||
wsm.Event = JwtErrorEvent
|
||||
}
|
||||
wsm.Args = []string{err.Error()}
|
||||
}
|
||||
|
||||
wsm := Message{Event: ErrorEvent}
|
||||
m, u := h.GetErrorMessage(wsm.Args[0])
|
||||
wsm.Args = []string{m}
|
||||
|
||||
if len(shouldLog) == 0 || (len(shouldLog) == 1 && shouldLog[0] == true) {
|
||||
if !expected && !IsJwtError(err) {
|
||||
if !isJWTError && (len(shouldLog) == 0 || (len(shouldLog) == 1 && shouldLog[0] == true)) {
|
||||
h.server.Log().WithFields(log.Fields{"event": msg.Event, "error_identifier": u.String(), "error": err}).
|
||||
Error("failed to handle websocket process; an error was encountered processing an event")
|
||||
}
|
||||
Errorf("error processing websocket event \"%s\"", msg.Event)
|
||||
}
|
||||
|
||||
return h.unsafeSendJson(wsm)
|
||||
@@ -260,7 +268,6 @@ func (h *Handler) HandleInbound(m Message) error {
|
||||
Event: JwtErrorEvent,
|
||||
Args: []string{err.Error()},
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -304,7 +311,7 @@ func (h *Handler) HandleInbound(m Message) error {
|
||||
|
||||
// On every authentication event, send the current server status back
|
||||
// to the client. :)
|
||||
state := h.server.GetState()
|
||||
state := h.server.Environment.State()
|
||||
h.SendJson(&Message{
|
||||
Event: server.StatusEvent,
|
||||
Args: []string{state},
|
||||
@@ -313,6 +320,7 @@ func (h *Handler) HandleInbound(m Message) error {
|
||||
// Only send the current disk usage if the server is offline, if docker container is running,
|
||||
// Environment#EnableResourcePolling() will send this data to all clients.
|
||||
if state == environment.ProcessOfflineState {
|
||||
if !h.server.IsInstalling() && !h.server.IsTransferring() {
|
||||
_ = h.server.Filesystem().HasSpaceAvailable(false)
|
||||
|
||||
b, _ := json.Marshal(h.server.Proc())
|
||||
@@ -321,6 +329,7 @@ func (h *Handler) HandleInbound(m Message) error {
|
||||
Args: []string{string(b)},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -361,7 +370,7 @@ func (h *Handler) HandleInbound(m Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
logs, err := h.server.Environment.Readlog(100)
|
||||
logs, err := h.server.Environment.Readlog(config.Get().System.WebsocketLogCount)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -391,7 +400,7 @@ func (h *Handler) HandleInbound(m Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if h.server.GetState() == environment.ProcessOfflineState {
|
||||
if h.server.Environment.State() == environment.ProcessOfflineState {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -399,7 +408,7 @@ func (h *Handler) HandleInbound(m Message) error {
|
||||
// so that we can better handle this and only set the environment to booted once we're attached.
|
||||
//
|
||||
// Or maybe just an IsBooted function?
|
||||
if h.server.GetState() == environment.ProcessStartingState {
|
||||
if h.server.Environment.State() == environment.ProcessStartingState {
|
||||
if e, ok := h.server.Environment.(*docker.Environment); ok {
|
||||
if !e.IsAttached() {
|
||||
return nil
|
||||
|
||||
@@ -1,124 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Archiver represents a Server Archiver.
|
||||
type Archiver struct {
|
||||
Server *Server
|
||||
}
|
||||
|
||||
// Path returns the path to the server's archive.
|
||||
func (a *Archiver) Path() string {
|
||||
return filepath.Join(config.Get().System.ArchiveDirectory, a.Name())
|
||||
}
|
||||
|
||||
// Name returns the name of the server's archive.
|
||||
func (a *Archiver) Name() string {
|
||||
return a.Server.Id() + ".tar.gz"
|
||||
}
|
||||
|
||||
// Exists returns a boolean based off if the archive exists.
|
||||
func (a *Archiver) Exists() bool {
|
||||
if _, err := os.Stat(a.Path()); os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Stat stats the archive file.
|
||||
func (a *Archiver) Stat() (*filesystem.Stat, error) {
|
||||
s, err := os.Stat(a.Path())
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return &filesystem.Stat{
|
||||
Info: s,
|
||||
Mimetype: "application/tar+gzip",
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Archive creates an archive of the server and deletes the previous one.
|
||||
func (a *Archiver) Archive() error {
|
||||
path := a.Server.Filesystem().Path()
|
||||
|
||||
// Get the list of root files and directories to archive.
|
||||
var files []string
|
||||
fileInfo, err := ioutil.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, file := range fileInfo {
|
||||
f := filepath.Join(path, file.Name())
|
||||
// If the file is a symlink we cannot safely assume that the result of a filepath.Join() will be
|
||||
// a safe destination. We need to check if the file is a symlink, and if so pass off to the SafePath
|
||||
// function to resolve it to the final destination.
|
||||
//
|
||||
// ioutil.ReadDir() calls Lstat, so this will work correctly. If it did not call Lstat, but rather
|
||||
// just did a normal Stat call, this would fail since that would be looking at the symlink destination
|
||||
// and not the actual file in this listing.
|
||||
if file.Mode()&os.ModeSymlink != 0 {
|
||||
f, err = a.Server.Filesystem().SafePath(filepath.Join(path, file.Name()))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
files = append(files, f)
|
||||
}
|
||||
|
||||
if err := a.DeleteIfExists(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return archiver.NewTarGz().Archive(files, a.Path())
|
||||
}
|
||||
|
||||
// DeleteIfExists deletes the archive if it exists.
|
||||
func (a *Archiver) DeleteIfExists() error {
|
||||
if _, err := a.Stat(); err != nil {
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Remove(a.Path()); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Checksum computes a SHA256 checksum of the server's archive.
|
||||
func (a *Archiver) Checksum() (string, error) {
|
||||
file, err := os.Open(a.Path())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
hash := sha256.New()
|
||||
|
||||
buf := make([]byte, 1024*4)
|
||||
if _, err := io.CopyBuffer(hash, file, buf); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return hex.EncodeToString(hash.Sum(nil)), nil
|
||||
}
|
||||
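The symlink note above relies on ioutil.ReadDir using Lstat, so a symlinked entry can be detected and resolved before it is handed to the archiver. A hedged sketch of that guard, with a plain prefix check standing in for the daemon's SafePath helper:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

// resolveEntry joins a directory entry onto root; if the entry is a symlink it
// resolves the link and refuses any target that escapes the root directory.
func resolveEntry(root string, fi os.FileInfo) (string, error) {
	p := filepath.Join(root, fi.Name())
	if fi.Mode()&os.ModeSymlink == 0 {
		return p, nil
	}
	target, err := filepath.EvalSymlinks(p)
	if err != nil {
		return "", err
	}
	if !strings.HasPrefix(target, filepath.Clean(root)+string(os.PathSeparator)) {
		return "", fmt.Errorf("symlink %s escapes %s", fi.Name(), root)
	}
	return target, nil
}

func main() {
	root := "/srv/server-data" // illustrative path only
	entries, err := ioutil.ReadDir(root) // uses Lstat, so symlinks are detectable
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, fi := range entries {
		if p, err := resolveEntry(root, fi); err == nil {
			fmt.Println("would archive:", p)
		} else {
			fmt.Println("skipping:", err)
		}
	}
}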
152
server/backup.go
@@ -1,22 +1,23 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/server/backup"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/server/backup"
|
||||
)
|
||||
|
||||
// Notifies the panel of a backup's state and returns an error if one is encountered
|
||||
// while performing this action.
|
||||
func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, successful bool) error {
|
||||
r := api.NewRequester()
|
||||
rerr, err := r.SendBackupStatus(uuid, ad.ToRequest(successful))
|
||||
if rerr != nil || err != nil {
|
||||
if err != nil {
|
||||
if err := s.client.SetBackupStatus(s.Context(), uuid, ad.ToRequest(successful)); err != nil {
|
||||
if !remote.IsRequestError(err) {
|
||||
s.Log().WithFields(log.Fields{
|
||||
"backup": uuid,
|
||||
"error": err,
|
||||
@@ -25,74 +26,58 @@ func (s *Server) notifyPanelOfBackup(uuid string, ad *backup.ArchiveDetails, suc
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.New(rerr.String())
|
||||
return errors.New(err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get all of the ignored files for a server based on its .pteroignore file in the root.
|
||||
func (s *Server) getServerwideIgnoredFiles() ([]string, error) {
|
||||
var ignored []string
|
||||
|
||||
f, err := os.Open(path.Join(s.Filesystem().Path(), ".pteroignore"))
|
||||
func (s *Server) getServerwideIgnoredFiles() (string, error) {
|
||||
f, st, err := s.Filesystem().File(".pteroignore")
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return nil, err
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
return "", nil
|
||||
}
|
||||
} else {
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
// Only include non-empty lines, for the sake of clarity...
|
||||
if t := scanner.Text(); t != "" {
|
||||
ignored = append(ignored, t)
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
if st.Mode()&os.ModeSymlink != 0 || st.Size() > 32*1024 {
|
||||
// Don't read a symlinked ignore file, or a file larger than 32KiB in size.
|
||||
return "", nil
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return ignored, nil
|
||||
}
|
||||
|
||||
// Get the backup files to include when generating it.
|
||||
func (s *Server) GetIncludedBackupFiles(ignored []string) (*backup.IncludedFiles, error) {
|
||||
// If no ignored files are present in the request, check for a .pteroignore file in the root
|
||||
// of the server files directory, and use that to generate the backup.
|
||||
if len(ignored) == 0 {
|
||||
// Backup performs a server backup and then emits the event over the server
|
||||
// websocket. We let the actual backup system handle notifying the panel of the
|
||||
// status, but that won't emit a websocket event.
|
||||
func (s *Server) Backup(b backup.BackupInterface) error {
|
||||
ignored := b.Ignored()
|
||||
if b.Ignored() == "" {
|
||||
if i, err := s.getServerwideIgnoredFiles(); err != nil {
|
||||
s.Log().WithField("error", err).Warn("failed to retrieve ignored files listing for server")
|
||||
log.WithField("server", s.Id()).WithField("error", err).Warn("failed to get server-wide ignored files")
|
||||
} else {
|
||||
ignored = i
|
||||
}
|
||||
}
|
||||
|
||||
// Get the included files based on the root path and the ignored files provided.
|
||||
return s.Filesystem().GetIncludedFiles(s.Filesystem().Path(), ignored)
|
||||
}
|
||||
|
||||
// Performs a server backup and then emits the event over the server websocket. We
|
||||
// let the actual backup system handle notifying the panel of the status, but that
|
||||
// won't emit a websocket event.
|
||||
func (s *Server) Backup(b backup.BackupInterface) error {
|
||||
// Get the included files based on the root path and the ignored files provided.
|
||||
inc, err := s.GetIncludedBackupFiles(b.Ignored())
|
||||
ad, err := b.Generate(s.Filesystem().Path(), ignored)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
ad, err := b.Generate(inc, s.Filesystem().Path())
|
||||
if err != nil {
|
||||
if notifyError := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); notifyError != nil {
|
||||
if err := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); err != nil {
|
||||
s.Log().WithFields(log.Fields{
|
||||
"backup": b.Identifier(),
|
||||
"error": notifyError,
|
||||
"error": err,
|
||||
}).Warn("failed to notify panel of failed backup state")
|
||||
} else {
|
||||
s.Log().WithField("backup", b.Identifier()).Info("notified panel of failed backup state")
|
||||
}
|
||||
|
||||
s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
|
||||
_ = s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
|
||||
"uuid": b.Identifier(),
|
||||
"is_successful": false,
|
||||
"checksum": "",
|
||||
@@ -100,20 +85,23 @@ func (s *Server) Backup(b backup.BackupInterface) error {
|
||||
"file_size": 0,
|
||||
})
|
||||
|
||||
return errors.Wrap(err, "error while generating server backup")
|
||||
return errors.WrapIf(err, "backup: error while generating server backup")
|
||||
}
|
||||
|
||||
// Try to notify the panel about the status of this backup. If for some reason this request
|
||||
// fails, delete the archive from the daemon and return that error up the chain to the caller.
|
||||
if notifyError := s.notifyPanelOfBackup(b.Identifier(), ad, true); notifyError != nil {
|
||||
b.Remove()
|
||||
_ = b.Remove()
|
||||
|
||||
return notifyError
|
||||
s.Log().WithField("error", notifyError).Info("failed to notify panel of successful backup state")
|
||||
return err
|
||||
} else {
|
||||
s.Log().WithField("backup", b.Identifier()).Info("notified panel of successful backup state")
|
||||
}
|
||||
|
||||
// Emit an event over the socket so we can update the backup in realtime on
|
||||
// the frontend for the server.
|
||||
s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
|
||||
_ = s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
|
||||
"uuid": b.Identifier(),
|
||||
"is_successful": true,
|
||||
"checksum": ad.Checksum,
|
||||
@@ -123,3 +111,49 @@ func (s *Server) Backup(b backup.BackupInterface) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RestoreBackup calls the Restore function on the provided backup. Once this
|
||||
// restoration is completed an event is emitted to the websocket to notify the
|
||||
// Panel that it has been completed.
|
||||
//
|
||||
// In addition to the websocket event an API call is triggered to notify the
|
||||
// Panel of the new state.
|
||||
func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (err error) {
|
||||
s.Config().SetSuspended(true)
|
||||
// Local backups will not pass a reader through to this function, so check first
|
||||
// to make sure it is a valid reader before trying to close it.
|
||||
defer func() {
|
||||
s.Config().SetSuspended(false)
|
||||
if reader != nil {
|
||||
reader.Close()
|
||||
}
|
||||
}()
|
||||
// Send an API call to the Panel as soon as this function is done running so that
|
||||
// the Panel is informed of the restoration status of this backup.
|
||||
defer func() {
|
||||
if rerr := s.client.SendRestorationStatus(s.Context(), b.Identifier(), err == nil); rerr != nil {
|
||||
s.Log().WithField("error", rerr).WithField("backup", b.Identifier()).Error("failed to notify Panel of backup restoration status")
|
||||
}
|
||||
}()
|
||||
|
||||
// Don't try to restore the server until we have completely stopped the running
|
||||
// instance, otherwise you'll likely hit all types of write errors due to the
|
||||
// server being suspended.
|
||||
if s.Environment.State() != environment.ProcessOfflineState {
|
||||
if err = s.Environment.WaitForStop(120, false); err != nil {
|
||||
if !client.IsErrNotFound(err) {
|
||||
return errors.WrapIf(err, "server/backup: restore: failed to wait for container stop")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to restore the backup to the server by running through each entry
|
||||
// in the file one at a time and writing them to the disk.
|
||||
s.Log().Debug("starting file writing process for backup restoration")
|
||||
err = b.Restore(reader, func(file string, r io.Reader) error {
|
||||
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
|
||||
return s.Filesystem().Writefile(file, r)
|
||||
})
|
||||
|
||||
return errors.WithStackIf(err)
|
||||
}
|
||||
|
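RestoreBackup leans on a named error return so the deferred Panel notification reports whatever outcome the function ends with, regardless of which return path was taken. The pattern in isolation looks roughly like this (notify is a placeholder for the real SendRestorationStatus call):

package main

import (
	"errors"
	"fmt"
)

func notify(backup string, successful bool) {
	fmt.Printf("notify panel: backup=%s successful=%v\n", backup, successful)
}

// restore uses a named error return so the deferred notification sees whatever
// error value the function ultimately returns, even on early returns.
func restore(backup string, fail bool) (err error) {
	defer func() {
		notify(backup, err == nil)
	}()

	if fail {
		return errors.New("simulated restore failure")
	}
	return nil
}

func main() {
	_ = restore("example-backup-uuid", false) // notify panel: ... successful=true
	_ = restore("example-backup-uuid", true)  // notify panel: ... successful=false
}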
||||
@@ -1,132 +0,0 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"github.com/apex/log"
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/remeh/sizedwaitgroup"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Archive struct {
|
||||
sync.Mutex
|
||||
|
||||
TrimPrefix string
|
||||
Files *IncludedFiles
|
||||
}
|
||||
|
||||
// Creates an archive at dst with all of the files defined in the included files struct.
|
||||
func (a *Archive) Create(dst string, ctx context.Context) error {
|
||||
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
maxCpu := runtime.NumCPU() / 2
|
||||
if maxCpu > 4 {
|
||||
maxCpu = 4
|
||||
}
|
||||
|
||||
gzw, _ := gzip.NewWriterLevel(f, gzip.BestSpeed)
|
||||
_ = gzw.SetConcurrency(1<<20, maxCpu)
|
||||
|
||||
defer gzw.Flush()
|
||||
defer gzw.Close()
|
||||
|
||||
tw := tar.NewWriter(gzw)
|
||||
defer tw.Flush()
|
||||
defer tw.Close()
|
||||
|
||||
wg := sizedwaitgroup.New(10)
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
// Iterate over all of the files to be included and put them into the archive. This is
|
||||
// done as a concurrent goroutine to speed things along. If an error is encountered at
|
||||
// any step, the entire process is aborted.
|
||||
for _, p := range a.Files.All() {
|
||||
p := p
|
||||
g.Go(func() error {
|
||||
wg.Add()
|
||||
defer wg.Done()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.WithStack(ctx.Err())
|
||||
default:
|
||||
return a.addToArchive(p, tw)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Block until the entire routine is completed.
|
||||
if err := g.Wait(); err != nil {
|
||||
f.Close()
|
||||
|
||||
// Attempt to remove the archive if there is an error, report that error to
|
||||
// the logger if it fails.
|
||||
if rerr := os.Remove(dst); rerr != nil && !os.IsNotExist(rerr) {
|
||||
log.WithField("location", dst).Warn("failed to delete corrupted backup archive")
|
||||
}
|
||||
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Adds a single file to the existing tar archive writer.
|
||||
func (a *Archive) addToArchive(p string, w *tar.Writer) error {
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
// If you try to backup something that no longer exists (got deleted somewhere during the process
|
||||
// but not by this process), just skip over it and don't kill the entire backup.
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
s, err := f.Stat()
|
||||
if err != nil {
|
||||
// Same as above, don't kill the process just because the file no longer exists.
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
header := &tar.Header{
|
||||
// Trim the long server path from the name of the file so that the resulting
|
||||
// archive is exactly how the user would see it in the panel file manager.
|
||||
Name: strings.TrimPrefix(p, a.TrimPrefix),
|
||||
Size: s.Size(),
|
||||
Mode: int64(s.Mode()),
|
||||
ModTime: s.ModTime(),
|
||||
}
|
||||
|
||||
// These actions must occur sequentially, even if this function is called multiple times
|
||||
// in parallel. You'll get some nasty panics otherwise.
|
||||
a.Lock()
|
||||
defer a.Unlock()
|
||||
|
||||
if err := w.WriteHeader(header); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 4*1024)
|
||||
if _, err := io.CopyBuffer(w, f, buf); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
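The removed Archive.Create illustrates the fan-out pattern it used: each file is added from its own goroutine through an errgroup, and the group's context cancels outstanding work as soon as one addition fails. A stripped-down sketch of that abort-on-first-error pattern (the per-item work is a stand-in for addToArchive):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// processAll runs work for every item concurrently; the first error cancels
// the group's context so the remaining goroutines can bail out early.
func processAll(items []string, work func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(context.Background())
	for _, it := range items {
		it := it // capture the loop variable for the goroutine
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				return work(ctx, it)
			}
		})
	}
	return g.Wait()
}

func main() {
	err := processAll([]string{"a.txt", "b.txt", "c.txt"}, func(_ context.Context, name string) error {
		if name == "b.txt" {
			return fmt.Errorf("failed adding %s", name)
		}
		return nil
	})
	fmt.Println(err)
}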
@@ -3,30 +3,36 @@ package backup
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/apex/log"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
)
|
||||
|
||||
type AdapterType string
|
||||
|
||||
const (
|
||||
LocalBackupAdapter = "wings"
|
||||
S3BackupAdapter = "s3"
|
||||
LocalBackupAdapter AdapterType = "wings"
|
||||
S3BackupAdapter AdapterType = "s3"
|
||||
)
|
||||
|
||||
// RestoreCallback is a generic restoration callback that exists for both local
|
||||
// and remote backups allowing the files to be restored.
|
||||
type RestoreCallback func(file string, r io.Reader) error
|
||||
|
||||
type ArchiveDetails struct {
|
||||
Checksum string `json:"checksum"`
|
||||
ChecksumType string `json:"checksum_type"`
|
||||
Size int64 `json:"size"`
|
||||
}
|
||||
|
||||
// Returns a request object.
|
||||
func (ad *ArchiveDetails) ToRequest(successful bool) api.BackupRequest {
|
||||
return api.BackupRequest{
|
||||
// ToRequest returns a request object.
|
||||
func (ad *ArchiveDetails) ToRequest(successful bool) remote.BackupRequest {
|
||||
return remote.BackupRequest{
|
||||
Checksum: ad.Checksum,
|
||||
ChecksumType: ad.ChecksumType,
|
||||
Size: ad.Size,
|
||||
@@ -41,37 +47,48 @@ type Backup struct {
|
||||
|
||||
// An array of files to ignore when generating this backup. This should be
|
||||
// compatible with a standard .gitignore structure.
|
||||
IgnoredFiles []string `json:"ignored_files"`
|
||||
Ignore string `json:"ignore"`
|
||||
|
||||
client remote.Client
|
||||
adapter AdapterType
|
||||
logContext map[string]interface{}
|
||||
}
|
||||
|
||||
// noinspection GoNameStartsWithPackageName
|
||||
type BackupInterface interface {
|
||||
// Returns the UUID of this backup as tracked by the panel instance.
|
||||
// SetClient sets the API request client on the backup interface.
|
||||
SetClient(c remote.Client)
|
||||
// Identifier returns the UUID of this backup as tracked by the panel
|
||||
// instance.
|
||||
Identifier() string
|
||||
|
||||
// Generates a backup in whatever the configured source for the specific
|
||||
// implementation is.
|
||||
Generate(*IncludedFiles, string) (*ArchiveDetails, error)
|
||||
|
||||
// Returns the ignored files for this backup instance.
|
||||
Ignored() []string
|
||||
|
||||
// Returns a SHA256 checksum for the generated backup.
|
||||
// WithLogContext attaches additional context to the log output for this
|
||||
// backup.
|
||||
WithLogContext(map[string]interface{})
|
||||
// Generate creates a backup in whatever the configured source for the
|
||||
// specific implementation is.
|
||||
Generate(string, string) (*ArchiveDetails, error)
|
||||
// Ignored returns the ignored files for this backup instance.
|
||||
Ignored() string
|
||||
// Checksum returns a SHA1 checksum for the generated backup.
|
||||
Checksum() ([]byte, error)
|
||||
|
||||
// Returns the size of the generated backup.
|
||||
// Size returns the size of the generated backup.
|
||||
Size() (int64, error)
|
||||
|
||||
// Returns the path to the backup on the machine. This is not always the final
|
||||
// storage location of the backup, simply the location we're using to store
|
||||
// it until it is moved to the final spot.
|
||||
// Path returns the path to the backup on the machine. This is not always
|
||||
// the final storage location of the backup, simply the location we're using
|
||||
// to store it until it is moved to the final spot.
|
||||
Path() string
|
||||
|
||||
// Returns details about the archive.
|
||||
// Details returns details about the archive.
|
||||
Details() *ArchiveDetails
|
||||
|
||||
// Removes a backup file.
|
||||
// Remove removes a backup file.
|
||||
Remove() error
|
||||
// Restore is called when a backup is ready to be restored to the disk from
|
||||
// the given source. Not every backup implementation will support this nor
|
||||
// will every implementation require a reader be provided.
|
||||
Restore(reader io.Reader, callback RestoreCallback) error
|
||||
}
|
||||
|
||||
func (b *Backup) SetClient(c remote.Client) {
|
||||
b.client = c
|
||||
}
|
||||
|
||||
func (b *Backup) Identifier() string {
|
||||
@@ -87,7 +104,7 @@ func (b *Backup) Path() string {
|
||||
func (b *Backup) Size() (int64, error) {
|
||||
st, err := os.Stat(b.Path())
|
||||
if err != nil {
|
||||
return 0, errors.WithStack(err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return st.Size(), nil
|
||||
@@ -99,7 +116,7 @@ func (b *Backup) Checksum() ([]byte, error) {
|
||||
|
||||
f, err := os.Open(b.Path())
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
@@ -117,20 +134,25 @@ func (b *Backup) Details() *ArchiveDetails {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
|
||||
l := log.WithField("backup_id", b.Uuid)
|
||||
|
||||
var checksum string
|
||||
// Calculate the checksum for the file.
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
l.Info("computing checksum for backup...")
|
||||
resp, err := b.Checksum()
|
||||
if err != nil {
|
||||
log.WithFields(log.Fields{
|
||||
"backup": b.Identifier(),
|
||||
"error": err,
|
||||
}).Error("failed to calculate checksum for backup")
|
||||
return
|
||||
}
|
||||
|
||||
checksum = hex.EncodeToString(resp)
|
||||
l.WithField("checksum", checksum).Info("computed checksum for backup")
|
||||
}()
|
||||
|
||||
var sz int64
|
||||
@@ -153,6 +175,16 @@ func (b *Backup) Details() *ArchiveDetails {
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Backup) Ignored() []string {
|
||||
return b.IgnoredFiles
|
||||
func (b *Backup) Ignored() string {
|
||||
return b.Ignore
|
||||
}
|
||||
|
||||
// Returns a logger instance for this backup with the additional context fields
|
||||
// assigned to the output.
|
||||
func (b *Backup) log() *log.Entry {
|
||||
l := log.WithField("backup", b.Identifier()).WithField("adapter", b.adapter)
|
||||
for k, v := range b.logContext {
|
||||
l = l.WithField(k, v)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/pkg/errors"
|
||||
"errors"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
type LocalBackup struct {
|
||||
@@ -12,19 +17,24 @@ type LocalBackup struct {
|
||||
|
||||
var _ BackupInterface = (*LocalBackup)(nil)
|
||||
|
||||
// Locates the backup for a server and returns the local path. This will obviously only
|
||||
// work if the backup was created as a local backup.
|
||||
func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
|
||||
b := &LocalBackup{
|
||||
func NewLocal(client remote.Client, uuid string, ignore string) *LocalBackup {
|
||||
return &LocalBackup{
|
||||
Backup{
|
||||
client: client,
|
||||
Uuid: uuid,
|
||||
IgnoredFiles: nil,
|
||||
Ignore: ignore,
|
||||
adapter: LocalBackupAdapter,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// LocateLocal finds the backup for a server and returns the local path. This
|
||||
// will obviously only work if the backup was created as a local backup.
|
||||
func LocateLocal(client remote.Client, uuid string) (*LocalBackup, os.FileInfo, error) {
|
||||
b := NewLocal(client, uuid, "")
|
||||
st, err := os.Stat(b.Path())
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithStack(err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if st.IsDir() {
|
||||
@@ -34,22 +44,44 @@ func LocateLocal(uuid string) (*LocalBackup, os.FileInfo, error) {
|
||||
return b, st, nil
|
||||
}
|
||||
|
||||
// Removes a backup from the system.
|
||||
// Remove removes a backup from the system.
|
||||
func (b *LocalBackup) Remove() error {
|
||||
return os.Remove(b.Path())
|
||||
}
|
||||
|
||||
// Generates a backup of the selected files and pushes it to the defined location
|
||||
// for this instance.
|
||||
func (b *LocalBackup) Generate(included *IncludedFiles, prefix string) (*ArchiveDetails, error) {
|
||||
a := &Archive{
|
||||
TrimPrefix: prefix,
|
||||
Files: included,
|
||||
// WithLogContext attaches additional context to the log output for this backup.
|
||||
func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
|
||||
b.logContext = c
|
||||
}
|
||||
|
||||
if err := a.Create(b.Path(), context.Background()); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
// Generate generates a backup of the selected files and pushes it to the
|
||||
// defined location for this instance.
|
||||
func (b *LocalBackup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
|
||||
a := &filesystem.Archive{
|
||||
BasePath: basePath,
|
||||
Ignore: ignore,
|
||||
}
|
||||
|
||||
b.log().Info("creating backup for server...")
|
||||
if err := a.Create(b.Path()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.log().Info("created backup successfully")
|
||||
|
||||
return b.Details(), nil
|
||||
}
|
||||
|
||||
// Restore will walk over the archive and call the callback function for each
|
||||
// file encountered.
|
||||
func (b *LocalBackup) Restore(_ io.Reader, callback RestoreCallback) error {
|
||||
return archiver.Walk(b.Path(), func(f archiver.File) error {
|
||||
if f.IsDir() {
|
||||
return nil
|
||||
}
|
||||
name, err := system.ExtractArchiveSourceName(f, "/")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(name, f)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type Request struct {
|
||||
Adapter string `json:"adapter"`
|
||||
Uuid string `json:"uuid"`
|
||||
IgnoredFiles []string `json:"ignored_files"`
|
||||
PresignedUrl string `json:"presigned_url"`
|
||||
}
|
||||
|
||||
// Generates a new local backup struct.
|
||||
func (r *Request) NewLocalBackup() (*LocalBackup, error) {
|
||||
if r.Adapter != LocalBackupAdapter {
|
||||
return nil, errors.New(fmt.Sprintf("cannot create local backup using [%s] adapter", r.Adapter))
|
||||
}
|
||||
|
||||
return &LocalBackup{
|
||||
Backup{
|
||||
Uuid: r.Uuid,
|
||||
IgnoredFiles: r.IgnoredFiles,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Generates a new S3 backup struct.
|
||||
func (r *Request) NewS3Backup() (*S3Backup, error) {
|
||||
if r.Adapter != S3BackupAdapter {
|
||||
return nil, errors.New(fmt.Sprintf("cannot create s3 backup using [%s] adapter", r.Adapter))
|
||||
}
|
||||
|
||||
if len(r.PresignedUrl) == 0 {
|
||||
return nil, errors.New("a valid presigned S3 upload URL must be provided to use the [s3] adapter")
|
||||
}
|
||||
|
||||
return &S3Backup{
|
||||
Backup: Backup{
|
||||
Uuid: r.Uuid,
|
||||
IgnoredFiles: r.IgnoredFiles,
|
||||
},
|
||||
PresignedUrl: r.PresignedUrl,
|
||||
}, nil
|
||||
}
|
||||
@@ -1,87 +1,195 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
)
|
||||
|
||||
type S3Backup struct {
|
||||
Backup
|
||||
|
||||
// The pre-signed upload endpoint for the generated backup. This must be
|
||||
// provided otherwise this request will fail. This allows us to keep all
|
||||
// of the keys off the daemon instances and the panel can handle generating
|
||||
// the credentials for us.
|
||||
PresignedUrl string
|
||||
}
|
||||
|
||||
var _ BackupInterface = (*S3Backup)(nil)
|
||||
|
||||
// Generates a new backup on the disk, moves it into the S3 bucket via the provided
|
||||
// presigned URL, and then deletes the backup from the disk.
|
||||
func (s *S3Backup) Generate(included *IncludedFiles, prefix string) (*ArchiveDetails, error) {
|
||||
defer s.Remove()
|
||||
|
||||
a := &Archive{
|
||||
TrimPrefix: prefix,
|
||||
Files: included,
|
||||
}
|
||||
|
||||
if err := a.Create(s.Path(), context.Background()); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
rc, err := os.Open(s.Path())
|
||||
if err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
if resp, err := s.generateRemoteRequest(rc); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
} else {
|
||||
resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed to put S3 object, %d:%s", resp.StatusCode, resp.Status)
|
||||
func NewS3(client remote.Client, uuid string, ignore string) *S3Backup {
|
||||
return &S3Backup{
|
||||
Backup{
|
||||
client: client,
|
||||
Uuid: uuid,
|
||||
Ignore: ignore,
|
||||
adapter: S3BackupAdapter,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return s.Details(), err
|
||||
}
|
||||
|
||||
// Removes a backup from the system.
|
||||
// Remove removes a backup from the system.
|
||||
func (s *S3Backup) Remove() error {
|
||||
return os.Remove(s.Path())
|
||||
}
|
||||
|
||||
// Generates the remote S3 request and begins the upload.
|
||||
func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) (*http.Response, error) {
|
||||
r, err := http.NewRequest(http.MethodPut, s.PresignedUrl, nil)
|
||||
// WithLogContext attaches additional context to the log output for this backup.
|
||||
func (s *S3Backup) WithLogContext(c map[string]interface{}) {
|
||||
s.logContext = c
|
||||
}
|
||||
|
||||
// Generate creates a new backup on the disk, moves it into the S3 bucket via
|
||||
// the provided presigned URL, and then deletes the backup from the disk.
|
||||
func (s *S3Backup) Generate(basePath, ignore string) (*ArchiveDetails, error) {
|
||||
defer s.Remove()
|
||||
|
||||
a := &filesystem.Archive{
|
||||
BasePath: basePath,
|
||||
Ignore: ignore,
|
||||
}
|
||||
|
||||
s.log().Info("creating backup for server...")
|
||||
if err := a.Create(s.Path()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.log().Info("created backup successfully")
|
||||
|
||||
rc, err := os.Open(s.Path())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
if sz, err := s.Size(); err != nil {
|
||||
if err := s.generateRemoteRequest(rc); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
r.ContentLength = sz
|
||||
r.Header.Add("Content-Length", strconv.Itoa(int(sz)))
|
||||
}
|
||||
|
||||
return s.Details(), nil
|
||||
}
|
||||
|
||||
// Reader provides a wrapper around an existing io.Reader
|
||||
// but implements io.Closer in order to satisfy an io.ReadCloser.
|
||||
type Reader struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func (Reader) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Generates the remote S3 request and begins the upload.
|
||||
func (s *S3Backup) generateRemoteRequest(rc io.ReadCloser) error {
|
||||
defer rc.Close()
|
||||
|
||||
s.log().Debug("attempting to get size of backup...")
|
||||
size, err := s.Backup.Size()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.log().WithField("size", size).Debug("got size of backup")
|
||||
|
||||
s.log().Debug("attempting to get S3 upload urls from Panel...")
|
||||
urls, err := s.client.GetBackupRemoteUploadURLs(context.Background(), s.Backup.Uuid, size)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.log().Debug("got S3 upload urls from the Panel")
|
||||
s.log().WithField("parts", len(urls.Parts)).Info("attempting to upload backup to s3 endpoint...")
|
||||
|
||||
handlePart := func(part string, size int64) (string, error) {
|
||||
r, err := http.NewRequest(http.MethodPut, part, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
r.ContentLength = size
|
||||
r.Header.Add("Content-Length", strconv.Itoa(int(size)))
|
||||
r.Header.Add("Content-Type", "application/x-gzip")
|
||||
|
||||
// Limit the reader to the size of the part.
|
||||
r.Body = Reader{Reader: io.LimitReader(rc, size)}
|
||||
|
||||
// This http request can block forever due to it not having a timeout,
|
||||
// but we are uploading up to 5GB of data, so there is not really
|
||||
// a good way to handle a timeout on this.
|
||||
res, err := http.DefaultClient.Do(r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
// Handle non-200 status codes.
|
||||
if res.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("failed to put S3 object part, %d:%s", res.StatusCode, res.Status)
|
||||
}
|
||||
|
||||
r.Body = rc
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"endpoint": s.PresignedUrl,
|
||||
"headers": r.Header,
|
||||
}).Debug("uploading backup to remote S3 endpoint")
|
||||
|
||||
return http.DefaultClient.Do(r)
|
||||
// Get the ETag from the uploaded part, this should be sent with the CompleteMultipartUpload request.
|
||||
return res.Header.Get("ETag"), nil
|
||||
}
|
||||
|
||||
for i, part := range urls.Parts {
|
||||
// Get the size for the current part.
|
||||
var partSize int64
|
||||
if i+1 < len(urls.Parts) {
|
||||
partSize = urls.PartSize
|
||||
} else {
|
||||
// This is the remaining size for the last part,
|
||||
// there is not a minimum size limit for the last part.
|
||||
partSize = size - (int64(i) * urls.PartSize)
|
||||
}
|
||||
|
||||
// Attempt to upload the part.
|
||||
if _, err := handlePart(part, partSize); err != nil {
|
||||
s.log().WithField("part_id", i+1).WithError(err).Warn("failed to upload part")
|
||||
return err
|
||||
}
|
||||
|
||||
s.log().WithField("part_id", i+1).Info("successfully uploaded backup part")
|
||||
}
|
||||
|
||||
s.log().WithField("parts", len(urls.Parts)).Info("backup has been successfully uploaded")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restore will read from the provided reader assuming that it is a gzipped
|
||||
// tar reader. When a file is encountered in the archive the callback function
|
||||
// will be triggered. If the callback returns an error the entire process is
|
||||
// stopped, otherwise this function will run until all files have been written.
|
||||
//
|
||||
// This restoration uses a workerpool to use up to the number of CPUs available
|
||||
// on the machine when writing files to the disk.
|
||||
func (s *S3Backup) Restore(r io.Reader, callback RestoreCallback) error {
|
||||
reader := r
|
||||
// Steal the logic we use for making backups which will be applied when restoring
|
||||
// this specific backup. This allows us to prevent overloading the disk unintentionally.
|
||||
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
|
||||
reader = ratelimit.Reader(r, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
|
||||
}
|
||||
gr, err := gzip.NewReader(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer gr.Close()
|
||||
tr := tar.NewReader(gr)
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
if header.Typeflag == tar.TypeReg {
|
||||
if err := callback(header.Name, tr); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
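Each part of the multipart upload above is a plain HTTP PUT against a presigned URL: the body is capped at the part size with a LimitReader, Content-Length is set explicitly, and the ETag from the response identifies the part for the later CompleteMultipartUpload call. A reduced sketch of uploading a single part (the URL, path, and part size are placeholders):

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
)

// uploadPart PUTs exactly partSize bytes from src to a presigned URL and
// returns the ETag header that identifies the uploaded part.
func uploadPart(url string, src io.Reader, partSize int64) (string, error) {
	req, err := http.NewRequest(http.MethodPut, url, io.LimitReader(src, partSize))
	if err != nil {
		return "", err
	}
	req.ContentLength = partSize
	req.Header.Set("Content-Length", strconv.FormatInt(partSize, 10))
	req.Header.Set("Content-Type", "application/x-gzip")

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status uploading part: %s", res.Status)
	}
	return res.Header.Get("ETag"), nil
}

func main() {
	f, err := os.Open("/tmp/backup.tar.gz")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// A presigned URL would normally come from the Panel's upload-URL endpoint.
	etag, err := uploadPart("https://bucket.s3.example.com/part-1?signature=placeholder", f, 5*1024*1024)
	fmt.Println(etag, err)
}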
@@ -1,25 +0,0 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type IncludedFiles struct {
|
||||
sync.RWMutex
|
||||
files []string
|
||||
}
|
||||
|
||||
// Pushes an additional file or folder onto the struct.
|
||||
func (i *IncludedFiles) Push(p string) {
|
||||
i.Lock()
|
||||
i.files = append(i.files, p) // ~~
|
||||
i.Unlock()
|
||||
}
|
||||
|
||||
// Returns all of the files that were marked as being included.
|
||||
func (i *IncludedFiles) All() []string {
|
||||
i.RLock()
|
||||
defer i.RUnlock()
|
||||
|
||||
return i.files
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
package server
|
||||
|
||||
import "sync"
|
||||
|
||||
type Collection struct {
|
||||
items []*Server
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// Create a new collection from a slice of servers.
|
||||
func NewCollection(servers []*Server) *Collection {
|
||||
return &Collection{
|
||||
items: servers,
|
||||
}
|
||||
}
|
||||
|
||||
// Return all of the items in the collection.
|
||||
func (c *Collection) All() []*Server {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
return c.items
|
||||
}
|
||||
|
||||
// Adds an item to the collection store.
|
||||
func (c *Collection) Add(s *Server) {
|
||||
c.Lock()
|
||||
c.items = append(c.items, s)
|
||||
c.Unlock()
|
||||
}
|
||||
|
||||
// Returns only those items matching the filter criteria.
|
||||
func (c *Collection) Filter(filter func(*Server) bool) []*Server {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
r := make([]*Server, 0)
|
||||
for _, v := range c.items {
|
||||
if filter(v) {
|
||||
r = append(r, v)
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Returns a single element from the collection matching the filter. If nothing is
|
||||
// found a nil result is returned.
|
||||
func (c *Collection) Find(filter func(*Server) bool) *Server {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
|
||||
for _, v := range c.items {
|
||||
if filter(v) {
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Removes all items from the collection that match the filter function.
|
||||
func (c *Collection) Remove(filter func(*Server) bool) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
r := make([]*Server, 0)
|
||||
for _, v := range c.items {
|
||||
if !filter(v) {
|
||||
r = append(r, v)
|
||||
}
|
||||
}
|
||||
|
||||
c.items = r
|
||||
}
|
||||
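The collection shown above is a mutex-guarded slice with Add/Filter/Find/Remove helpers. A quick usage sketch against a simplified element type (the real collection stores *server.Server values):

package main

import (
	"fmt"
	"sync"
)

// itemCollection mirrors the shape of the server collection above, but stores
// plain strings so the example stays self-contained.
type itemCollection struct {
	sync.RWMutex
	items []string
}

func (c *itemCollection) Add(s string) {
	c.Lock()
	defer c.Unlock()
	c.items = append(c.items, s)
}

// Find returns the first item matching the filter, or "" when nothing matches.
func (c *itemCollection) Find(filter func(string) bool) string {
	c.RLock()
	defer c.RUnlock()
	for _, v := range c.items {
		if filter(v) {
			return v
		}
	}
	return ""
}

func main() {
	c := &itemCollection{}
	c.Add("a1b2")
	c.Add("c3d4")
	fmt.Println(c.Find(func(id string) bool { return id == "c3d4" }))
}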
@@ -1,8 +1,9 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/gammazero/workerpool"
|
||||
"runtime"
|
||||
|
||||
"github.com/gammazero/workerpool"
|
||||
)
|
||||
|
||||
// Parent function that will update all of the defined configuration files for a server
|
||||
|
||||
@@ -1,10 +1,21 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"sync"
|
||||
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
)
|
||||
|
||||
type EggConfiguration struct {
|
||||
// The internal UUID of the Egg on the Panel.
|
||||
ID string
|
||||
|
||||
// Maintains a list of files that are blacklisted for opening/editing/downloading
|
||||
// or basically any type of access on the server by any user. This is NOT the same
|
||||
// as a per-user denylist, this is defined at the Egg level.
|
||||
FileDenylist []string `json:"file_denylist"`
|
||||
}
|
||||
|
||||
type Configuration struct {
|
||||
mu sync.RWMutex
|
||||
|
||||
@@ -33,6 +44,7 @@ type Configuration struct {
|
||||
CrashDetectionEnabled bool `default:"true" json:"enabled" yaml:"enabled"`
|
||||
Mounts []Mount `json:"mounts"`
|
||||
Resources ResourceUsage `json:"resources"`
|
||||
Egg EggConfiguration `json:"egg,omitempty"`
|
||||
|
||||
Container struct {
|
||||
// Defines the Docker image that will be used for this server
|
||||
|
||||
@@ -3,13 +3,14 @@ package server
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/mitchellh/colorstring"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/mitchellh/colorstring"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
var ErrTooMuchConsoleData = errors.New("console is outputting too much data")
|
||||
@@ -26,7 +27,7 @@ type ConsoleThrottler struct {
|
||||
|
||||
// Whether or not the console output is being throttled. It is up to calling code to
|
||||
// determine what to do if it is.
|
||||
isThrottled system.AtomicBool
|
||||
isThrottled *system.AtomicBool
|
||||
|
||||
// The total number of lines processed so far during the given time period.
|
||||
timerCancel *context.CancelFunc
|
||||
@@ -36,7 +37,7 @@ type ConsoleThrottler struct {
|
||||
func (ct *ConsoleThrottler) Reset() {
|
||||
atomic.StoreUint64(&ct.count, 0)
|
||||
atomic.StoreUint64(&ct.activations, 0)
|
||||
ct.isThrottled.Set(false)
|
||||
ct.isThrottled.Store(false)
|
||||
}
|
||||
|
||||
// Triggers an activation for a server. You can also decrement the number of activations
|
||||
@@ -57,55 +58,21 @@ func (ct *ConsoleThrottler) markActivation(increment bool) uint64 {
|
||||
// Determines if the console is currently being throttled. Calls to this function can be used to
|
||||
// determine if output should be funneled along to the websocket processes.
|
||||
func (ct *ConsoleThrottler) Throttled() bool {
|
||||
return ct.isThrottled.Get()
|
||||
return ct.isThrottled.Load()
|
||||
}
|
||||
|
||||
// Starts a timer that runs in a separate thread and will continually decrement the lines processed
|
||||
// and number of activations, regardless of the current console message volume.
|
||||
func (ct *ConsoleThrottler) StartTimer() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
reset := time.NewTicker(time.Duration(int64(ct.LineResetInterval)) * time.Millisecond)
|
||||
decay := time.NewTicker(time.Duration(int64(ct.DecayInterval)) * time.Millisecond)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
reset.Stop()
|
||||
return
|
||||
case <-reset.C:
|
||||
ct.isThrottled.Set(false)
|
||||
// and number of activations, regardless of the current console message volume. All of the timers
|
||||
// are canceled if the context passed through is canceled.
|
||||
func (ct *ConsoleThrottler) StartTimer(ctx context.Context) {
|
||||
system.Every(ctx, time.Duration(int64(ct.LineResetInterval))*time.Millisecond, func(_ time.Time) {
|
||||
ct.isThrottled.Store(false)
|
||||
atomic.StoreUint64(&ct.count, 0)
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
decay.Stop()
|
||||
return
|
||||
case <-decay.C:
|
||||
system.Every(ctx, time.Duration(int64(ct.DecayInterval))*time.Millisecond, func(_ time.Time) {
|
||||
ct.markActivation(false)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ct.timerCancel = &cancel
|
||||
}
|
||||
|
||||
// Stops a running timer process if one exists. This is only called when the server is deleted since
|
||||
// we want this to always be running. If there is no process currently running nothing will really happen.
|
||||
func (ct *ConsoleThrottler) StopTimer() {
|
||||
ct.mu.Lock()
|
||||
defer ct.mu.Unlock()
|
||||
if ct.timerCancel != nil {
|
||||
c := *ct.timerCancel
|
||||
c()
|
||||
ct.timerCancel = nil
|
||||
}
|
||||
})
|
||||
}
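The rewritten StartTimer above delegates both ticker loops to system.Every, which is not shown in this diff. A minimal sketch of what such a helper could look like, assuming the signature used above (context, interval, callback) and that context and time are imported:

func Every(ctx context.Context, interval time.Duration, fn func(t time.Time)) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				// Stop the loop (and the ticker) as soon as the context is canceled.
				return
			case t := <-ticker.C:
				fn(t)
			}
		}
	}()
}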
|
||||
|
||||
// Handles output from a server's console. This code ensures that a server is not outputting
|
||||
@@ -133,7 +100,7 @@ func (ct *ConsoleThrottler) Increment(onTrigger func()) error {
|
||||
// activation. Once the throttle is triggered and has passed the kill at value we will trigger a server
|
||||
// stop automatically.
|
||||
if atomic.AddUint64(&ct.count, 1) >= ct.Lines && !ct.Throttled() {
|
||||
ct.isThrottled.Set(true)
|
||||
ct.isThrottled.Store(true)
|
||||
if ct.markActivation(true) >= ct.MaximumTriggerCount {
|
||||
return ErrTooMuchConsoleData
|
||||
}
|
||||
@@ -146,15 +113,12 @@ func (ct *ConsoleThrottler) Increment(onTrigger func()) error {
|
||||
|
||||
// Returns the throttler instance for the server or creates a new one.
|
||||
func (s *Server) Throttler() *ConsoleThrottler {
|
||||
s.throttleLock.Lock()
|
||||
defer s.throttleLock.Unlock()
|
||||
|
||||
if s.throttler == nil {
|
||||
s.throttleOnce.Do(func() {
|
||||
s.throttler = &ConsoleThrottler{
|
||||
isThrottled: system.NewAtomicBool(false),
|
||||
ConsoleThrottles: config.Get().Throttles,
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
return s.throttler
|
||||
}
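The Throttler() change above swaps a mutex plus nil check for sync.Once, which guarantees the throttler is constructed exactly once even when many goroutines race to call it. A standalone illustration of the pattern (names are illustrative only):

package main

import (
	"fmt"
	"sync"
)

var (
	once  sync.Once
	value *string
)

// get lazily builds value exactly once, no matter how many goroutines call it.
func get() *string {
	once.Do(func() {
		s := "initialized"
		value = &s
	})
	return value
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(*get()) // prints "initialized" four times; the closure ran once
		}()
	}
	wg.Wait()
}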
|
||||
|
||||
|
||||
@@ -2,11 +2,12 @@ package server
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
)
|
||||
|
||||
type CrashHandler struct {
|
||||
@@ -45,11 +46,10 @@ func (s *Server) handleServerCrash() error {
|
||||
// No point in doing anything here if the server isn't currently offline, there
|
||||
// is no reason to do a crash detection event. If the server crash detection is
|
||||
// disabled we want to skip anything after this as well.
|
||||
if s.GetState() != environment.ProcessOfflineState || !s.Config().CrashDetectionEnabled {
|
||||
if s.Environment.State() != environment.ProcessOfflineState || !s.Config().CrashDetectionEnabled {
|
||||
if !s.Config().CrashDetectionEnabled {
|
||||
s.Log().Debug("server triggered crash detection but handler is disabled for server process")
|
||||
|
||||
s.PublishConsoleOutputFromDaemon("Server detected as crashed; crash detection is disabled for this instance.")
|
||||
s.PublishConsoleOutputFromDaemon("Aborting automatic restart, crash detection is disabled for this instance.")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -57,14 +57,13 @@ func (s *Server) handleServerCrash() error {
|
||||
|
||||
exitCode, oomKilled, err := s.Environment.ExitState()
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// If the system is not configured to detect a clean exit code as a crash, and the
|
||||
// crash is not the result of the program running out of memory, do nothing.
|
||||
if exitCode == 0 && !oomKilled && !config.Get().System.DetectCleanExitAsCrash {
|
||||
if exitCode == 0 && !oomKilled && !config.Get().System.CrashDetection.DetectCleanExitAsCrash {
|
||||
s.Log().Debug("server exited with successful exit code; system is configured to not detect this as a crash")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -73,11 +72,14 @@ func (s *Server) handleServerCrash() error {
|
||||
s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Out of memory: %t", oomKilled))
|
||||
|
||||
c := s.crasher.LastCrashTime()
|
||||
// If the last crash time was within the last 60 seconds we do not want to perform
|
||||
// an automatic reboot of the process. Return an error that can be handled.
|
||||
if !c.IsZero() && c.Add(time.Second*60).After(time.Now()) {
|
||||
s.PublishConsoleOutputFromDaemon("Aborting automatic reboot: last crash occurred less than 60 seconds ago.")
|
||||
timeout := config.Get().System.CrashDetection.Timeout
|
||||
|
||||
// If the last crash time was within the last `timeout` seconds we do not want to perform
|
||||
// an automatic reboot of the process. Return an error that can be handled.
|
||||
//
|
||||
// If timeout is set to 0, always reboot the server (this is probably a terrible idea, but some people want it)
|
||||
if timeout != 0 && !c.IsZero() && c.Add(time.Second*time.Duration(config.Get().System.CrashDetection.Timeout)).After(time.Now()) {
|
||||
s.PublishConsoleOutputFromDaemon("Aborting automatic restart, last crash occurred less than " + strconv.Itoa(timeout) + " seconds ago.")
|
||||
return &crashTooFrequent{}
|
||||
}
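The hunk above reads its settings from a new CrashDetection block on the system configuration. That block is not part of this diff; a sketch of the presumed shape, with field names inferred from the calls above and the struct tags being assumptions:

type CrashDetection struct {
	// Treat a clean (exit code 0) stop as a crash when enabled.
	DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"`

	// Seconds that must pass between crashes before another automatic restart
	// is attempted; a value of 0 disables the cooldown entirely.
	Timeout int `default:"60" yaml:"timeout"`
}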
|
||||
|
||||
|
||||
@@ -1,9 +1,15 @@
|
||||
package server
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
import (
|
||||
"emperror.dev/errors"
|
||||
)
|
||||
|
||||
var ErrIsRunning = errors.New("server is running")
|
||||
var ErrSuspended = errors.New("server is currently in a suspended state")
|
||||
var (
|
||||
ErrIsRunning = errors.New("server is running")
|
||||
ErrSuspended = errors.New("server is currently in a suspended state")
|
||||
ErrServerIsInstalling = errors.New("server is currently installing")
|
||||
ErrServerIsTransferring = errors.New("server is currently being transferred")
|
||||
)
|
||||
|
||||
type crashTooFrequent struct {
|
||||
}
|
||||
|
||||
@@ -14,7 +14,10 @@ const (
|
||||
ConsoleOutputEvent = "console output"
|
||||
StatusEvent = "status"
|
||||
StatsEvent = "stats"
|
||||
BackupRestoreCompletedEvent = "backup restore completed"
|
||||
BackupCompletedEvent = "backup completed"
|
||||
TransferLogsEvent = "transfer logs"
|
||||
TransferStatusEvent = "transfer status"
|
||||
)
|
||||
|
||||
// Returns the server's emitter instance.
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/server/filesystem"
|
||||
"os"
|
||||
)
|
||||
|
||||
func (s *Server) Filesystem() *filesystem.Filesystem {
|
||||
return s.fs
|
||||
}
|
||||
|
||||
// Ensures that the data directory for the server instance exists.
|
||||
func (s *Server) EnsureDataDirectoryExists() error {
|
||||
if _, err := os.Stat(s.fs.Path()); err != nil && !os.IsNotExist(err) {
|
||||
return errors.WithStack(err)
|
||||
} else if err != nil {
|
||||
// Create the server data directory because it does not currently exist
|
||||
// on the system.
|
||||
if err := os.MkdirAll(s.fs.Path(), 0700); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if err := s.fs.Chown("/"); err != nil {
|
||||
s.Log().WithField("error", err).Warn("failed to chown server data directory")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
server/filesystem/archive.go (new file, 226 lines)
@@ -0,0 +1,226 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/juju/ratelimit"
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/klauspost/pgzip"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/sabhiram/go-gitignore"
|
||||
)
|
||||
|
||||
const memory = 4 * 1024
|
||||
|
||||
var pool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
b := make([]byte, memory)
|
||||
return b
|
||||
},
|
||||
}
|
||||
|
||||
type Archive struct {
|
||||
// BasePath is the absolute path to create the archive from where Files and Ignore are
|
||||
// relative to.
|
||||
BasePath string
|
||||
|
||||
// Ignore is a gitignore string (most likely read from a file) of files to ignore
|
||||
// from the archive.
|
||||
Ignore string
|
||||
|
||||
// Files specifies the files to archive; this takes priority over the Ignore option. If
|
||||
// unspecified, all files in the BasePath will be archived unless Ignore is set.
|
||||
Files []string
|
||||
}
|
||||
|
||||
// Create creates an archive at dst with all of the files defined in the
|
||||
// included files struct.
|
||||
func (a *Archive) Create(dst string) error {
|
||||
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Select a writer based off of the WriteLimit configuration option. If there is no
|
||||
// write limit, use the file as the writer.
|
||||
var writer io.Writer
|
||||
if writeLimit := int64(config.Get().System.Backups.WriteLimit * 1024 * 1024); writeLimit > 0 {
|
||||
// Token bucket with a capacity of "writeLimit" MiB, adding "writeLimit" MiB/s
|
||||
// and then wrap the file writer with the token bucket limiter.
|
||||
writer = ratelimit.Writer(f, ratelimit.NewBucketWithRate(float64(writeLimit), writeLimit))
|
||||
} else {
|
||||
writer = f
|
||||
}
|
||||
|
||||
// Create a new gzip writer around the file.
|
||||
gw, _ := pgzip.NewWriterLevel(writer, pgzip.BestSpeed)
|
||||
_ = gw.SetConcurrency(1<<20, 1)
|
||||
defer gw.Close()
|
||||
|
||||
// Create a new tar writer around the gzip writer.
|
||||
tw := tar.NewWriter(gw)
|
||||
defer tw.Close()
|
||||
|
||||
// Configure godirwalk.
|
||||
options := &godirwalk.Options{
|
||||
FollowSymbolicLinks: false,
|
||||
Unsorted: true,
|
||||
Callback: a.callback(tw),
|
||||
}
|
||||
|
||||
// If we're specifically looking for only certain files, or have requested
|
||||
// that certain files be ignored we'll update the callback function to reflect
|
||||
// that request.
|
||||
if len(a.Files) == 0 && len(a.Ignore) > 0 {
|
||||
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
|
||||
|
||||
options.Callback = a.callback(tw, func(_ string, rp string) error {
|
||||
if i.MatchesPath(rp) {
|
||||
return godirwalk.SkipThis
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
} else if len(a.Files) > 0 {
|
||||
options.Callback = a.withFilesCallback(tw)
|
||||
}
|
||||
|
||||
// Recursively walk the path we are archiving.
|
||||
return godirwalk.Walk(a.BasePath, options)
|
||||
}
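A minimal usage sketch for the Archive type added in this file; the paths below are placeholders:

func exampleArchive() error {
	a := &Archive{
		BasePath: "/var/lib/pterodactyl/volumes/example", // hypothetical server root
		Ignore:   "*.log\ncache/",                        // gitignore-style exclusions
	}
	// Everything under BasePath except the ignored entries ends up in the tarball.
	return a.Create("/tmp/example-backup.tar.gz")
}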
|
||||
|
||||
// Callback function used to determine if a given file should be included in the archive
|
||||
// being generated.
|
||||
func (a *Archive) callback(tw *tar.Writer, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
|
||||
return func(path string, de *godirwalk.Dirent) error {
|
||||
// Skip directories because we are walking them recursively.
|
||||
if de.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))
|
||||
|
||||
// Call the additional options passed to this callback function. If any of them return
|
||||
// a non-nil error we will exit immediately.
|
||||
for _, opt := range opts {
|
||||
if err := opt(path, relative); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Add the file to the archive, if it is nested in a directory,
|
||||
// the directory will be automatically "created" in the archive.
|
||||
return a.addToArchive(path, relative, tw)
|
||||
}
|
||||
}
|
||||
|
||||
// Pushes only files defined in the Files key to the final archive.
|
||||
func (a *Archive) withFilesCallback(tw *tar.Writer) func(path string, de *godirwalk.Dirent) error {
|
||||
return a.callback(tw, func(p string, rp string) error {
|
||||
for _, f := range a.Files {
|
||||
// If the given path doesn't match, or doesn't have the same prefix, continue
|
||||
// to the next item in the loop.
|
||||
if p != f && !strings.HasPrefix(p, f) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Once we have a match return a nil value here so that the loop stops and the
|
||||
// call to this function will correctly include the file in the archive. If there
|
||||
// are no matches we'll never make it to this line, and the final error returned
|
||||
// will be the godirwalk.SkipThis error.
|
||||
return nil
|
||||
}
|
||||
|
||||
return godirwalk.SkipThis
|
||||
})
|
||||
}
|
||||
|
||||
// Adds a given file path to the final archive being created.
|
||||
func (a *Archive) addToArchive(p string, rp string, w *tar.Writer) error {
|
||||
// Lstat the file, this will give us the same information as Stat except that it will not
|
||||
// follow a symlink to its target automatically. This is important to avoid including
|
||||
// files that exist outside the server root unintentionally in the backup.
|
||||
s, err := os.Lstat(p)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.WrapIff(err, "failed executing os.Lstat on '%s'", rp)
|
||||
}
|
||||
|
||||
// Resolve the symlink target if the file is a symlink.
|
||||
var target string
|
||||
if s.Mode()&os.ModeSymlink != 0 {
|
||||
// Read the target of the symlink. If there are any errors we will dump them out to
|
||||
// the logs, but we're not going to stop the backup. There are far too many cases of
|
||||
// symlinks causing all sorts of unnecessary pain in this process. Sucks to suck if
|
||||
// it doesn't work.
|
||||
target, err = os.Readlink(s.Name())
|
||||
if err != nil {
|
||||
// Ignore the not exist errors specifically, since there's nothing important about that.
|
||||
if !os.IsNotExist(err) {
|
||||
log.WithField("path", rp).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Get the tar FileInfoHeader in order to add the file to the archive.
|
||||
header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
|
||||
if err != nil {
|
||||
return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", rp)
|
||||
}
|
||||
|
||||
// Fix the header name if the file is not a symlink.
|
||||
if s.Mode()&os.ModeSymlink == 0 {
|
||||
header.Name = rp
|
||||
}
|
||||
|
||||
// Write the tar FileInfoHeader to the archive.
|
||||
if err := w.WriteHeader(header); err != nil {
|
||||
return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", rp)
|
||||
}
|
||||
|
||||
// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
|
||||
if header.Size < 1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the buffer size is larger than the file size, create a smaller buffer to hold the file.
|
||||
var buf []byte
|
||||
if header.Size < memory {
|
||||
buf = make([]byte, header.Size)
|
||||
} else {
|
||||
// Get a fixed-size buffer from the pool to save on allocations.
|
||||
buf = pool.Get().([]byte)
|
||||
defer func() {
|
||||
buf = make([]byte, memory)
|
||||
pool.Put(buf)
|
||||
}()
|
||||
}
|
||||
|
||||
// Open the file.
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.WrapIff(err, "failed to open '%s' for copying", header.Name)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Copy the file's contents to the archive using our buffer.
|
||||
if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
|
||||
return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
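addToArchive above streams each file through a pooled 4 KiB buffer (the sync.Pool declared at the top of this file) rather than allocating a new buffer per file. A standalone illustration of that copy pattern:

package main

import (
	"bytes"
	"io"
	"strings"
	"sync"
)

const bufSize = 4 * 1024

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, bufSize) },
}

// copyWithPooledBuffer copies src to dst, reusing a buffer from the pool.
func copyWithPooledBuffer(dst io.Writer, src io.Reader) (int64, error) {
	buf := bufPool.Get().([]byte)
	defer bufPool.Put(buf)
	return io.CopyBuffer(dst, src, buf)
}

func main() {
	var out bytes.Buffer
	_, _ = copyWithPooledBuffer(&out, strings.NewReader("hello world"))
}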
|
||||
@@ -1,80 +1,27 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/server/backup"
|
||||
ignore "github.com/sabhiram/go-gitignore"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
// Given a directory, iterate through all of the files and folders within it and determine
|
||||
// if they should be included in the output based on an array of ignored matches. This uses
|
||||
// standard .gitignore formatting to make that determination.
|
||||
// CompressFiles compresses all of the files matching the given paths in the
|
||||
// specified directory. This function also supports passing nested paths to only
|
||||
// compress certain files and folders when working in a larger directory. This
|
||||
// effectively creates a local backup, but rather than ignoring specific files
|
||||
// and folders, it takes an allow-list of files and folders.
|
||||
//
|
||||
// If no ignored files are passed through you'll get the entire directory listing.
|
||||
func (fs *Filesystem) GetIncludedFiles(dir string, ignored []string) (*backup.IncludedFiles, error) {
|
||||
cleaned, err := fs.SafePath(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
i, err := ignore.CompileIgnoreLines(ignored...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Walk through all of the files and directories on a server. This callback only returns
|
||||
// files found, and will keep walking deeper and deeper into directories.
|
||||
inc := new(backup.IncludedFiles)
|
||||
|
||||
err = godirwalk.Walk(cleaned, &godirwalk.Options{
|
||||
Unsorted: true,
|
||||
Callback: func(p string, e *godirwalk.Dirent) error {
|
||||
sp := p
|
||||
if e.IsSymlink() {
|
||||
sp, err = fs.SafePath(p)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrBadPathResolution) {
|
||||
return godirwalk.SkipThis
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Only push files into the result array since archives can't create an empty directory within them.
|
||||
if !e.IsDir() {
|
||||
// Avoid unnecessary parsing if there are no ignored files, nothing will match anyways
|
||||
// so no reason to call the function.
|
||||
if len(ignored) == 0 || !i.MatchesPath(strings.TrimPrefix(sp, fs.Path()+"/")) {
|
||||
inc.Push(sp)
|
||||
}
|
||||
}
|
||||
|
||||
// We can't just abort if the path is technically ignored. It is possible there is a nested
|
||||
// file or folder that should not be excluded, so in this case we need to just keep going
|
||||
// until we get to a final state.
|
||||
return nil
|
||||
},
|
||||
})
|
||||
|
||||
return inc, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Compresses all of the files matching the given paths in the specified directory. This function
|
||||
// also supports passing nested paths to only compress certain files and folders when working in
|
||||
// a larger directory. This effectively creates a local backup, but rather than ignoring specific
|
||||
// files and folders, it takes an allow-list of files and folders.
|
||||
//
|
||||
// All paths are relative to the dir that is passed in as the first argument, and the compressed
|
||||
// file will be placed at that location named `archive-{date}.tar.gz`.
|
||||
// All paths are relative to the dir that is passed in as the first argument,
|
||||
// and the compressed file will be placed at that location named
|
||||
// `archive-{date}.tar.gz`.
|
||||
func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
|
||||
cleanedRootDir, err := fs.SafePath(dir)
|
||||
if err != nil {
|
||||
@@ -91,69 +38,24 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inc := new(backup.IncludedFiles)
|
||||
// Iterate over all of the cleaned paths and merge them into a large object of final file
|
||||
// paths to pass into the archiver. As directories are encountered this will drop into them
|
||||
// and look for all of the files.
|
||||
for _, p := range cleaned {
|
||||
f, err := os.Stat(p)
|
||||
if err != nil {
|
||||
fs.error(err).WithField("path", p).Debug("failed to stat file or directory for compression")
|
||||
continue
|
||||
}
|
||||
a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
|
||||
d := path.Join(
|
||||
cleanedRootDir,
|
||||
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
|
||||
)
|
||||
|
||||
if !f.IsDir() {
|
||||
inc.Push(p)
|
||||
} else {
|
||||
err := godirwalk.Walk(p, &godirwalk.Options{
|
||||
Unsorted: true,
|
||||
Callback: func(p string, e *godirwalk.Dirent) error {
|
||||
sp := p
|
||||
if e.IsSymlink() {
|
||||
// Ensure that any symlinks are properly resolved to their final destination. If
|
||||
// that destination is outside the server directory skip over this entire item, otherwise
|
||||
// use the resolved location for the rest of this function.
|
||||
sp, err = fs.SafePath(p)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrBadPathResolution) {
|
||||
return godirwalk.SkipThis
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !e.IsDir() {
|
||||
inc.Push(sp)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if err := a.Create(d); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
a := &backup.Archive{TrimPrefix: fs.Path(), Files: inc}
|
||||
d := path.Join(cleanedRootDir, fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")))
|
||||
|
||||
if err := a.Create(d, context.Background()); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
|
||||
f, err := os.Stat(d)
|
||||
if err != nil {
|
||||
_ = os.Remove(d)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := fs.hasSpaceFor(f.Size()); err != nil {
|
||||
if err := fs.HasSpaceFor(f.Size()); err != nil {
|
||||
_ = os.Remove(d)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -161,3 +63,83 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// SpaceAvailableForDecompression looks through a given archive and determines
|
||||
// if decompressing it would put the server over its allocated disk space limit.
|
||||
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error {
|
||||
// Don't waste time trying to determine this if we know the server will have the space for
|
||||
// it since there is no limit.
|
||||
if fs.MaxDisk() <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the cached size in a parallel process so that if it is not cached we are not
|
||||
// waiting an unnecessary amount of time on this call.
|
||||
dirSize, err := fs.DiskUsage(false)
|
||||
|
||||
var size int64
|
||||
// Walk over the archive and figure out just how large the final output would be from unarchiving it.
|
||||
err = archiver.Walk(source, func(f archiver.File) error {
|
||||
if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
|
||||
return &Error{code: ErrCodeDiskSpace}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "format ") {
|
||||
return &Error{code: ErrCodeUnknownArchive}
|
||||
}
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DecompressFile will decompress a file in a given directory by using the
|
||||
// archiver tool to infer the file type and go from there. This will walk over
|
||||
// all of the files within the given archive and ensure that there is not a
|
||||
// zip-slip attack being attempted by validating that the final path is within
|
||||
// the server data directory.
|
||||
func (fs *Filesystem) DecompressFile(dir string, file string) error {
|
||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Ensure that the source archive actually exists on the system.
|
||||
if _, err := os.Stat(source); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Walk all of the files in the archiver file and write them to the disk. If any
|
||||
// directory is encountered it will be skipped since we handle creating any missing
|
||||
// directories automatically when writing files.
|
||||
err = archiver.Walk(source, func(f archiver.File) error {
|
||||
if f.IsDir() {
|
||||
return nil
|
||||
}
|
||||
name, err := system.ExtractArchiveSourceName(f, dir)
|
||||
if err != nil {
|
||||
return WrapError(err, filepath.Join(dir, f.Name()))
|
||||
}
|
||||
p := filepath.Join(dir, name)
|
||||
// If it is ignored, just don't do anything with the file and skip over it.
|
||||
if err := fs.IsIgnored(p); err != nil {
|
||||
return nil
|
||||
}
|
||||
if err := fs.Writefile(p, f); err != nil {
|
||||
return &Error{code: ErrCodeUnknownError, err: err, resolved: source}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "format ") {
|
||||
return &Error{code: ErrCodeUnknownArchive}
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
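A hedged sketch of how calling code might combine the reworked helpers above; the directory and file names are placeholders:

func restoreArchive(fs *Filesystem) error {
	// Refuse to extract if the unpacked size would exceed the disk limit.
	if err := fs.SpaceAvailableForDecompression("/", "backup.tar.gz"); err != nil {
		// IsErrorCode(err, ErrCodeDiskSpace) identifies the over-quota case.
		return err
	}
	return fs.DecompressFile("/", "backup.tar.gz")
}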
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"archive/zip"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/pkg/errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Look through a given archive and determine if decompressing it would put the server over
|
||||
// its allocated disk space limit.
|
||||
func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (bool, error) {
|
||||
// Don't waste time trying to determine this if we know the server will have the space for
|
||||
// it since there is no limit.
|
||||
if fs.MaxDisk() <= 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Get the cached size in a parallel process so that if it is not cached we are not
|
||||
// waiting an unnecessary amount of time on this call.
|
||||
dirSize, err := fs.DiskUsage(false)
|
||||
|
||||
var size int64
|
||||
// Walk over the archive and figure out just how large the final output would be from unarchiving it.
|
||||
err = archiver.Walk(source, func(f archiver.File) error {
|
||||
if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() {
|
||||
return ErrNotEnoughDiskSpace
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "format ") {
|
||||
return false, ErrUnknownArchiveFormat
|
||||
}
|
||||
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
|
||||
return true, errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Decompress a file in a given directory by using the archiver tool to infer the file
|
||||
// type and go from there. This will walk over all of the files within the given archive
|
||||
// and ensure that there is not a zip-slip attack being attempted by validating that the
|
||||
// final path is within the server data directory.
|
||||
func (fs *Filesystem) DecompressFile(dir string, file string) error {
|
||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Make sure the file exists basically.
|
||||
if _, err := os.Stat(source); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
// Walk over all of the files spinning up an additional go-routine for each file we've encountered
|
||||
// and then extract that file from the archive and write it to the disk. If any part of this process
|
||||
// encounters an error the entire process will be stopped.
|
||||
err = archiver.Walk(source, func(f archiver.File) error {
|
||||
// Don't waste time with directories, we don't need to create them if they have no contents, and
|
||||
// we will ensure the directory exists when opening the file for writing anyways.
|
||||
if f.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
var name string
|
||||
|
||||
switch s := f.Sys().(type) {
|
||||
case *tar.Header:
|
||||
name = s.Name
|
||||
case *gzip.Header:
|
||||
name = s.Name
|
||||
case *zip.FileHeader:
|
||||
name = s.Name
|
||||
default:
|
||||
return errors.New(fmt.Sprintf("could not parse underlying data source with type %s", reflect.TypeOf(s).String()))
|
||||
}
|
||||
|
||||
p, err := fs.SafePath(filepath.Join(dir, name))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to generate a safe path to server file")
|
||||
}
|
||||
|
||||
return errors.Wrap(fs.Writefile(p, f), "could not extract file from archive")
|
||||
})
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "format ") {
|
||||
return errors.WithStack(ErrUnknownArchiveFormat)
|
||||
}
|
||||
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,9 +1,9 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/pkg/errors"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
@@ -36,17 +36,21 @@ func (ult *usageLookupTime) Get() time.Time {
|
||||
|
||||
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
|
||||
func (fs *Filesystem) MaxDisk() int64 {
|
||||
fs.mu.RLock()
|
||||
defer fs.mu.RUnlock()
|
||||
|
||||
return fs.diskLimit
|
||||
return atomic.LoadInt64(&fs.diskLimit)
|
||||
}
|
||||
|
||||
// Sets the disk space limit for this Filesystem instance.
|
||||
func (fs *Filesystem) SetDiskLimit(i int64) {
|
||||
fs.mu.Lock()
|
||||
fs.diskLimit = i
|
||||
fs.mu.Unlock()
|
||||
atomic.SwapInt64(&fs.diskLimit, i)
|
||||
}
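The hunk above replaces a mutex-guarded int64 with sync/atomic operations; SwapInt64 and StoreInt64 behave the same here since the previous value is discarded. A standalone equivalent of the new accessors:

type diskLimit struct{ n int64 }

func (d *diskLimit) Get() int64  { return atomic.LoadInt64(&d.n) }
func (d *diskLimit) Set(v int64) { atomic.StoreInt64(&d.n, v) }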
|
||||
|
||||
// The same concept as HasSpaceAvailable however this will return an error if there is
|
||||
// no space, rather than a boolean value.
|
||||
func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
|
||||
if !fs.HasSpaceAvailable(allowStaleValue) {
|
||||
return &Error{code: ErrCodeDiskSpace}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Determines if the directory a file is trying to be added to has enough space available
|
||||
@@ -79,10 +83,7 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
||||
// function for critical logical checks. It should only be used in areas where the actual disk usage
|
||||
// does not need to be perfect, e.g. API responses for server resource usage.
|
||||
func (fs *Filesystem) CachedUsage() int64 {
|
||||
fs.mu.RLock()
|
||||
defer fs.mu.RUnlock()
|
||||
|
||||
return fs.diskUsed
|
||||
return atomic.LoadInt64(&fs.diskUsed)
|
||||
}
|
||||
|
||||
// Internal helper function to allow other parts of the codebase to check the total used disk space
|
||||
@@ -97,12 +98,17 @@ func (fs *Filesystem) CachedUsage() int64 {
|
||||
// This is primarily to avoid a bunch of I/O operations from piling up on the server, especially on servers
|
||||
// with a large amount of files.
|
||||
func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
|
||||
// A disk check interval of 0 means this functionality is completely disabled.
|
||||
if fs.diskCheckInterval == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if !fs.lastLookupTime.Get().After(time.Now().Add(time.Second * fs.diskCheckInterval * -1)) {
|
||||
// If we are now allowing a stale response go ahead and perform the lookup and return the fresh
|
||||
// value. This is a blocking operation to the calling process.
|
||||
if !allowStaleValue {
|
||||
return fs.updateCachedDiskUsage()
|
||||
} else if !fs.lookupInProgress.Get() {
|
||||
} else if !fs.lookupInProgress.Load() {
|
||||
// Otherwise, if we allow a stale value and there isn't a valid item in the cache and we aren't
|
||||
// currently performing a lookup, just do the disk usage calculation in the background.
|
||||
go func(fs *Filesystem) {
|
||||
@@ -128,8 +134,8 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
|
||||
// Signal that we're currently updating the disk size so that other calls to the disk checking
|
||||
// functions can determine if they should queue up additional calls to this function. Ensure that
|
||||
// we always set this back to "false" when this process is done executing.
|
||||
fs.lookupInProgress.Set(true)
|
||||
defer fs.lookupInProgress.Set(false)
|
||||
fs.lookupInProgress.Store(true)
|
||||
defer fs.lookupInProgress.Store(false)
|
||||
|
||||
// If there is no size its either because there is no data (in which case running this function
|
||||
// will have effectively no impact), or there is nothing in the cache, in which case we need to
|
||||
@@ -153,7 +159,7 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
|
||||
func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
||||
d, err := fs.SafePath(dir)
|
||||
if err != nil {
|
||||
return 0, errors.WithStack(err)
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var size int64
|
||||
@@ -167,7 +173,7 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
||||
// it. Otherwise, allow it to continue.
|
||||
if e.IsSymlink() {
|
||||
if _, err := fs.SafePath(p); err != nil {
|
||||
if errors.Is(err, ErrBadPathResolution) {
|
||||
if IsErrorCode(err, ErrCodePathResolution) {
|
||||
return godirwalk.SkipThis
|
||||
}
|
||||
|
||||
@@ -184,13 +190,13 @@ func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
||||
},
|
||||
})
|
||||
|
||||
return size, errors.WithStack(err)
|
||||
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
|
||||
}
|
||||
|
||||
// Helper function to determine if a server has space available for a file of a given size.
|
||||
// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
|
||||
// will be raised.
|
||||
func (fs *Filesystem) hasSpaceFor(size int64) error {
|
||||
func (fs *Filesystem) HasSpaceFor(size int64) error {
|
||||
if fs.MaxDisk() == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -201,7 +207,7 @@ func (fs *Filesystem) hasSpaceFor(size int64) error {
|
||||
}
|
||||
|
||||
if (s + size) > fs.MaxDisk() {
|
||||
return ErrNotEnoughDiskSpace
|
||||
return &Error{code: ErrCodeDiskSpace}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -1,16 +1,77 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
)
|
||||
|
||||
var ErrIsDirectory = errors.New("filesystem: is a directory")
|
||||
var ErrNotEnoughDiskSpace = errors.New("filesystem: not enough disk space")
|
||||
var ErrBadPathResolution = errors.New("filesystem: invalid path resolution")
|
||||
var ErrUnknownArchiveFormat = errors.New("filesystem: unknown archive format")
|
||||
type ErrorCode string
|
||||
|
||||
const (
|
||||
ErrCodeIsDirectory ErrorCode = "E_ISDIR"
|
||||
ErrCodeDiskSpace ErrorCode = "E_NODISK"
|
||||
ErrCodeUnknownArchive ErrorCode = "E_UNKNFMT"
|
||||
ErrCodePathResolution ErrorCode = "E_BADPATH"
|
||||
ErrCodeDenylistFile ErrorCode = "E_DENYLIST"
|
||||
ErrCodeUnknownError ErrorCode = "E_UNKNOWN"
|
||||
)
|
||||
|
||||
type Error struct {
|
||||
code ErrorCode
|
||||
// Contains the underlying error leading to this. This value may or may not be
|
||||
// present, it is entirely dependent on how this error was triggered.
|
||||
err error
|
||||
// This contains the value of the final destination that triggered this specific
|
||||
// error event.
|
||||
resolved string
|
||||
// This value is generally only present on errors stemming from a path resolution
|
||||
// error. For everything else you should be setting and reading the resolved path
|
||||
// value which will be far more useful.
|
||||
path string
|
||||
}
|
||||
|
||||
// Code returns the ErrorCode for this specific error instance.
|
||||
func (e *Error) Code() ErrorCode {
|
||||
return e.code
|
||||
}
|
||||
|
||||
// Returns a human-readable error string to identify the Error by.
|
||||
func (e *Error) Error() string {
|
||||
switch e.code {
|
||||
case ErrCodeIsDirectory:
|
||||
return fmt.Sprintf("filesystem: cannot perform action: [%s] is a directory", e.resolved)
|
||||
case ErrCodeDiskSpace:
|
||||
return "filesystem: not enough disk space"
|
||||
case ErrCodeUnknownArchive:
|
||||
return "filesystem: unknown archive format"
|
||||
case ErrCodeDenylistFile:
|
||||
r := e.resolved
|
||||
if r == "" {
|
||||
r = "<empty>"
|
||||
}
|
||||
return fmt.Sprintf("filesystem: file access prohibited: [%s] is on the denylist", r)
|
||||
case ErrCodePathResolution:
|
||||
r := e.resolved
|
||||
if r == "" {
|
||||
r = "<empty>"
|
||||
}
|
||||
return fmt.Sprintf("filesystem: server path [%s] resolves to a location outside the server root: %s", e.path, r)
|
||||
case ErrCodeUnknownError:
|
||||
fallthrough
|
||||
default:
|
||||
return fmt.Sprintf("filesystem: an error occurred: %s", e.Cause())
|
||||
}
|
||||
}
|
||||
|
||||
// Cause returns the underlying cause of this filesystem error. In some cases
|
||||
// there may not be a cause present, in which case nil will be returned.
|
||||
func (e *Error) Cause() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// Generates an error logger instance with some basic information.
|
||||
func (fs *Filesystem) error(err error) *log.Entry {
|
||||
@@ -23,13 +84,49 @@ func (fs *Filesystem) error(err error) *log.Entry {
|
||||
// directory, otherwise return nil. Returning this error for a file will stop the walking
|
||||
// for the remainder of the directory. This is assuming an os.FileInfo struct was even returned.
|
||||
func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
|
||||
if !errors.Is(err, ErrBadPathResolution) {
|
||||
if !IsErrorCode(err, ErrCodePathResolution) {
|
||||
return err
|
||||
}
|
||||
|
||||
if f != nil && f.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsFilesystemError checks if the given error is one of the Filesystem errors.
|
||||
func IsFilesystemError(err error) bool {
|
||||
var fserr *Error
|
||||
if err != nil && errors.As(err, &fserr) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsErrorCode checks if "err" is a filesystem Error type. If so, it will then
|
||||
// drop in and check that the error code is the same as the provided ErrorCode
|
||||
// passed in "code".
|
||||
func IsErrorCode(err error, code ErrorCode) bool {
|
||||
var fserr *Error
|
||||
if err != nil && errors.As(err, &fserr) {
|
||||
return fserr.code == code
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NewBadPathResolution returns a new BadPathResolution error.
|
||||
func NewBadPathResolution(path string, resolved string) *Error {
|
||||
return &Error{code: ErrCodePathResolution, path: path, resolved: resolved}
|
||||
}
|
||||
|
||||
// WrapError wraps the provided error as a Filesystem error and attaches the
|
||||
// provided resolved source to it. If the error is already a Filesystem error
|
||||
// no action is taken.
|
||||
func WrapError(err error, resolved string) *Error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if IsFilesystemError(err) {
|
||||
return err.(*Error)
|
||||
}
|
||||
return &Error{code: ErrCodeUnknownError, err: err, resolved: resolved}
|
||||
}
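A short, hypothetical example of how callers can branch on the new error codes instead of comparing sentinel errors:

func describeFilesystemError(err error) string {
	if err == nil {
		return "ok"
	}
	if IsErrorCode(err, ErrCodePathResolution) {
		return "path escapes the server root"
	}
	if IsErrorCode(err, ErrCodeDiskSpace) {
		return "not enough disk space"
	}
	return err.Error()
}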
|
||||
server/filesystem/errors_test.go (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
. "github.com/franela/goblin"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilesystem_PathResolutionError(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
|
||||
g.Describe("NewBadPathResolutionError", func() {
|
||||
g.It("is can detect itself as an error correctly", func() {
|
||||
err := NewBadPathResolution("foo", "bar")
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
g.Assert(err.Error()).Equal("filesystem: server path [foo] resolves to a location outside the server root: bar")
|
||||
g.Assert(IsErrorCode(&Error{code: ErrCodeIsDirectory}, ErrCodePathResolution)).IsFalse()
|
||||
})
|
||||
|
||||
g.It("returns <empty> if no destination path is provided", func() {
|
||||
err := NewBadPathResolution("foo", "")
|
||||
g.Assert(err.Error()).Equal("filesystem: server path [foo] resolves to a location outside the server root: <empty>")
|
||||
})
|
||||
})
|
||||
}
|
||||
@@ -2,11 +2,6 @@ package filesystem
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"github.com/gabriel-vasile/mimetype"
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@@ -16,15 +11,24 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/gabriel-vasile/mimetype"
|
||||
"github.com/karrick/godirwalk"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
ignore "github.com/sabhiram/go-gitignore"
|
||||
)
|
||||
|
||||
type Filesystem struct {
|
||||
mu sync.RWMutex
|
||||
lastLookupTime *usageLookupTime
|
||||
lookupInProgress system.AtomicBool
|
||||
lookupInProgress *system.AtomicBool
|
||||
diskUsed int64
|
||||
diskCheckInterval time.Duration
|
||||
denylist *ignore.GitIgnore
|
||||
|
||||
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
|
||||
diskLimit int64
|
||||
@@ -35,91 +39,129 @@ type Filesystem struct {
|
||||
isTest bool
|
||||
}
|
||||
|
||||
// Creates a new Filesystem instance for a given server.
|
||||
func New(root string, size int64) *Filesystem {
|
||||
// New creates a new Filesystem instance for a given server.
|
||||
func New(root string, size int64, denylist []string) *Filesystem {
|
||||
return &Filesystem{
|
||||
root: root,
|
||||
diskLimit: size,
|
||||
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
|
||||
lastLookupTime: &usageLookupTime{},
|
||||
lookupInProgress: system.NewAtomicBool(false),
|
||||
denylist: ignore.CompileIgnoreLines(denylist...),
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the root path for the Filesystem instance.
|
||||
// Path returns the root path for the Filesystem instance.
|
||||
func (fs *Filesystem) Path() string {
|
||||
return fs.root
|
||||
}
|
||||
|
||||
// File returns a reader for a file instance as well as the stat information.
|
||||
func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
|
||||
cleaned, err := fs.SafePath(p)
|
||||
if err != nil {
|
||||
return nil, Stat{}, err
|
||||
}
|
||||
st, err := fs.Stat(cleaned)
|
||||
if err != nil {
|
||||
return nil, Stat{}, err
|
||||
}
|
||||
if st.IsDir() {
|
||||
return nil, Stat{}, &Error{code: ErrCodeIsDirectory}
|
||||
}
|
||||
f, err := os.Open(cleaned)
|
||||
if err != nil {
|
||||
return nil, Stat{}, err
|
||||
}
|
||||
return f, st, nil
|
||||
}
|
||||
|
||||
// Acts by creating the given file and path on the disk if it is not present already. If
|
||||
// it is present, the file is opened using the defaults which will truncate the contents.
|
||||
// The opened file is then returned to the caller.
|
||||
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
|
||||
cleaned, err := fs.SafePath(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, err := os.OpenFile(cleaned, flag, 0644)
|
||||
if err == nil {
|
||||
return f, nil
|
||||
}
|
||||
// If the error is not because it doesn't exist then we just need to bail at this point.
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file handle")
|
||||
}
|
||||
// Only create and chown the directory if it doesn't exist.
|
||||
if _, err := os.Stat(filepath.Dir(cleaned)); errors.Is(err, os.ErrNotExist) {
|
||||
// Create the path leading up to the file we're trying to create, setting the final perms
|
||||
// on it as we go.
|
||||
if err := os.MkdirAll(filepath.Dir(cleaned), 0755); err != nil {
|
||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to create directory tree")
|
||||
}
|
||||
if err := fs.Chown(filepath.Dir(cleaned)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
o := &fileOpener{}
|
||||
// Try to open the file now that we have created the pathing necessary for it, and then
|
||||
// Chown that file so that the permissions don't mess with things.
|
||||
f, err = o.open(cleaned, flag, 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file with wait")
|
||||
}
|
||||
_ = fs.Chown(cleaned)
|
||||
return f, nil
|
||||
}
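A minimal usage sketch for the new Touch helper; the path and flags are chosen purely for illustration:

func appendToLog(fs *Filesystem, line string) error {
	f, err := fs.Touch("logs/latest.log", os.O_RDWR|os.O_CREATE|os.O_APPEND)
	if err != nil {
		return err
	}
	defer f.Close()
	// Touch has already created any missing parent directories and chowned them.
	_, err = f.WriteString(line + "\n")
	return err
}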
|
||||
|
||||
// Reads a file on the system and returns it as a byte representation in a file
|
||||
// reader. This is not the most memory efficient usage since it will be reading the
|
||||
// entirety of the file into memory.
|
||||
func (fs *Filesystem) Readfile(p string, w io.Writer) error {
|
||||
cleaned, err := fs.SafePath(p)
|
||||
file, _, err := fs.File(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if st, err := os.Stat(cleaned); err != nil {
|
||||
return err
|
||||
} else if st.IsDir() {
|
||||
return ErrIsDirectory
|
||||
}
|
||||
|
||||
f, err := os.Open(cleaned)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = bufio.NewReader(f).WriteTo(w)
|
||||
|
||||
defer file.Close()
|
||||
_, err = bufio.NewReader(file).WriteTo(w)
|
||||
return err
|
||||
}
|
||||
|
||||
// Writes a file to the system. If the file does not already exist one will be created.
|
||||
// Writefile writes a file to the system. If the file does not already exist one
|
||||
// will be created. This will also properly recalculate the disk space used by
|
||||
// the server when writing new files or modifying existing ones.
|
||||
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
||||
cleaned, err := fs.SafePath(p)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
var currentSize int64
|
||||
// If the file does not exist on the system already go ahead and create the pathway
|
||||
// to it and an empty file. We'll then write to it later on after this completes.
|
||||
if stat, err := os.Stat(cleaned); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(cleaned), 0755); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
if err := fs.Chown(filepath.Dir(cleaned)); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
} else {
|
||||
stat, err := os.Stat(cleaned)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
|
||||
} else if err == nil {
|
||||
if stat.IsDir() {
|
||||
return ErrIsDirectory
|
||||
return &Error{code: ErrCodeIsDirectory, resolved: cleaned}
|
||||
}
|
||||
|
||||
currentSize = stat.Size()
|
||||
}
|
||||
|
||||
br := bufio.NewReader(r)
|
||||
// Check that the new size we're writing to the disk can fit. If there is currently a file
|
||||
// we'll subtract that current file size from the size of the buffer to determine the amount
|
||||
// of new data we're writing (or amount we're removing if smaller).
|
||||
if err := fs.hasSpaceFor(int64(br.Size()) - currentSize); err != nil {
|
||||
// Check that the new size we're writing to the disk can fit. If there is currently
|
||||
// a file we'll subtract that current file size from the size of the buffer to determine
|
||||
// the amount of new data we're writing (or amount we're removing if smaller).
|
||||
if err := fs.HasSpaceFor(int64(br.Size()) - currentSize); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o := &fileOpener{}
|
||||
// This will either create the file if it does not already exist, or open and
|
||||
// truncate the existing file.
|
||||
file, err := o.open(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
// Touch the file and return the handle to it at this point. This will create the file,
|
||||
// any necessary directories, and set the proper owner of the file.
|
||||
file, err := fs.Touch(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
@@ -129,8 +171,6 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
||||
// Adjust the disk usage to account for the old size and the new size of the file.
|
||||
fs.addDisk(sz - currentSize)
|
||||
|
||||
// Finally, chown the file to ensure the permissions don't end up out-of-whack
|
||||
// if we had just created it.
|
||||
return fs.Chown(cleaned)
|
||||
}
|
||||
|
||||
@@ -138,9 +178,8 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
||||
func (fs *Filesystem) CreateDirectory(name string, p string) error {
|
||||
cleaned, err := fs.SafePath(path.Join(p, name))
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return os.MkdirAll(cleaned, 0755)
|
||||
}
|
||||
|
||||
@@ -148,12 +187,12 @@ func (fs *Filesystem) CreateDirectory(name string, p string) error {
|
||||
func (fs *Filesystem) Rename(from string, to string) error {
|
||||
cleanedFrom, err := fs.SafePath(from)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
cleanedTo, err := fs.SafePath(to)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// If the target file or directory already exists the rename function will fail, so just
|
||||
@@ -171,7 +210,7 @@ func (fs *Filesystem) Rename(from string, to string) error {
|
||||
// we're not at the root directory level.
|
||||
if d != fs.Path() {
|
||||
if mkerr := os.MkdirAll(d, 0755); mkerr != nil {
|
||||
return errors.Wrap(mkerr, "failed to create directory structure for file rename")
|
||||
return errors.WithMessage(mkerr, "failed to create directory structure for file rename")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -185,7 +224,7 @@ func (fs *Filesystem) Rename(from string, to string) error {
|
||||
func (fs *Filesystem) Chown(path string) error {
|
||||
cleaned, err := fs.SafePath(path)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if fs.isTest {
|
||||
@@ -197,21 +236,21 @@ func (fs *Filesystem) Chown(path string) error {
|
||||
|
||||
// Start by just chowning the initial path that we received.
|
||||
if err := os.Chown(cleaned, uid, gid); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
|
||||
}
|
||||
|
||||
// If this is not a directory we can now return from the function, there is nothing
|
||||
// left that we need to do.
|
||||
if st, _ := os.Stat(cleaned); !st.IsDir() {
|
||||
if st, err := os.Stat(cleaned); err != nil || !st.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If this was a directory, begin walking over its contents recursively and ensure that all
|
||||
// of the subfiles and directories get their permissions updated as well.
|
||||
return godirwalk.Walk(cleaned, &godirwalk.Options{
|
||||
err = godirwalk.Walk(cleaned, &godirwalk.Options{
|
||||
Unsorted: true,
|
||||
Callback: func(p string, e *godirwalk.Dirent) error {
|
||||
// Do not attempt to chmod a symlink. Go's os.Chown function will affect the symlink
|
||||
// Do not attempt to chown a symlink. Go's os.Chown function will affect the symlink
|
||||
// so if it points to a location outside the data directory the user would be able to
|
||||
// (un)intentionally modify that file's permissions.
|
||||
if e.IsSymlink() {
|
||||
@@ -225,6 +264,25 @@ func (fs *Filesystem) Chown(path string) error {
|
||||
return os.Chown(p, uid, gid)
|
||||
},
|
||||
})
|
||||
|
||||
return errors.Wrap(err, "server/filesystem: chown: failed to chown during walk function")
|
||||
}
|
||||
|
||||
func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
|
||||
cleaned, err := fs.SafePath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if fs.isTest {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := os.Chmod(cleaned, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Begin looping up to 50 times to try and create a unique copy file name. This will take
|
||||
@@ -237,7 +295,7 @@ func (fs *Filesystem) Chown(path string) error {
|
||||
// looping endlessly.
|
||||
func (fs *Filesystem) findCopySuffix(dir string, name string, extension string) (string, error) {
|
||||
var i int
|
||||
var suffix = " copy"
|
||||
suffix := " copy"
|
||||
|
||||
for i = 0; i < 51; i++ {
|
||||
if i > 0 {
|
||||
@@ -248,7 +306,7 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
|
||||
// If we stat the file and it does not exist that means we're good to create the copy. If it
|
||||
// does exist, we'll just continue to the next loop and try again.
|
||||
if _, err := fs.Stat(path.Join(dir, n)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
if !errors.Is(err, os.ErrNotExist) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
@@ -268,12 +326,12 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
|
||||
func (fs *Filesystem) Copy(p string) error {
|
||||
cleaned, err := fs.SafePath(p)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
s, err := os.Stat(cleaned)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
} else if s.IsDir() || !s.Mode().IsRegular() {
|
||||
// If this is a directory or not a regular file, just throw a not-exist error
|
||||
// since anything calling this function should understand what that means.
|
||||
@@ -281,7 +339,7 @@ func (fs *Filesystem) Copy(p string) error {
|
||||
}
|
||||
|
||||
// Check that copying this file wouldn't put the server over its limit.
|
||||
if err := fs.hasSpaceFor(s.Size()); err != nil {
|
||||
if err := fs.HasSpaceFor(s.Size()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -300,17 +358,33 @@ func (fs *Filesystem) Copy(p string) error {
|
||||
|
||||
source, err := os.Open(cleaned)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
defer source.Close()
|
||||
|
||||
n, err := fs.findCopySuffix(relative, name, extension)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fs.Writefile(path.Join(relative, n), source)
|
||||
}
|
||||
|
||||
// Deletes a file or folder from the system. Prevents the user from accidentally
|
||||
// (or maliciously) removing their root server data directory.
|
||||
// TruncateRootDirectory removes _all_ files and directories from a server's
|
||||
// data directory and resets the used disk space to zero.
|
||||
func (fs *Filesystem) TruncateRootDirectory() error {
|
||||
if err := os.RemoveAll(fs.Path()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Mkdir(fs.Path(), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes a file or folder from the system. Prevents the user from
|
||||
// accidentally (or maliciously) removing their root server data directory.
|
||||
func (fs *Filesystem) Delete(p string) error {
|
||||
wg := sync.WaitGroup{}
|
||||
// This is one of the few (only?) places in the codebase where we're explicitly not using
|
||||
@@ -324,7 +398,7 @@ func (fs *Filesystem) Delete(p string) error {
|
||||
// exists within the data directory.
|
||||
resolved := fs.unsafeFilePath(p)
|
||||
if !fs.unsafeIsInDataDirectory(resolved) {
|
||||
return ErrBadPathResolution
|
||||
return NewBadPathResolution(p, resolved)
|
||||
}
|
||||
|
||||
// Block any whoopsies.
|
||||
@@ -381,9 +455,9 @@ func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File,
|
||||
}
|
||||
}
|
||||
|
||||
// Lists the contents of a given directory and returns stat information about each
|
||||
// file and folder within it.
|
||||
func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) {
|
||||
// ListDirectory lists the contents of a given directory and returns stat
|
||||
// information about each file and folder within it.
|
||||
func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
|
||||
cleaned, err := fs.SafePath(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -399,7 +473,7 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) {
|
||||
// You must initialize the output of this directory as a non-nil value otherwise
|
||||
// when it is marshaled into a JSON object you'll just get 'null' back, which will
|
||||
// break the panel badly.
|
||||
out := make([]*Stat, len(files))
|
||||
out := make([]Stat, len(files))
|
||||
|
||||
// Iterate over all of the files and directories returned and perform an async process
|
||||
// to get the mime-type for them all.
|
||||
@@ -426,15 +500,10 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) {
|
||||
}
|
||||
}
|
||||
|
||||
st := &Stat{
|
||||
Info: f,
|
||||
Mimetype: d,
|
||||
}
|
||||
|
||||
st := Stat{FileInfo: f, Mimetype: d}
|
||||
if m != nil {
|
||||
st.Mimetype = m.String()
|
||||
}
|
||||
|
||||
out[idx] = st
|
||||
}(i, file)
|
||||
}
|
||||
@@ -444,17 +513,16 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) {
|
||||
// Sort the output alphabetically to begin with since we've run the output
|
||||
// through an asynchronous process and the order is gonna be very random.
|
||||
sort.SliceStable(out, func(i, j int) bool {
|
||||
if out[i].Info.Name() == out[j].Info.Name() || out[i].Info.Name() > out[j].Info.Name() {
|
||||
if out[i].Name() == out[j].Name() || out[i].Name() > out[j].Name() {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
|
||||
// Then, sort it so that directories are listed first in the output. Everything
|
||||
// will continue to be alphabetized at this point.
|
||||
sort.SliceStable(out, func(i, j int) bool {
|
||||
return out[i].Info.IsDir()
|
||||
return out[i].IsDir()
|
||||
})
|
||||
|
||||
return out, nil
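The two stable sorts above work as a pair: the first pass orders everything by name, the second moves directories to the front, and because SliceStable preserves the relative order of equal elements the alphabetical ordering survives inside each group. A self-contained illustration, using a strict comparator in both passes and a toy entry type:

package main

import (
    "fmt"
    "sort"
)

type entry struct {
    name string
    dir  bool
}

func main() {
    out := []entry{{"b.txt", false}, {"a", true}, {"c", true}, {"a.txt", false}}

    // Pass 1: alphabetical order across the whole listing.
    sort.SliceStable(out, func(i, j int) bool { return out[i].name < out[j].name })

    // Pass 2: directories first; stability keeps each group alphabetized.
    sort.SliceStable(out, func(i, j int) bool { return out[i].dir && !out[j].dir })

    fmt.Println(out) // [{a true} {c true} {a.txt false} {b.txt false}]
}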
|
||||
|
||||
@@ -3,8 +3,6 @@ package filesystem
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
. "github.com/franela/goblin"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
@@ -12,6 +10,9 @@ import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"unicode/utf8"
|
||||
|
||||
. "github.com/franela/goblin"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
)
|
||||
|
||||
func NewFs() (*Filesystem, *rootFs) {
|
||||
@@ -33,7 +34,7 @@ func NewFs() (*Filesystem, *rootFs) {
|
||||
|
||||
rfs.reset()
|
||||
|
||||
fs := New(filepath.Join(tmpDir, "/server"), 0)
|
||||
fs := New(filepath.Join(tmpDir, "/server"), 0, []string{})
|
||||
fs.isTest = true
|
||||
|
||||
return fs, &rfs
|
||||
@@ -70,224 +71,6 @@ func (rfs *rootFs) reset() {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilesystem_Path(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
fs, rfs := NewFs()
|
||||
|
||||
g.Describe("Path", func() {
|
||||
g.It("returns the root path for the instance", func() {
|
||||
g.Assert(fs.Path()).Equal(filepath.Join(rfs.root, "/server"))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestFilesystem_SafePath(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
fs, rfs := NewFs()
|
||||
prefix := filepath.Join(rfs.root, "/server")
|
||||
|
||||
g.Describe("SafePath", func() {
|
||||
g.It("returns a cleaned path to a given file", func() {
|
||||
p, err := fs.SafePath("test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("/test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("./test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("/foo/../test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("/foo/bar")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
||||
})
|
||||
|
||||
g.It("handles root directory access", func() {
|
||||
p, err := fs.SafePath("/")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix)
|
||||
|
||||
p, err = fs.SafePath("")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix)
|
||||
})
|
||||
|
||||
g.It("removes trailing slashes from paths", func() {
|
||||
p, err := fs.SafePath("/foo/bar/")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
||||
})
|
||||
|
||||
g.It("handles deeply nested directories that do not exist", func() {
|
||||
p, err := fs.SafePath("/foo/bar/baz/quaz/../../ducks/testing.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/foo/bar/ducks/testing.txt")
|
||||
})
|
||||
|
||||
g.It("blocks access to files outside the root directory", func() {
|
||||
p, err := fs.SafePath("../test.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
|
||||
p, err = fs.SafePath("/../test.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
|
||||
p, err = fs.SafePath("./foo/../../test.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
|
||||
p, err = fs.SafePath("..")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// We test against accessing files outside the root directory in the tests, however it
|
||||
// is still possible for someone to mess up and not properly use this safe path call. In
|
||||
// order to truly confirm this, we'll try to pass in a symlinked malicious file to all of
|
||||
// the calls and ensure they all fail with the same reason.
|
||||
func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
fs, rfs := NewFs()
|
||||
|
||||
if err := rfs.CreateServerFile("/../malicious.txt", "external content"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := os.Mkdir(filepath.Join(rfs.root, "/malicious_dir"), 0777); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := os.Symlink(filepath.Join(rfs.root, "malicious.txt"), filepath.Join(rfs.root, "/server/symlinked.txt")); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := os.Symlink(filepath.Join(rfs.root, "/malicious_dir"), filepath.Join(rfs.root, "/server/external_dir")); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
g.Describe("Readfile", func() {
|
||||
g.It("cannot read a file symlinked outside the root", func() {
|
||||
b := bytes.Buffer{}
|
||||
|
||||
err := fs.Readfile("symlinked.txt", &b)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Writefile", func() {
|
||||
g.It("cannot write to a file symlinked outside the root", func() {
|
||||
r := bytes.NewReader([]byte("testing"))
|
||||
|
||||
err := fs.Writefile("symlinked.txt", r)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot write a file to a directory symlinked outside the root", func() {
|
||||
r := bytes.NewReader([]byte("testing"))
|
||||
|
||||
err := fs.Writefile("external_dir/foo.txt", r)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("CreateDirectory", func() {
|
||||
g.It("cannot create a directory outside the root", func() {
|
||||
err := fs.CreateDirectory("my_dir", "external_dir")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot create a nested directory outside the root", func() {
|
||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot create a nested directory outside the root", func() {
|
||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Rename", func() {
|
||||
g.It("cannot rename a file symlinked outside the directory root", func() {
|
||||
err := fs.Rename("symlinked.txt", "foo.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot rename a symlinked directory outside the root", func() {
|
||||
err := fs.Rename("external_dir", "foo")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot rename a file to a location outside the directory root", func() {
|
||||
rfs.CreateServerFile("my_file.txt", "internal content")
|
||||
|
||||
err := fs.Rename("my_file.txt", "external_dir/my_file.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Chown", func() {
|
||||
g.It("cannot chown a file symlinked outside the directory root", func() {
|
||||
err := fs.Chown("symlinked.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot chown a directory symlinked outside the directory root", func() {
|
||||
err := fs.Chown("external_dir")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Copy", func() {
|
||||
g.It("cannot copy a file symlinked outside the directory root", func() {
|
||||
err := fs.Copy("symlinked.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Delete", func() {
|
||||
g.It("deletes the symlinked file but leaves the source", func() {
|
||||
err := fs.Delete("symlinked.txt")
|
||||
g.Assert(err).IsNil()
|
||||
|
||||
_, err = os.Stat(filepath.Join(rfs.root, "malicious.txt"))
|
||||
g.Assert(err).IsNil()
|
||||
|
||||
_, err = rfs.StatServerFile("symlinked.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
rfs.reset()
|
||||
}
|
||||
|
||||
func TestFilesystem_Readfile(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
fs, rfs := NewFs()
|
||||
@@ -316,7 +99,7 @@ func TestFilesystem_Readfile(t *testing.T) {
|
||||
|
||||
err = fs.Readfile("test.txt", buf)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrIsDirectory)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodeIsDirectory)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot open a file outside the root directory", func() {
|
||||
@@ -325,7 +108,7 @@ func TestFilesystem_Readfile(t *testing.T) {
|
||||
|
||||
err = fs.Readfile("/../test.txt", buf)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.AfterEach(func() {
|
||||
@@ -386,7 +169,7 @@ func TestFilesystem_Writefile(t *testing.T) {
|
||||
|
||||
err := fs.Writefile("/some/../foo/../../test.txt", r)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot write a file that exceeds the disk limits", func() {
|
||||
@@ -400,7 +183,7 @@ func TestFilesystem_Writefile(t *testing.T) {
|
||||
r := bytes.NewReader(b)
|
||||
err = fs.Writefile("test.txt", r)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrNotEnoughDiskSpace)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
||||
})
|
||||
|
||||
/*g.It("updates the total space used when a file is appended to", func() {
|
||||
@@ -477,7 +260,7 @@ func TestFilesystem_CreateDirectory(t *testing.T) {
|
||||
g.It("should not allow the creation of directories outside the root", func() {
|
||||
err := fs.CreateDirectory("test", "e/../../something")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("should not increment the disk usage", func() {
|
||||
@@ -527,7 +310,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||
g.It("does not allow renaming to a location outside the root", func() {
|
||||
err := fs.Rename("source.txt", "../target.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("does not allow renaming from a location outside the root", func() {
|
||||
@@ -535,7 +318,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||
|
||||
err = fs.Rename("/../ext-source.txt", "target.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("allows a file to be renamed", func() {
|
||||
@@ -613,7 +396,7 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||
|
||||
err = fs.Copy("../ext-source.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("should return an error if the source directory is outside the root", func() {
|
||||
@@ -625,11 +408,11 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||
|
||||
err = fs.Copy("../nested/in/dir/ext-source.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
|
||||
err = fs.Copy("nested/in/../../../nested/in/dir/ext-source.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("should return an error if the source is a directory", func() {
|
||||
@@ -646,7 +429,7 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||
|
||||
err := fs.Copy("source.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrNotEnoughDiskSpace)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("should create a copy of the file and increment the disk used", func() {
|
||||
@@ -721,7 +504,7 @@ func TestFilesystem_Delete(t *testing.T) {
|
||||
|
||||
err = fs.Delete("../ext-source.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, ErrBadPathResolution)).IsTrue()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("does not allow the deletion of the root directory", func() {
|
||||
|
||||
@@ -2,13 +2,30 @@ package filesystem
|
||||
|
||||
import (
|
||||
"context"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Checks if the given file or path is in the server's file denylist. If so, an Error
// is returned, otherwise nil is returned.
func (fs *Filesystem) IsIgnored(paths ...string) error {
    for _, p := range paths {
        sp, err := fs.SafePath(p)
        if err != nil {
            return err
        }
        if fs.denylist.MatchesPath(sp) {
            return &Error{code: ErrCodeDenylistFile, path: p, resolved: sp}
        }
    }
    return nil
}
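IsIgnored only relies on the denylist exposing a MatchesPath method. The concrete matcher is not part of this hunk (it is presumably a gitignore-style matcher built from the server's configured patterns); a stand-in that shows the required shape using simple glob matching, purely illustrative:

type globDenylist struct {
    patterns []string
}

func (d *globDenylist) MatchesPath(p string) bool {
    for _, pattern := range d.patterns {
        // Match against the base name only; a real denylist would also handle
        // directory patterns and nested paths.
        if ok, err := filepath.Match(pattern, filepath.Base(p)); err == nil && ok {
            return true
        }
    }
    return false
}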
|
||||
|
||||
// Normalizes a directory being passed in to ensure the user is not able to escape
|
||||
// from their data directory. After normalization if the directory is still within their home
|
||||
// path it is returned. If they managed to "escape" an error will be returned.
|
||||
@@ -23,9 +40,9 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
|
||||
|
||||
// At the same time, evaluate the symlink status and determine where this file or folder
|
||||
// is truly pointing to.
|
||||
p, err := filepath.EvalSymlinks(r)
|
||||
ep, err := filepath.EvalSymlinks(r)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return "", err
|
||||
return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
|
||||
} else if os.IsNotExist(err) {
|
||||
// The requested directory doesn't exist, so at this point we need to iterate up the
|
||||
// path chain until we hit a directory that _does_ exist and can be validated.
|
||||
@@ -53,7 +70,7 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
|
||||
// attempt going on, and we should NOT resolve this path for them.
|
||||
if nonExistentPathResolution != "" {
|
||||
if !fs.unsafeIsInDataDirectory(nonExistentPathResolution) {
|
||||
return "", ErrBadPathResolution
|
||||
return "", NewBadPathResolution(p, nonExistentPathResolution)
|
||||
}
|
||||
|
||||
// If the nonExistentPathResolution variable is not empty then the initial path requested
|
||||
@@ -66,11 +83,11 @@ func (fs *Filesystem) SafePath(p string) (string, error) {
|
||||
// If the requested directory from EvalSymlinks begins with the server root directory go
|
||||
// ahead and return it. If not we'll return an error which will block any further action
|
||||
// on the file.
|
||||
if fs.unsafeIsInDataDirectory(p) {
|
||||
return p, nil
|
||||
if fs.unsafeIsInDataDirectory(ep) {
|
||||
return ep, nil
|
||||
}
|
||||
|
||||
return "", ErrBadPathResolution
|
||||
return "", NewBadPathResolution(p, r)
|
||||
}
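unsafeIsInDataDirectory is not shown in this hunk. Conceptually it is a prefix check against the server root; a sketch of that idea, with a trailing slash added so that a root of /srv/server does not accidentally match a sibling such as /srv/server2 (the method name here is illustrative):

func (fs *Filesystem) isInRoot(p string) bool {
    root := strings.TrimSuffix(fs.Path(), "/") + "/"
    return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", root)
}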
|
||||
|
||||
// Generate a path to the file by cleaning it up and appending the root server path to it. This
|
||||
|
||||
228 server/filesystem/path_test.go Normal file
@@ -0,0 +1,228 @@
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"emperror.dev/errors"
|
||||
. "github.com/franela/goblin"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFilesystem_Path(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
fs, rfs := NewFs()
|
||||
|
||||
g.Describe("Path", func() {
|
||||
g.It("returns the root path for the instance", func() {
|
||||
g.Assert(fs.Path()).Equal(filepath.Join(rfs.root, "/server"))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestFilesystem_SafePath(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
fs, rfs := NewFs()
|
||||
prefix := filepath.Join(rfs.root, "/server")
|
||||
|
||||
g.Describe("SafePath", func() {
|
||||
g.It("returns a cleaned path to a given file", func() {
|
||||
p, err := fs.SafePath("test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("/test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("./test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("/foo/../test.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/test.txt")
|
||||
|
||||
p, err = fs.SafePath("/foo/bar")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
||||
})
|
||||
|
||||
g.It("handles root directory access", func() {
|
||||
p, err := fs.SafePath("/")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix)
|
||||
|
||||
p, err = fs.SafePath("")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix)
|
||||
})
|
||||
|
||||
g.It("removes trailing slashes from paths", func() {
|
||||
p, err := fs.SafePath("/foo/bar/")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
||||
})
|
||||
|
||||
g.It("handles deeply nested directories that do not exist", func() {
|
||||
p, err := fs.SafePath("/foo/bar/baz/quaz/../../ducks/testing.txt")
|
||||
g.Assert(err).IsNil()
|
||||
g.Assert(p).Equal(prefix + "/foo/bar/ducks/testing.txt")
|
||||
})
|
||||
|
||||
g.It("blocks access to files outside the root directory", func() {
|
||||
p, err := fs.SafePath("../test.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
|
||||
p, err = fs.SafePath("/../test.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
|
||||
p, err = fs.SafePath("./foo/../../test.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
|
||||
p, err = fs.SafePath("..")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
g.Assert(p).Equal("")
|
||||
})
|
||||
})
|
||||
}
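These tests now assert on error codes via IsErrorCode rather than comparing sentinel errors. The helper itself is not part of this hunk; a plausible sketch, assuming the package's *Error type (seen earlier with a code field) and an ErrorCode type behind the ErrCode* constants:

func IsErrorCode(err error, code ErrorCode) bool {
    var fsErr *Error
    if errors.As(err, &fsErr) {
        return fsErr.code == code
    }
    return false
}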
|
||||
|
||||
// We test against accessing files outside the root directory in the tests, however it
|
||||
// is still possible for someone to mess up and not properly use this safe path call. In
|
||||
// order to truly confirm this, we'll try to pass in a symlinked malicious file to all of
|
||||
// the calls and ensure they all fail with the same reason.
|
||||
func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||
g := Goblin(t)
|
||||
fs, rfs := NewFs()
|
||||
|
||||
if err := rfs.CreateServerFile("/../malicious.txt", "external content"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := os.Mkdir(filepath.Join(rfs.root, "/malicious_dir"), 0777); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := os.Symlink(filepath.Join(rfs.root, "malicious.txt"), filepath.Join(rfs.root, "/server/symlinked.txt")); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := os.Symlink(filepath.Join(rfs.root, "/malicious_dir"), filepath.Join(rfs.root, "/server/external_dir")); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
g.Describe("Readfile", func() {
|
||||
g.It("cannot read a file symlinked outside the root", func() {
|
||||
b := bytes.Buffer{}
|
||||
|
||||
err := fs.Readfile("symlinked.txt", &b)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Writefile", func() {
|
||||
g.It("cannot write to a file symlinked outside the root", func() {
|
||||
r := bytes.NewReader([]byte("testing"))
|
||||
|
||||
err := fs.Writefile("symlinked.txt", r)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot write a file to a directory symlinked outside the root", func() {
|
||||
r := bytes.NewReader([]byte("testing"))
|
||||
|
||||
err := fs.Writefile("external_dir/foo.txt", r)
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("CreateDirectory", func() {
|
||||
g.It("cannot create a directory outside the root", func() {
|
||||
err := fs.CreateDirectory("my_dir", "external_dir")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot create a nested directory outside the root", func() {
|
||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot create a nested directory outside the root", func() {
|
||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Rename", func() {
|
||||
g.It("cannot rename a file symlinked outside the directory root", func() {
|
||||
err := fs.Rename("symlinked.txt", "foo.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot rename a symlinked directory outside the root", func() {
|
||||
err := fs.Rename("external_dir", "foo")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot rename a file to a location outside the directory root", func() {
|
||||
rfs.CreateServerFile("my_file.txt", "internal content")
|
||||
|
||||
err := fs.Rename("my_file.txt", "external_dir/my_file.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Chown", func() {
|
||||
g.It("cannot chown a file symlinked outside the directory root", func() {
|
||||
err := fs.Chown("symlinked.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
|
||||
g.It("cannot chown a directory symlinked outside the directory root", func() {
|
||||
err := fs.Chown("external_dir")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Copy", func() {
|
||||
g.It("cannot copy a file symlinked outside the directory root", func() {
|
||||
err := fs.Copy("symlinked.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
g.Describe("Delete", func() {
|
||||
g.It("deletes the symlinked file but leaves the source", func() {
|
||||
err := fs.Delete("symlinked.txt")
|
||||
g.Assert(err).IsNil()
|
||||
|
||||
_, err = os.Stat(filepath.Join(rfs.root, "malicious.txt"))
|
||||
g.Assert(err).IsNil()
|
||||
|
||||
_, err = rfs.StatServerFile("symlinked.txt")
|
||||
g.Assert(err).IsNotNil()
|
||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
||||
})
|
||||
})
|
||||
|
||||
rfs.reset()
|
||||
}
|
||||
@@ -2,13 +2,15 @@ package filesystem
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/gabriel-vasile/mimetype"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/gabriel-vasile/mimetype"
|
||||
)
|
||||
|
||||
type Stat struct {
|
||||
Info os.FileInfo
|
||||
os.FileInfo
|
||||
Mimetype string
|
||||
}
|
||||
|
||||
@@ -18,54 +20,55 @@ func (s *Stat) MarshalJSON() ([]byte, error) {
|
||||
Created string `json:"created"`
|
||||
Modified string `json:"modified"`
|
||||
Mode string `json:"mode"`
|
||||
ModeBits string `json:"mode_bits"`
|
||||
Size int64 `json:"size"`
|
||||
Directory bool `json:"directory"`
|
||||
File bool `json:"file"`
|
||||
Symlink bool `json:"symlink"`
|
||||
Mime string `json:"mime"`
|
||||
}{
|
||||
Name: s.Info.Name(),
|
||||
Name: s.Name(),
|
||||
Created: s.CTime().Format(time.RFC3339),
|
||||
Modified: s.Info.ModTime().Format(time.RFC3339),
|
||||
Mode: s.Info.Mode().String(),
|
||||
Size: s.Info.Size(),
|
||||
Directory: s.Info.IsDir(),
|
||||
File: !s.Info.IsDir(),
|
||||
Symlink: s.Info.Mode().Perm()&os.ModeSymlink != 0,
|
||||
Modified: s.ModTime().Format(time.RFC3339),
|
||||
Mode: s.Mode().String(),
|
||||
// Using `&os.ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
|
||||
ModeBits: strconv.FormatUint(uint64(s.Mode()&os.ModePerm), 8),
|
||||
Size: s.Size(),
|
||||
Directory: s.IsDir(),
|
||||
File: !s.IsDir(),
|
||||
Symlink: s.Mode().Perm()&os.ModeSymlink != 0,
|
||||
Mime: s.Mimetype,
|
||||
})
|
||||
}
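Because Stat now embeds os.FileInfo, the promoted methods (Name, Size, Mode, ModTime, IsDir) can be called on the struct directly, which is exactly what MarshalJSON does above. A small usage sketch, with an arbitrary file path chosen only for the example:

func statExample() error {
    info, err := os.Stat("/etc/hostname") // any existing file works for the illustration
    if err != nil {
        return err
    }
    st := Stat{FileInfo: info, Mimetype: "text/plain; charset=utf-8"}
    b, err := json.Marshal(&st) // routed through the MarshalJSON method shown above
    if err != nil {
        return err
    }
    fmt.Println(st.Name(), st.IsDir(), string(b))
    return nil
}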
|
||||
|
||||
// Stats a file or folder and returns the base stat object from go along with the
|
||||
// MIME data that can be used for editing files.
|
||||
func (fs *Filesystem) Stat(p string) (*Stat, error) {
|
||||
// Stat stats a file or folder and returns the base stat object from go along
|
||||
// with the MIME data that can be used for editing files.
|
||||
func (fs *Filesystem) Stat(p string) (Stat, error) {
|
||||
cleaned, err := fs.SafePath(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return Stat{}, err
|
||||
}
|
||||
|
||||
return fs.unsafeStat(cleaned)
|
||||
}
|
||||
|
||||
func (fs *Filesystem) unsafeStat(p string) (*Stat, error) {
|
||||
func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
|
||||
s, err := os.Stat(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return Stat{}, err
|
||||
}
|
||||
|
||||
var m *mimetype.MIME
|
||||
if !s.IsDir() {
|
||||
m, err = mimetype.DetectFile(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return Stat{}, err
|
||||
}
|
||||
}
|
||||
|
||||
st := &Stat{
|
||||
Info: s,
|
||||
st := Stat{
|
||||
FileInfo: s,
|
||||
Mimetype: "inode/directory",
|
||||
}
|
||||
|
||||
if m != nil {
|
||||
st.Mimetype = m.String()
|
||||
}
|
||||
|
||||
@@ -5,9 +5,9 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Returns the time that the file/folder was created.
|
||||
// CTime returns the time that the file/folder was created.
|
||||
func (s *Stat) CTime() time.Time {
|
||||
st := s.Info.Sys().(*syscall.Stat_t)
|
||||
st := s.Sys().(*syscall.Stat_t)
|
||||
|
||||
return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec)
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
// Returns the time that the file/folder was created.
|
||||
func (s *Stat) CTime() time.Time {
|
||||
st := s.Info.Sys().(*syscall.Stat_t)
|
||||
st := s.Sys().(*syscall.Stat_t)
|
||||
|
||||
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
|
||||
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
||||
|
||||
@@ -8,5 +8,5 @@ import (
|
||||
// However, I have no idea how to do this on windows, so we're skipping it
|
||||
// for right now.
|
||||
func (s *Stat) CTime() time.Time {
|
||||
return s.Info.ModTime()
|
||||
return s.ModTime()
|
||||
}
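The three CTime bodies above live in separate per-platform source files, so only one of them is compiled for a given target. Note that the s.Sys().(*syscall.Stat_t) assertion panics if the underlying FileInfo is not backed by a Stat_t; a more defensive Linux-flavoured variant, shown only to make the cast explicit and not part of the diff:

func ctimeOrModTime(info os.FileInfo) time.Time {
    if st, ok := info.Sys().(*syscall.Stat_t); ok {
        // The int64 casts matter on 32-bit builds, as the comment in the diff notes.
        return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
    }
    return info.ModTime()
}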
|
||||
|
||||
@@ -4,22 +4,23 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"golang.org/x/sync/semaphore"
|
||||
"html/template"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
"strings"
|
||||
|
||||
"emperror.dev/errors"
|
||||
"github.com/apex/log"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
)
|
||||
|
||||
// Executes the installation stack for a server process. Bubbles any errors up to the calling
|
||||
@@ -47,7 +48,7 @@ func (s *Server) Install(sync bool) error {
|
||||
s.Log().Info("server configured to skip running installation scripts for this egg, not executing process")
|
||||
}
|
||||
|
||||
s.Log().Debug("notifying panel of server install state")
|
||||
s.Log().WithField("was_successful", err == nil).Debug("notifying panel of server install state")
|
||||
if serr := s.SyncInstallState(err == nil); serr != nil {
|
||||
l := s.Log().WithField("was_successful", err == nil)
|
||||
|
||||
@@ -63,7 +64,7 @@ func (s *Server) Install(sync bool) error {
|
||||
|
||||
// Ensure that the server is marked as offline at this point, otherwise you end up
|
||||
// with a blank value which is a bit confusing.
|
||||
s.SetState(environment.ProcessOfflineState)
|
||||
s.Environment.SetState(environment.ProcessOfflineState)
|
||||
|
||||
// Push an event to the websocket so we can auto-refresh the information in the panel once
|
||||
// the install is completed.
|
||||
@@ -75,7 +76,7 @@ func (s *Server) Install(sync bool) error {
|
||||
// Reinstalls a server's software by utilizing the install script for the server egg. This
|
||||
// does not touch any existing files for the server, other than what the script modifies.
|
||||
func (s *Server) Reinstall() error {
|
||||
if s.GetState() != environment.ProcessOfflineState {
|
||||
if s.Environment.State() != environment.ProcessOfflineState {
|
||||
s.Log().Debug("waiting for server instance to enter a stopped state")
|
||||
if err := s.Environment.WaitForStop(10, true); err != nil {
|
||||
return err
|
||||
@@ -87,18 +88,18 @@ func (s *Server) Reinstall() error {
|
||||
|
||||
// Internal installation function used to simplify reporting back to the Panel.
|
||||
func (s *Server) internalInstall() error {
|
||||
script, rerr, err := api.NewRequester().GetInstallationScript(s.Id())
|
||||
if err != nil || rerr != nil {
|
||||
script, err := s.client.GetInstallationScript(s.Context(), s.Id())
|
||||
if err != nil {
|
||||
if !remote.IsRequestError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.New(rerr.String())
|
||||
return errors.New(err.Error())
|
||||
}
|
||||
|
||||
p, err := NewInstallationProcess(s, &script)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
s.Log().Info("beginning installation process for server")
|
||||
@@ -112,7 +113,7 @@ func (s *Server) internalInstall() error {
|
||||
|
||||
type InstallationProcess struct {
|
||||
Server *Server
|
||||
Script *api.InstallationScript
|
||||
Script *remote.InstallationScript
|
||||
|
||||
client *client.Client
|
||||
context context.Context
|
||||
@@ -120,81 +121,46 @@ type InstallationProcess struct {
|
||||
|
||||
// Generates a new installation process struct that will be used to create containers,
|
||||
// and otherwise perform installation commands for a server.
|
||||
func NewInstallationProcess(s *Server, script *api.InstallationScript) (*InstallationProcess, error) {
|
||||
func NewInstallationProcess(s *Server, script *remote.InstallationScript) (*InstallationProcess, error) {
|
||||
proc := &InstallationProcess{
|
||||
Script: script,
|
||||
Server: s,
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s.installer.cancel = &cancel
|
||||
|
||||
if c, err := environment.DockerClient(); err != nil {
|
||||
return nil, errors.WithStack(err)
|
||||
if c, err := environment.Docker(); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
proc.client = c
|
||||
proc.context = ctx
|
||||
proc.context = s.Context()
|
||||
}
|
||||
|
||||
return proc, nil
|
||||
}
|
||||
|
||||
// Try to obtain an exclusive lock on the installation process for the server. Waits up to 10
|
||||
// seconds before aborting with a context timeout.
|
||||
func (s *Server) acquireInstallationLock() error {
|
||||
if s.installer.sem == nil {
|
||||
s.installer.sem = semaphore.NewWeighted(1)
|
||||
}
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second*10)
|
||||
|
||||
return s.installer.sem.Acquire(ctx, 1)
|
||||
}
|
||||
|
||||
// Determines if the server is actively running the installation process by checking the status
|
||||
// of the semaphore lock.
|
||||
// of the installer lock.
|
||||
func (s *Server) IsInstalling() bool {
|
||||
if s.installer.sem == nil {
|
||||
return false
|
||||
return s.installing.Load()
|
||||
}
|
||||
|
||||
if s.installer.sem.TryAcquire(1) {
|
||||
// If we made it into this block it means we were able to obtain an exclusive lock
|
||||
// on the semaphore. In that case, go ahead and release that lock immediately, and
|
||||
// return false.
|
||||
s.installer.sem.Release(1)
|
||||
|
||||
return false
|
||||
func (s *Server) IsTransferring() bool {
|
||||
return s.transferring.Load()
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Aborts the server installation process by calling the cancel function on the installer
|
||||
// context.
|
||||
func (s *Server) AbortInstallation() {
|
||||
if !s.IsInstalling() {
|
||||
return
|
||||
}
|
||||
|
||||
if s.installer.cancel != nil {
|
||||
cancel := *s.installer.cancel
|
||||
|
||||
s.Log().Warn("aborting running installation process")
|
||||
cancel()
|
||||
}
|
||||
func (s *Server) SetTransferring(state bool) {
|
||||
s.transferring.Store(state)
|
||||
}
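The semaphore-based installation lock is replaced here by atomic booleans (s.installing, s.transferring). SwapIf is the interesting piece: it only flips the flag when it is not already set, so exactly one caller can win the install lock. A minimal sketch of such a type built on sync/atomic, assuming it lives in the wings system package; the names are illustrative:

type AtomicBool struct{ v uint32 }

func (ab *AtomicBool) Load() bool { return atomic.LoadUint32(&ab.v) == 1 }

func (ab *AtomicBool) Store(v bool) {
    var i uint32
    if v {
        i = 1
    }
    atomic.StoreUint32(&ab.v, i)
}

// SwapIf sets the value to v only if it is not already v, and reports whether the
// swap happened. Run() uses this so a second concurrent install attempt fails fast.
func (ab *AtomicBool) SwapIf(v bool) bool {
    var oldV, newV uint32 = 1, 0
    if v {
        oldV, newV = 0, 1
    }
    return atomic.CompareAndSwapUint32(&ab.v, oldV, newV)
}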
|
||||
|
||||
// Removes the installer container for the server.
|
||||
func (ip *InstallationProcess) RemoveContainer() {
|
||||
func (ip *InstallationProcess) RemoveContainer() error {
|
||||
err := ip.client.ContainerRemove(ip.context, ip.Server.Id()+"_installer", types.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
})
|
||||
|
||||
if err != nil && !client.IsErrNotFound(err) {
|
||||
ip.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to delete server install container")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Runs the installation process, this is done as in a background thread. This will configure
|
||||
@@ -204,8 +170,8 @@ func (ip *InstallationProcess) RemoveContainer() {
|
||||
// log in the server's configuration directory.
|
||||
func (ip *InstallationProcess) Run() error {
|
||||
ip.Server.Log().Debug("acquiring installation process lock")
|
||||
if err := ip.Server.acquireInstallationLock(); err != nil {
|
||||
return err
|
||||
if !ip.Server.installing.SwapIf(true) {
|
||||
return errors.New("install: cannot obtain installation lock")
|
||||
}
|
||||
|
||||
// We now have an exclusive lock on this installation process. Ensure that whenever this
|
||||
@@ -213,24 +179,22 @@ func (ip *InstallationProcess) Run() error {
|
||||
// without encountering a wait timeout.
|
||||
defer func() {
|
||||
ip.Server.Log().Debug("releasing installation process lock")
|
||||
ip.Server.installer.sem.Release(1)
|
||||
ip.Server.installer.cancel = nil
|
||||
ip.Server.installing.Store(false)
|
||||
}()
|
||||
|
||||
if err := ip.BeforeExecute(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
cid, err := ip.Execute()
|
||||
cID, err := ip.Execute()
|
||||
if err != nil {
|
||||
ip.RemoveContainer()
|
||||
|
||||
return errors.WithStack(err)
|
||||
_ = ip.RemoveContainer()
|
||||
return err
|
||||
}
|
||||
|
||||
// If this step fails, log a warning but don't exit out of the process. This is completely
|
||||
// internal to the daemon's functionality, and does not affect the status of the server itself.
|
||||
if err := ip.AfterExecute(cid); err != nil {
|
||||
if err := ip.AfterExecute(cID); err != nil {
|
||||
ip.Server.Log().WithField("error", err).Warn("failed to complete after-execute step of installation process")
|
||||
}
|
||||
|
||||
@@ -248,12 +212,12 @@ func (ip *InstallationProcess) writeScriptToDisk() error {
|
||||
// Make sure the temp directory root exists before trying to make a directory within it. The
|
||||
// ioutil.TempDir call expects this base to exist, it won't create it for you.
|
||||
if err := os.MkdirAll(ip.tempDir(), 0700); err != nil {
|
||||
return errors.Wrap(err, "could not create temporary directory for install process")
|
||||
return errors.WithMessage(err, "could not create temporary directory for install process")
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to write server installation script to disk before mount")
|
||||
return errors.WithMessage(err, "failed to write server installation script to disk before mount")
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
@@ -265,7 +229,7 @@ func (ip *InstallationProcess) writeScriptToDisk() error {
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
w.Flush()
|
||||
@@ -275,11 +239,62 @@ func (ip *InstallationProcess) writeScriptToDisk() error {
|
||||
|
||||
// Pulls the docker image to be used for the installation container.
|
||||
func (ip *InstallationProcess) pullInstallationImage() error {
|
||||
r, err := ip.client.ImagePull(ip.context, ip.Script.ContainerImage, types.ImagePullOptions{})
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
// Get a registry auth configuration from the config.
|
||||
var registryAuth *config.RegistryConfiguration
|
||||
for registry, c := range config.Get().Docker.Registries {
|
||||
if !strings.HasPrefix(ip.Script.ContainerImage, registry) {
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithField("registry", registry).Debug("using authentication for registry")
|
||||
registryAuth = &c
|
||||
break
|
||||
}
|
||||
|
||||
// Get the ImagePullOptions.
|
||||
imagePullOptions := types.ImagePullOptions{All: false}
|
||||
if registryAuth != nil {
|
||||
b64, err := registryAuth.Base64()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("failed to get registry auth credentials")
|
||||
}
|
||||
|
||||
// b64 is a string so if there is an error it will just be empty, not nil.
|
||||
imagePullOptions.RegistryAuth = b64
|
||||
}
|
||||
|
||||
r, err := ip.client.ImagePull(context.Background(), ip.Script.ContainerImage, imagePullOptions)
|
||||
if err != nil {
|
||||
images, ierr := ip.client.ImageList(context.Background(), types.ImageListOptions{})
|
||||
if ierr != nil {
|
||||
// Well damn, something has gone really wrong here, just go ahead and abort; there
// isn't much of anything we can do to try and self-recover from this.
|
||||
return ierr
|
||||
}
|
||||
|
||||
for _, img := range images {
|
||||
for _, t := range img.RepoTags {
|
||||
if t != ip.Script.ContainerImage {
|
||||
continue
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"image": ip.Script.ContainerImage,
|
||||
"err": err.Error(),
|
||||
}).Warn("unable to pull requested image from remote source, however the image exists locally")
|
||||
|
||||
// Okay, we found a matching container image, in that case just go ahead and return
|
||||
// from this function, since there is nothing else we need to do here.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
log.WithField("image", ip.Script.ContainerImage).Debug("pulling docker image... this could take a bit of time")
|
||||
|
||||
// Block continuation until the image has been pulled successfully.
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
@@ -287,7 +302,7 @@ func (ip *InstallationProcess) pullInstallationImage() error {
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
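The RegistryConfiguration.Base64() helper used above is not part of this diff. The Docker SDK convention it presumably follows is to JSON-encode an auth config and base64url-encode the result so it can be placed in ImagePullOptions.RegistryAuth; a sketch under that assumption (imports: encoding/base64, encoding/json, github.com/docker/docker/api/types):

func encodeRegistryAuth(username, password string) (string, error) {
    buf, err := json.Marshal(types.AuthConfig{
        Username: username,
        Password: password,
    })
    if err != nil {
        return "", err
    }
    return base64.URLEncoding.EncodeToString(buf), nil
}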
|
||||
@@ -298,30 +313,20 @@ func (ip *InstallationProcess) pullInstallationImage() error {
|
||||
// manner, if either one fails the error is returned.
|
||||
func (ip *InstallationProcess) BeforeExecute() error {
|
||||
if err := ip.writeScriptToDisk(); err != nil {
|
||||
return errors.Wrap(err, "failed to write installation script to disk")
|
||||
return errors.WithMessage(err, "failed to write installation script to disk")
|
||||
}
|
||||
|
||||
if err := ip.pullInstallationImage(); err != nil {
|
||||
return errors.Wrap(err, "failed to pull updated installation container image for server")
|
||||
return errors.WithMessage(err, "failed to pull updated installation container image for server")
|
||||
}
|
||||
|
||||
opts := types.ContainerRemoveOptions{
|
||||
RemoveVolumes: true,
|
||||
Force: true,
|
||||
if err := ip.RemoveContainer(); err != nil {
|
||||
return errors.WithMessage(err, "failed to remove existing install container for server")
|
||||
}
|
||||
|
||||
if err := ip.client.ContainerRemove(ip.context, ip.Server.Id()+"_installer", opts); err != nil {
|
||||
if !client.IsErrNotFound(err) {
|
||||
return errors.Wrap(err, "failed to remove existing install container for server")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the log path for the installation process.
|
||||
func (ip *InstallationProcess) GetLogPath() string {
|
||||
return filepath.Join(config.Get().System.GetInstallLogPath(), ip.Server.Id()+".log")
|
||||
return filepath.Join(config.Get().System.LogDirectory, "/install", ip.Server.Id()+".log")
|
||||
}
|
||||
|
||||
// Cleans up after the execution of the installation process. This grabs the logs from the
|
||||
@@ -338,12 +343,12 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
|
||||
})
|
||||
|
||||
if err != nil && !client.IsErrNotFound(err) {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(ip.GetLogPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
@@ -372,15 +377,15 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
|
||||
| ------------------------------
|
||||
`)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tmpl.Execute(f, ip); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(f, reader); err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -388,6 +393,12 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
|
||||
|
||||
// Executes the installation process inside a specially created docker container.
|
||||
func (ip *InstallationProcess) Execute() (string, error) {
|
||||
// Create a child context that is canceled once this function is done running. This
|
||||
// will also be canceled if the parent context (from the Server struct) is canceled
|
||||
// which occurs if the server is deleted.
|
||||
ctx, cancel := context.WithCancel(ip.context)
|
||||
defer cancel()
|
||||
|
||||
conf := &container.Config{
|
||||
Hostname: "installer",
|
||||
AttachStdout: true,
|
||||
@@ -436,6 +447,14 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
||||
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
|
||||
}
|
||||
|
||||
// Ensure the root directory for the server exists properly before attempting
|
||||
// to trigger the reinstall of the server. It is possible the directory would
|
||||
// not exist when this runs if Wings boots with a missing directory and a user
|
||||
// triggers a reinstall before trying to start the server.
|
||||
if err := ip.Server.EnsureDataDirectoryExists(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ip.Server.Log().WithField("install_script", ip.tempDir()+"/install.sh").Info("creating install container for server process")
|
||||
// Remove the temporary directory when the installation process finishes for this server container.
|
||||
defer func() {
|
||||
@@ -446,29 +465,36 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
||||
}
|
||||
}()
|
||||
|
||||
r, err := ip.client.ContainerCreate(ip.context, conf, hostConf, nil, ip.Server.Id()+"_installer")
|
||||
r, err := ip.client.ContainerCreate(ctx, conf, hostConf, nil, nil, ip.Server.Id()+"_installer")
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
|
||||
ip.Server.Log().WithField("container_id", r.ID).Info("running installation script for server in container")
|
||||
if err := ip.client.ContainerStart(ip.context, r.ID, types.ContainerStartOptions{}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ip.Server.Log().WithField("container_id", r.ID).Info("running installation script for server in container")
|
||||
if err := ip.client.ContainerStart(ctx, r.ID, types.ContainerStartOptions{}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Process the install event in the background by listening to the stream output until the
|
||||
// container has stopped, at which point we'll disconnect from it.
|
||||
//
|
||||
// If there is an error during the streaming output just report it and do nothing else, the
|
||||
// install can still run, the console just won't have any output.
|
||||
go func(id string) {
|
||||
ip.Server.Events().Publish(DaemonMessageEvent, "Starting installation process, this could take a few minutes...")
|
||||
if err := ip.StreamOutput(id); err != nil {
|
||||
ip.Server.Log().WithField("error", err).Error("error while handling output stream for server install process")
|
||||
if err := ip.StreamOutput(ctx, id); err != nil {
|
||||
ip.Server.Log().WithField("error", err).Warn("error connecting to server install stream output")
|
||||
}
|
||||
ip.Server.Events().Publish(DaemonMessageEvent, "Installation process completed.")
|
||||
}(r.ID)
|
||||
|
||||
sChan, eChan := ip.client.ContainerWait(ip.context, r.ID, container.WaitConditionNotRunning)
|
||||
sChan, eChan := ip.client.ContainerWait(ctx, r.ID, container.WaitConditionNotRunning)
|
||||
select {
|
||||
case err := <-eChan:
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
// Once the container has stopped running we can mark the install process as being completed.
|
||||
if err == nil {
|
||||
ip.Server.Events().Publish(DaemonMessageEvent, "Installation process completed.")
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
case <-sChan:
|
||||
}
|
||||
@@ -479,31 +505,25 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
||||
// Streams the output of the installation process to a log file in the server configuration
|
||||
// directory, as well as to a websocket listener so that the process can be viewed in
|
||||
// the panel by administrators.
|
||||
func (ip *InstallationProcess) StreamOutput(id string) error {
|
||||
reader, err := ip.client.ContainerLogs(ip.context, id, types.ContainerLogsOptions{
|
||||
func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
|
||||
reader, err := ip.client.ContainerLogs(ctx, id, types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: true,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
return err
|
||||
}
|
||||
|
||||
defer reader.Close()
|
||||
|
||||
s := bufio.NewScanner(reader)
|
||||
for s.Scan() {
|
||||
ip.Server.Events().Publish(InstallOutputEvent, s.Text())
|
||||
evts := ip.Server.Events()
|
||||
err = system.ScanReader(reader, func(line string) {
|
||||
evts.Publish(InstallOutputEvent, line)
|
||||
})
|
||||
if err != nil {
|
||||
ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines")
|
||||
}
|
||||
|
||||
if err := s.Err(); err != nil {
|
||||
ip.Server.Log().WithFields(log.Fields{
|
||||
"container_id": id,
|
||||
"error": errors.WithStack(err),
|
||||
}).Warn("error processing scanner line in installation output for server")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
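StreamOutput now delegates line handling to system.ScanReader instead of owning a bufio.Scanner directly. That helper is not shown in this diff; its core is presumably a scanner that invokes a callback per line, something like:

func ScanReader(r io.Reader, callback func(line string)) error {
    scanner := bufio.NewScanner(r)
    for scanner.Scan() {
        callback(scanner.Text())
    }
    return scanner.Err()
}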
|
||||
|
||||
@@ -512,15 +532,13 @@ func (ip *InstallationProcess) StreamOutput(id string) error {
|
||||
// value of "true" means everything was successful, "false" means something went
|
||||
// wrong and the server must be deleted and re-created.
|
||||
func (s *Server) SyncInstallState(successful bool) error {
|
||||
r := api.NewRequester()
|
||||
|
||||
rerr, err := r.SendInstallationStatus(s.Id(), successful)
|
||||
if rerr != nil || err != nil {
|
||||
err := s.client.SetInstallationStatus(s.Context(), s.Id(), successful)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
if !remote.IsRequestError(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.New(rerr.String())
|
||||
return errors.New(err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -2,14 +2,15 @@ package server
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/apex/log"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/pterodactyl/wings/api"
|
||||
"github.com/pterodactyl/wings/config"
|
||||
"github.com/pterodactyl/wings/environment"
|
||||
"github.com/pterodactyl/wings/events"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"github.com/pterodactyl/wings/remote"
|
||||
)
|
||||
|
||||
var dockerEvents = []string{
|
||||
@@ -18,6 +19,37 @@ var dockerEvents = []string{
|
||||
environment.DockerImagePullCompleted,
|
||||
}
|
||||
|
||||
type diskSpaceLimiter struct {
    o      sync.Once
    mu     sync.Mutex
    server *Server
}

func newDiskLimiter(s *Server) *diskSpaceLimiter {
    return &diskSpaceLimiter{server: s}
}

// Reset the disk space limiter status.
func (dsl *diskSpaceLimiter) Reset() {
    dsl.mu.Lock()
    dsl.o = sync.Once{}
    dsl.mu.Unlock()
}

// Trigger the disk space limiter which will attempt to stop a running server instance within
// 15 seconds, and terminate it forcefully if it does not stop.
//
// This function is only executed one time, so whenever a server is marked as booting the limiter
// should be reset so it can properly be triggered as needed.
func (dsl *diskSpaceLimiter) Trigger() {
    dsl.o.Do(func() {
        dsl.server.PublishConsoleOutputFromDaemon("Server is exceeding the assigned disk space limit, stopping process now.")
        if err := dsl.server.Environment.WaitForStop(60, true); err != nil {
            dsl.server.Log().WithField("error", err).Error("failed to stop server after exceeding space limit!")
        }
    })
}
|
||||
|
||||
// Adds all of the internal event listeners we want to use for a server. These listeners can only be
|
||||
// removed by deleting the server as they should last for the duration of the process' lifetime.
|
||||
func (s *Server) StartEventListeners() {
|
||||
@@ -31,21 +63,23 @@ func (s *Server) StartEventListeners() {
|
||||
if err != nil {
|
||||
// If the process is already stopping, just let it continue with that action rather than attempting
|
||||
// to terminate again.
|
||||
if s.GetState() != environment.ProcessStoppingState {
|
||||
s.SetState(environment.ProcessStoppingState)
|
||||
if s.Environment.State() != environment.ProcessStoppingState {
|
||||
s.Environment.SetState(environment.ProcessStoppingState)
|
||||
|
||||
go func() {
|
||||
s.Log().Warn("stopping server instance, violating throttle limits")
|
||||
s.PublishConsoleOutputFromDaemon("Your server is being stopped for outputting too much data in a short period of time.")
|
||||
|
||||
// Completely skip over server power actions and terminate the running instance. This gives the
|
||||
// server 15 seconds to finish stopping gracefully before it is forcefully terminated.
|
||||
if err := s.Environment.WaitForStop(config.Get().Throttles.StopGracePeriod, true); err != nil {
|
||||
// If there is an error set the process back to running so that this throttler is called
|
||||
// again and hopefully kills the server.
|
||||
if s.GetState() != environment.ProcessOfflineState {
|
||||
s.SetState(environment.ProcessRunningState)
|
||||
if s.Environment.State() != environment.ProcessOfflineState {
|
||||
s.Environment.SetState(environment.ProcessRunningState)
|
||||
}
|
||||
|
||||
s.Log().WithField("error", errors.WithStack(err)).Error("failed to terminate environment after triggering throttle")
|
||||
s.Log().WithField("error", err).Error("failed to terminate environment after triggering throttle")
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -60,19 +94,21 @@ func (s *Server) StartEventListeners() {
		s.onConsoleOutput(e.Data)
	}

+	l := newDiskLimiter(s)
	state := func(e events.Event) {
		// Reset the throttler when the process is started.
		if e.Data == environment.ProcessStartingState {
+			l.Reset()
			s.Throttler().Reset()
		}

-		s.SetState(e.Data)
+		s.OnStateChange()
	}

	stats := func(e events.Event) {
		st := new(environment.Stats)
		if err := json.Unmarshal([]byte(e.Data), st); err != nil {
-			s.Log().WithField("error", errors.WithStack(err)).Warn("failed to unmarshal server environment stats")
+			s.Log().WithField("error", err).Warn("failed to unmarshal server environment stats")
			return
		}

@@ -81,7 +117,11 @@ func (s *Server) StartEventListeners() {
		s.resources.Stats = *st
		s.resources.mu.Unlock()

-		s.Filesystem().HasSpaceAvailable(true)
+		// If there is no disk space available at this point, trigger the server disk limiter logic
+		// which will start to stop the running instance.
+		if !s.Filesystem().HasSpaceAvailable(true) {
+			l.Trigger()
+		}

		s.emitProcUsage()
	}
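The stats handler above follows a decode-or-bail shape: unmarshal the event payload, log and return on failure, and only then act on the values. A self-contained sketch of that shape with a hypothetical payload struct (the real environment.Stats type is defined elsewhere in this codebase):

package example

import (
	"encoding/json"
	"log"
)

// resourcePayload is a stand-in for the JSON document carried by a resource event.
type resourcePayload struct {
	MemoryBytes uint64  `json:"memory_bytes"`
	CpuAbsolute float64 `json:"cpu_absolute"`
}

// handleStats decodes the raw payload and hands it to onStats, dropping bad
// payloads with a warning rather than letting them crash the listener.
func handleStats(raw string, onStats func(resourcePayload)) {
	st := new(resourcePayload)
	if err := json.Unmarshal([]byte(raw), st); err != nil {
		log.Printf("failed to unmarshal server environment stats: %v", err)
		return
	}
	onStats(*st)
}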
@@ -96,7 +136,7 @@ func (s *Server) StartEventListeners() {
		}
	}

-	s.Log().Info("registering event listeners: console, state, resources...")
+	s.Log().Debug("registering event listeners: console, state, resources...")
	s.Environment.Events().On(environment.ConsoleOutputEvent, &console)
	s.Environment.Events().On(environment.StateChangeEvent, &state)
	s.Environment.Events().On(environment.ResourceEvent, &stats)
@@ -114,7 +154,7 @@ func (s *Server) onConsoleOutput(data string) {
	processConfiguration := s.ProcessConfiguration()

	// Check if the server is currently starting.
-	if s.GetState() == environment.ProcessStartingState {
+	if s.Environment.State() == environment.ProcessStartingState {
		// Check if we should strip ansi color codes.
		if processConfiguration.Startup.StripAnsi {
			// Strip ansi color codes from the data string.
@@ -135,7 +175,7 @@ func (s *Server) onConsoleOutput(data string) {
			// If the specific line of output is one that would mark the server as started,
			// set the server to that state. Only do this if the server is not currently stopped
			// or stopping.
-			_ = s.SetState(environment.ProcessRunningState)
+			s.Environment.SetState(environment.ProcessRunningState)
			break
		}
	}
@@ -146,8 +186,8 @@ func (s *Server) onConsoleOutput(data string) {
	if s.IsRunning() {
		stop := processConfiguration.Stop

-		if stop.Type == api.ProcessStopCommand && data == stop.Value {
-			_ = s.SetState(environment.ProcessOfflineState)
+		if stop.Type == remote.ProcessStopCommand && data == stop.Value {
+			s.Environment.SetState(environment.ProcessOfflineState)
		}
	}
}
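onConsoleOutput, per the hunks above, does two string checks: during startup it looks for a line containing one of the configured "done" markers, and while running it compares each line against the configured stop command. A rough standalone sketch of that matching, with hypothetical helper names and marker values:

package example

import "strings"

// markedAsStarted reports whether a console line contains any of the "done"
// markers that indicate the server finished booting.
func markedAsStarted(line string, doneMarkers []string) bool {
	for _, marker := range doneMarkers {
		if strings.Contains(line, marker) {
			return true
		}
	}
	return false
}

// matchesStopCommand reports whether a console line is exactly the configured
// stop command, which the listener treats as the server going offline.
func matchesStopCommand(line, stopValue string) bool {
	return line == stopValue
}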
142	server/loader.go (entire file removed in this diff)
@@ -1,142 +0,0 @@
package server

import (
	"encoding/json"
	"fmt"
	"github.com/apex/log"
	"github.com/creasty/defaults"
	"github.com/gammazero/workerpool"
	"github.com/pkg/errors"
	"github.com/pterodactyl/wings/api"
	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/environment"
	"github.com/pterodactyl/wings/environment/docker"
	"github.com/pterodactyl/wings/server/filesystem"
	"os"
	"path/filepath"
	"runtime"
	"time"
)

var servers = NewCollection(nil)

func GetServers() *Collection {
	return servers
}

// Iterates over a given directory and loads all of the servers listed before returning
// them to the calling function.
func LoadDirectory() error {
	if len(servers.items) != 0 {
		return errors.New("cannot call LoadDirectory with a non-nil collection")
	}

	log.Info("fetching list of servers from API")
	configs, rerr, err := api.NewRequester().GetAllServerConfigurations()
	if err != nil || rerr != nil {
		if err != nil {
			return errors.WithStack(err)
		}

		return errors.New(rerr.String())
	}

	start := time.Now()
	log.WithField("total_configs", len(configs)).Info("processing servers returned by the API")

	pool := workerpool.New(runtime.NumCPU())
	for uuid, data := range configs {
		uuid := uuid
		data := data

		pool.Submit(func() {
			// Parse the json.RawMessage into an expected struct value. We do this here so that a single broken
			// server does not cause the entire boot process to hang, and allows us to show more useful error
			// messaging in the output.
			d := api.ServerConfigurationResponse{}

			log.WithField("server", uuid).Info("creating new server object from API response")
			if err := json.Unmarshal(data, &d); err != nil {
				log.WithField("server", uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
				return
			}

			s, err := FromConfiguration(d)
			if err != nil {
				log.WithField("server", uuid).WithField("error", err).Error("failed to load server, skipping...")
				return
			}

			servers.Add(s)
		})
	}

	// Wait until we've processed all of the configuration files in the directory
	// before continuing.
	pool.StopWait()

	diff := time.Now().Sub(start)
	log.WithField("duration", fmt.Sprintf("%s", diff)).Info("finished processing server configurations")

	return nil
}
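LoadDirectory above fans the returned configurations out to a fixed-size worker pool so one slow or malformed entry cannot stall the whole boot. A minimal sketch of the same gammazero/workerpool pattern; the payload map and handler are illustrative, not wings types:

package example

import (
	"log"
	"runtime"

	"github.com/gammazero/workerpool"
)

// processAll submits one task per item to a pool sized to the CPU count and
// blocks until every task has finished. Failures are logged and skipped.
func processAll(items map[string]string, handle func(id, data string) error) {
	pool := workerpool.New(runtime.NumCPU())
	for id, data := range items {
		// Re-bind the loop variables so each closure captures its own copy.
		id := id
		data := data

		pool.Submit(func() {
			if err := handle(id, data); err != nil {
				log.Printf("failed to process %s, skipping: %v", id, err)
			}
		})
	}

	// Wait for all submitted tasks before returning, mirroring pool.StopWait above.
	pool.StopWait()
}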
// Initializes a server using a data byte array. This will be marshaled into the
// given struct using a YAML marshaler. This will also configure the given environment
// for a server.
func FromConfiguration(data api.ServerConfigurationResponse) (*Server, error) {
	cfg := Configuration{}
	if err := defaults.Set(&cfg); err != nil {
		return nil, errors.Wrap(err, "failed to set struct defaults for server configuration")
	}

	s := new(Server)
	if err := defaults.Set(s); err != nil {
		return nil, errors.Wrap(err, "failed to set struct defaults for server")
	}

	s.cfg = cfg
	if err := s.UpdateDataStructure(data.Settings); err != nil {
		return nil, err
	}

	s.resources = ResourceUsage{}
	defaults.Set(&s.resources)

	s.Archiver = Archiver{Server: s}
	s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace())

	// Right now we only support a Docker based environment, so I'm going to hard code
	// this logic in. When we're ready to support other environment we'll need to make
	// some modifications here obviously.
	settings := environment.Settings{
		Mounts:      s.Mounts(),
		Allocations: s.cfg.Allocations,
		Limits:      s.cfg.Build,
	}

	envCfg := environment.NewConfiguration(settings, s.GetEnvironmentVariables())
	meta := docker.Metadata{
		Image: s.Config().Container.Image,
	}

	if env, err := docker.New(s.Id(), &meta, envCfg); err != nil {
		return nil, err
	} else {
		s.Environment = env
		s.StartEventListeners()
		s.Throttler().StartTimer()
	}

	// Forces the configuration to be synced with the panel.
	if err := s.SyncWithConfiguration(data); err != nil {
		return nil, err
	}

	// If the server's data directory exists, force disk usage calculation.
	if _, err := os.Stat(s.Filesystem().Path()); err == nil {
		s.Filesystem().HasSpaceAvailable(true)
	}

	return s, nil
}
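FromConfiguration above leans on creasty/defaults to pre-populate struct fields before the panel's data is merged in. A small example of how that library is typically used; the struct here is illustrative, not a wings type:

package example

import "github.com/creasty/defaults"

// exampleConfig is an illustrative struct; defaults are read from struct tags.
type exampleConfig struct {
	CrashDetectionEnabled bool   `default:"true"`
	StopGraceSeconds      int    `default:"15"`
	LogDirectory          string `default:"/var/log/example"`
}

// newExampleConfig fills zero-valued fields from their `default` tags.
func newExampleConfig() (*exampleConfig, error) {
	c := new(exampleConfig)
	if err := defaults.Set(c); err != nil {
		return nil, err
	}
	return c, nil
}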
Some files were not shown because too many files have changed in this diff.