Compare commits
99 Commits
release/v1
...
develop
Author | SHA1 | Date | |
---|---|---|---|
|
d739948989 | ||
|
ac260bd5ee | ||
|
2f4a0d7262 | ||
|
1d8b383682 | ||
|
934bf2493d | ||
|
29e4425e21 | ||
|
5a15612754 | ||
|
ad1ae862a9 | ||
|
3114a3b82e | ||
|
500f217514 | ||
|
9ffbcdcdb1 | ||
|
9b341db2db | ||
|
71c5338549 | ||
|
326f115f5b | ||
|
06614de99d | ||
|
2b0e35360b | ||
|
202f2229a9 | ||
|
baf1f0b5cd | ||
|
ec54371b86 | ||
|
1d5090957b | ||
|
5415f8ae07 | ||
|
617fbcbf27 | ||
|
c152e36101 | ||
|
5b0422d756 | ||
|
f1c5bbd42d | ||
|
1c5ddcd20c | ||
|
a877305202 | ||
|
1f77d2256b | ||
|
ac9bd1d95e | ||
|
979df34392 | ||
|
8f129931d5 | ||
|
2931430eb8 | ||
|
99b9924a4a | ||
|
d649bb1116 | ||
|
1477b7034b | ||
|
d1c0ca5260 | ||
|
27f3e76c77 | ||
|
eadbe920fe | ||
|
3e804b81fe | ||
|
f68965e7c9 | ||
|
accc833e87 | ||
|
d4bfdd4548 | ||
|
2641080007 | ||
|
10c58d3dc0 | ||
|
9496b1f7e5 | ||
|
2f1b67ed35 | ||
|
579278b4de | ||
|
d30ab7b9bd | ||
|
d1fd0465e4 | ||
|
79eb8e1365 | ||
|
2cb201d202 | ||
|
fc1ffc8cd3 | ||
|
48c55af373 | ||
|
7a59d0929c | ||
|
9b5eaf44df | ||
|
438e5fdbe9 | ||
|
a866493d0a | ||
|
c9d92f7bac | ||
|
aa8ffdfcf7 | ||
|
8d7e23f542 | ||
|
bd26d6eefd | ||
|
9441d2a523 | ||
|
4d51de71c2 | ||
|
4b66a222cd | ||
|
b665c943a2 | ||
|
a50e4ce9d1 | ||
|
c76d68bc96 | ||
|
02cb64e31b | ||
|
639ad76be3 | ||
|
a373bf8eda | ||
|
74b1c46b7f | ||
|
5424c6718e | ||
|
43b3496f00 | ||
|
38c69ebfda | ||
|
234e11b28b | ||
|
ec6d6d83ea | ||
|
4d9fee383f | ||
|
429ac62dba | ||
|
020abec6f2 | ||
|
dac9685298 | ||
|
519d38f238 | ||
|
1d17233d6d | ||
|
774c0af0b0 | ||
|
71fbd9271e | ||
|
2d640209e5 | ||
|
304fd91283 | ||
|
18de96d7b8 | ||
|
a36cab1783 | ||
|
6e0c095bb8 | ||
|
14eea3b1e4 | ||
|
1bc77dc969 | ||
|
b8715d1d4f | ||
|
13d3490bcf | ||
|
e9b8b11fec | ||
|
43b7aa2536 | ||
|
9b8b3c90fb | ||
|
e74d8e3501 | ||
|
4b3bd2ff47 | ||
|
e652d2df84 |
4
.github/workflows/codeql.yaml
vendored
4
.github/workflows/codeql.yaml
vendored
|
@ -13,7 +13,7 @@ on:
|
||||||
jobs:
|
jobs:
|
||||||
analyze:
|
analyze:
|
||||||
name: Analyze
|
name: Analyze
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
actions: read
|
actions: read
|
||||||
|
@ -28,7 +28,7 @@ jobs:
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Code Checkout
|
- name: Code Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v2
|
uses: github/codeql-action/init@v2
|
||||||
|
|
22
.github/workflows/docker.yaml
vendored
22
.github/workflows/docker.yaml
vendored
|
@ -11,31 +11,33 @@ on:
|
||||||
jobs:
|
jobs:
|
||||||
build-and-push:
|
build-and-push:
|
||||||
name: Build and Push
|
name: Build and Push
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-22.04
|
||||||
# Always run against a tag, even if the commit into the tag has [docker skip] within the commit message.
|
# Always run against a tag, even if the commit into the tag has [docker skip] within the commit message.
|
||||||
if: "!contains(github.ref, 'develop') || (!contains(github.event.head_commit.message, 'skip docker') && !contains(github.event.head_commit.message, 'docker skip'))"
|
if: "!contains(github.ref, 'develop') || (!contains(github.event.head_commit.message, 'skip docker') && !contains(github.event.head_commit.message, 'docker skip'))"
|
||||||
steps:
|
steps:
|
||||||
- name: Code checkout
|
- name: Code checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Docker Meta
|
- name: Docker metadata
|
||||||
id: docker_meta
|
id: docker_meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
images: ghcr.io/pterodactyl/wings
|
images: ghcr.io/pterodactyl/wings
|
||||||
|
flavor: |
|
||||||
|
latest=false
|
||||||
tags: |
|
tags: |
|
||||||
type=raw,value=latest,enable=${{ github.event_name == 'release' && github.event.action == 'published' && github.event.release.prerelease == false }}
|
type=raw,value=latest,enable=${{ github.event_name == 'release' && github.event.action == 'published' && github.event.release.prerelease == false }}
|
||||||
type=ref,event=tag
|
type=ref,event=tag
|
||||||
type=ref,event=branch
|
type=ref,event=branch
|
||||||
|
|
||||||
- name: Setup QEMU
|
- name: Setup QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
- name: Install buildx
|
- name: Setup Docker buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
- name: Login to GitHub Container Registry
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v3
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.repository_owner }}
|
||||||
|
@ -48,7 +50,7 @@ jobs:
|
||||||
echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Build and Push (tag)
|
- name: Build and Push (tag)
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v5
|
||||||
if: "github.event_name == 'release' && github.event.action == 'published'"
|
if: "github.event_name == 'release' && github.event.action == 'published'"
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
|
@ -61,7 +63,7 @@ jobs:
|
||||||
tags: ${{ steps.docker_meta.outputs.tags }}
|
tags: ${{ steps.docker_meta.outputs.tags }}
|
||||||
|
|
||||||
- name: Build and Push (develop)
|
- name: Build and Push (develop)
|
||||||
uses: docker/build-push-action@v3
|
uses: docker/build-push-action@v5
|
||||||
if: "github.event_name == 'push' && contains(github.ref, 'develop')"
|
if: "github.event_name == 'push' && contains(github.ref, 'develop')"
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
|
|
43
.github/workflows/push.yaml
vendored
43
.github/workflows/push.yaml
vendored
|
@ -15,44 +15,19 @@ jobs:
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-20.04]
|
os: [ubuntu-22.04]
|
||||||
go: ["1.18.8", "1.19.3"]
|
go: ["1.22.5"]
|
||||||
goos: [linux]
|
goos: [linux]
|
||||||
goarch: [amd64, arm64]
|
goarch: [amd64, arm64]
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go }}
|
go-version: ${{ matrix.go }}
|
||||||
|
|
||||||
- name: Code Checkout
|
- name: Code checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Gather environment variables
|
|
||||||
id: env
|
|
||||||
run: |
|
|
||||||
printf "Go Executable Path: $(which go)\n"
|
|
||||||
printf "Go Version: $(go version)\n"
|
|
||||||
printf "\n\nGo Environment:\n\n"
|
|
||||||
go env
|
|
||||||
printf "\n\nSystem Environment:\n\n"
|
|
||||||
env
|
|
||||||
printf "Git Version: $(git version)\n\n"
|
|
||||||
echo "version_tag=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
|
|
||||||
echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
|
||||||
echo "go_cache=$(go env GOCACHE)" >> $GITHUB_OUTPUT
|
|
||||||
echo "go_mod_cache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
|
|
||||||
|
|
||||||
- name: Build Cache
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
path: |
|
|
||||||
${{ steps.env.outputs.go_cache }}
|
|
||||||
${{ steps.env.outputs.go_mod_cache }}
|
|
||||||
|
|
||||||
- name: go mod download
|
- name: go mod download
|
||||||
env:
|
env:
|
||||||
|
@ -86,15 +61,15 @@ jobs:
|
||||||
go test -race $(go list ./...)
|
go test -race $(go list ./...)
|
||||||
|
|
||||||
- name: Upload Release Artifact
|
- name: Upload Release Artifact
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v4
|
||||||
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
|
if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.22.5' }}
|
||||||
with:
|
with:
|
||||||
name: wings_linux_${{ matrix.goarch }}
|
name: wings_linux_${{ matrix.goarch }}
|
||||||
path: dist/wings
|
path: dist/wings
|
||||||
|
|
||||||
- name: Upload Debug Artifact
|
- name: Upload Debug Artifact
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v4
|
||||||
if: ${{ github.ref == 'refs/heads/develop' || github.event_name == 'pull_request' }}
|
if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.22.5' }}
|
||||||
with:
|
with:
|
||||||
name: wings_linux_${{ matrix.goarch }}_debug
|
name: wings_linux_${{ matrix.goarch }}_debug
|
||||||
path: dist/wings_debug
|
path: dist/wings_debug
|
||||||
|
|
10
.github/workflows/release.yaml
vendored
10
.github/workflows/release.yaml
vendored
|
@ -8,16 +8,16 @@ on:
|
||||||
jobs:
|
jobs:
|
||||||
release:
|
release:
|
||||||
name: Release
|
name: Release
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-22.04
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Code Checkout
|
- name: Code Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: "1.18.8"
|
go-version: "1.22.5"
|
||||||
|
|
||||||
- name: Build release binaries
|
- name: Build release binaries
|
||||||
env:
|
env:
|
||||||
|
@ -62,8 +62,6 @@ jobs:
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
name: ${{ github.ref }}
|
|
||||||
tag_name: ${{ github.ref }}
|
|
||||||
draft: true
|
draft: true
|
||||||
prerelease: ${{ contains(github.ref, 'rc') || contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
|
prerelease: ${{ contains(github.ref, 'rc') || contains(github.ref, 'beta') || contains(github.ref, 'alpha') }}
|
||||||
body_path: ./RELEASE_CHANGELOG
|
body_path: ./RELEASE_CHANGELOG
|
||||||
|
|
103
CHANGELOG.md
103
CHANGELOG.md
|
@ -1,5 +1,96 @@
|
||||||
# Changelog
|
# Changelog
|
||||||
|
|
||||||
|
## v1.11.14
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
* Support relative file paths for the Wings config ([#180](https://github.com/pterodactyl/wings/pull/180))
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
* Folders not being sorted before files properly ([#5078](https://github.com/pterodactyl/panel/issues/5078)
|
||||||
|
|
||||||
|
## v1.11.13
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
* Auto-configure not working ([#5087](https://github.com/pterodactyl/panel/issues/5087))
|
||||||
|
* Individual files unable to be decompressed ([#5034](https://github.com/pterodactyl/panel/issues/5034))
|
||||||
|
|
||||||
|
## v1.11.12
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* Arbitrary File Write/Read ([GHSA-gqmf-jqgv-v8fw](https://github.com/pterodactyl/wings/security/advisories/GHSA-gqmf-jqgv-v8fw))
|
||||||
|
* Server-side Request Forgery (SSRF) during remote file pull ([GHSA-qq22-jj8x-4wwv](https://github.com/pterodactyl/wings/security/advisories/GHSA-qq22-jj8x-4wwv))
|
||||||
|
* Invalid `Content-Type` being used with the `wings diagnostics` command ([#186](https://github.com/pterodactyl/wings/pull/186))
|
||||||
|
|
||||||
|
## v1.11.11
|
||||||
|
### Fixed
|
||||||
|
* Backups missing content when a `.pteroignore` file is used
|
||||||
|
* Archives originating from a subdirectory not containing any files ([#5030](https://github.com/pterodactyl/panel/issues/5030))
|
||||||
|
|
||||||
|
## v1.11.10
|
||||||
|
### Fixed
|
||||||
|
* Archives randomly ignoring files and directories ([#5027](https://github.com/pterodactyl/panel/issues/5027))
|
||||||
|
* Crash when deleting or transferring a server ([#5028](https://github.com/pterodactyl/panel/issues/5028))
|
||||||
|
|
||||||
|
## v1.11.9
|
||||||
|
### Changed
|
||||||
|
* Release binaries are now built with Go 1.21.8
|
||||||
|
* Updated Go dependencies
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* [CVE-2024-27102](https://www.cve.org/CVERecord?id=CVE-2024-27102)
|
||||||
|
|
||||||
|
## v1.11.8
|
||||||
|
### Changed
|
||||||
|
* Release binaries are now built with Go 1.20.10 (resolves [CVE-2023-44487](https://www.cve.org/CVERecord?id=CVE-2023-44487))
|
||||||
|
* Updated Go dependencies
|
||||||
|
|
||||||
|
## v1.11.7
|
||||||
|
### Changed
|
||||||
|
* Updated Go dependencies (this resolves an issue related to `http: invalid Host header` with Docker)
|
||||||
|
* Wings is now built with go1.19.11
|
||||||
|
|
||||||
|
## v1.11.6
|
||||||
|
### Fixed
|
||||||
|
* CVE-2023-32080
|
||||||
|
|
||||||
|
## v1.11.5
|
||||||
|
### Added
|
||||||
|
* Added a config option to disable Wings config.yml updates from the Panel (https://github.com/pterodactyl/wings/commit/ec6d6d83ea3eb14995c24f001233e85b37ffb87b)
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
* Wings is now built with Go 1.19.7
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* Fixed archives containing partially matched file names (https://github.com/pterodactyl/wings/commit/43b3496f0001cec231c80af1f9a9b3417d04e8d4)
|
||||||
|
|
||||||
|
## v1.11.4
|
||||||
|
### Fixed
|
||||||
|
* CVE-2023-25168
|
||||||
|
|
||||||
|
## v1.11.3
|
||||||
|
### Fixed
|
||||||
|
* CVE-2023-25152
|
||||||
|
|
||||||
|
## v1.11.2
|
||||||
|
### Fixed
|
||||||
|
* Backups being restored from remote storage (s3) erroring out due to a closed stream.
|
||||||
|
* Fix IP validation logic for activity logs filtering out valid IPs instead of invalid IPs
|
||||||
|
|
||||||
|
## v1.11.1
|
||||||
|
### Changed
|
||||||
|
* Release binaries are now built with Go 1.18.10
|
||||||
|
* Timeout when stopping a server before a transfer begins has been reduced to 15 seconds from 1 minute
|
||||||
|
* Removed insecure SSH protocols for use with the SFTP server
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
* Unnecessary Docker client connections being left open, causing a slow leak of file descriptors
|
||||||
|
* Files being left open in parts of the server's filesystem, causing a leak of file descriptors
|
||||||
|
* IPv6 addresses being corrupted by flawed port stripping logic for activity logs, old entries with malformed IPs will be deleted from the local SQLite database automatically
|
||||||
|
* A server that times out while being stopped at the beginning of a transfer no longer causes the server to become stuck in a transferring state
|
||||||
|
|
||||||
## v1.11.0
|
## v1.11.0
|
||||||
### Added (since 1.7.2)
|
### Added (since 1.7.2)
|
||||||
* More detailed information returned by the `/api/system` endpoint when using the `?v=2` query parameter.
|
* More detailed information returned by the `/api/system` endpoint when using the `?v=2` query parameter.
|
||||||
|
@ -43,6 +134,18 @@
|
||||||
* Archive progress is now reported correctly.
|
* Archive progress is now reported correctly.
|
||||||
* Labels for containers can now be set by the Panel.
|
* Labels for containers can now be set by the Panel.
|
||||||
|
|
||||||
|
## v1.7.5
|
||||||
|
### Fixed
|
||||||
|
* CVE-2023-32080
|
||||||
|
|
||||||
|
## v1.7.4
|
||||||
|
### Fixed
|
||||||
|
* CVE-2023-25168
|
||||||
|
|
||||||
|
## v1.7.3
|
||||||
|
### Fixed
|
||||||
|
* CVE-2023-25152
|
||||||
|
|
||||||
## v1.7.2
|
## v1.7.2
|
||||||
### Fixed
|
### Fixed
|
||||||
* The S3 backup driver now supports Cloudflare R2
|
* The S3 backup driver now supports Cloudflare R2
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
# Stage 1 (Build)
|
# Stage 1 (Build)
|
||||||
FROM golang:1.18-alpine AS builder
|
FROM golang:1.22.5-alpine AS builder
|
||||||
|
|
||||||
ARG VERSION
|
ARG VERSION
|
||||||
RUN apk add --update --no-cache git make
|
RUN apk add --update --no-cache git make
|
||||||
|
|
23
README.md
23
README.md
|
@ -15,21 +15,18 @@ dependencies, and allowing users to authenticate with the same credentials they
|
||||||
|
|
||||||
## Sponsors
|
## Sponsors
|
||||||
|
|
||||||
I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's developement.
|
I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
|
||||||
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
|
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
|
||||||
|
|
||||||
| Company | About |
|
| Company | About |
|
||||||
|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
|--------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
| [**WISP**](https://wisp.gg) | Extra features. |
|
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
|
||||||
| [**Fragnet**](https://fragnet.net) | Providing low latency, high-end game hosting solutions to gamers, game studios and eSports platforms. |
|
| [**CodeNode LLC**](https://codenode.gg/) | Looking for simplicity? Well, look no further! CodeNode has got you covered with everything you need at the rock-bottom price of $1.75 per GB, including dedicated IPs in Dallas, Texas, and Amsterdam, Netherlands. We're not just good, we're the best in the game! |
|
||||||
| [**RocketNode**](https://rocketnode.com/) | Innovative game server hosting combined with a straightforward control panel, affordable prices, and Rocket-Fast support. |
|
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
|
||||||
| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
|
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
|
||||||
| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
|
| [**HostEZ**](https://hostez.io) | US & EU Rust & Minecraft Hosting. DDoS Protected bare metal, VPS and colocation with low latency, high uptime and maximum availability. EZ! |
|
||||||
| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
|
| [**Blueprint**](https://blueprint.zip/?pterodactyl=true) | Create and install Pterodactyl addons and themes with the growing Blueprint framework - the package-manager for Pterodactyl. Use multiple modifications at once without worrying about conflicts and make use of the large extension ecosystem. |
|
||||||
| [**Skynode**](https://www.skynode.pro/) | Skynode provides blazing fast game servers along with a top-notch user experience. Whatever our clients are looking for, we're able to provide it! |
|
| [**indifferent broccoli**](https://indifferentbroccoli.com/) | indifferent broccoli is a game server hosting and rental company. With us, you get top-notch computer power for your gaming sessions. We destroy lag, latency, and complexity--letting you focus on the fun stuff. |
|
||||||
| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa. |
|
|
||||||
| [**Pterodactyl Market**](https://pterodactylmarket.com/) | Pterodactyl Market is a one-and-stop shop for Pterodactyl. In our market, you can find Add-ons, Themes, Eggs, and more for Pterodactyl. |
|
|
||||||
| [**UltraServers**](https://ultraservers.com/) | Deploy premium games hosting with the click of a button. Manage and swap games with ease and let us take care of the rest. We currently support Minecraft, Rust, ARK, 7 Days to Die, Garys MOD, CS:GO, Satisfactory and others. |
|
|
||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
|
|
|
@ -155,6 +155,9 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Manually specify the Panel URL as it won't be decoded from JSON.
|
||||||
|
cfg.PanelLocation = configureArgs.PanelURL
|
||||||
|
|
||||||
if err = config.WriteToDisk(cfg); err != nil {
|
if err = config.WriteToDisk(cfg); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -229,8 +229,8 @@ func uploadToHastebin(hbUrl, content string) (string, error) {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
u.Path = path.Join(u.Path, "documents")
|
u.Path = path.Join(u.Path, "documents")
|
||||||
res, err := http.Post(u.String(), "plain/text", r)
|
res, err := http.Post(u.String(), "text/plain", r)
|
||||||
if err != nil || res.StatusCode != 200 {
|
if err != nil || res.StatusCode < 200 || res.StatusCode >= 300 {
|
||||||
fmt.Println("Failed to upload report to ", u.String(), err)
|
fmt.Println("Failed to upload report to ", u.String(), err)
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
19
cmd/root.go
19
cmd/root.go
|
@ -13,7 +13,6 @@ import (
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/NYTimes/logrotate"
|
"github.com/NYTimes/logrotate"
|
||||||
|
@ -113,6 +112,9 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
||||||
if err := config.EnsurePterodactylUser(); err != nil {
|
if err := config.EnsurePterodactylUser(); err != nil {
|
||||||
log.WithField("error", err).Fatal("failed to create pterodactyl system user")
|
log.WithField("error", err).Fatal("failed to create pterodactyl system user")
|
||||||
}
|
}
|
||||||
|
if err := config.ConfigurePasswd(); err != nil {
|
||||||
|
log.WithField("error", err).Fatal("failed to configure container passwd file")
|
||||||
|
}
|
||||||
log.WithFields(log.Fields{
|
log.WithFields(log.Fields{
|
||||||
"username": config.Get().System.Username,
|
"username": config.Get().System.Username,
|
||||||
"uid": config.Get().System.User.Uid,
|
"uid": config.Get().System.User.Uid,
|
||||||
|
@ -379,13 +381,14 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
|
||||||
// Reads the configuration from the disk and then sets up the global singleton
|
// Reads the configuration from the disk and then sets up the global singleton
|
||||||
// with all the configuration values.
|
// with all the configuration values.
|
||||||
func initConfig() {
|
func initConfig() {
|
||||||
if !strings.HasPrefix(configPath, "/") {
|
if !filepath.IsAbs(configPath) {
|
||||||
d, err := os.Getwd()
|
d, err := filepath.Abs(configPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log2.Fatalf("cmd/root: could not determine directory: %s", err)
|
log2.Fatalf("cmd/root: failed to get path to config file: %s", err)
|
||||||
}
|
}
|
||||||
configPath = path.Clean(path.Join(d, configPath))
|
configPath = d
|
||||||
}
|
}
|
||||||
|
|
||||||
err := config.FromFile(configPath)
|
err := config.FromFile(configPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
@ -440,18 +443,18 @@ in all copies or substantial portions of the Software.%s`), system.Version, time
|
||||||
}
|
}
|
||||||
|
|
||||||
func exitWithConfigurationNotice() {
|
func exitWithConfigurationNotice() {
|
||||||
fmt.Print(colorstring.Color(`
|
fmt.Printf(colorstring.Color(`
|
||||||
[_red_][white][bold]Error: Configuration File Not Found[reset]
|
[_red_][white][bold]Error: Configuration File Not Found[reset]
|
||||||
|
|
||||||
Wings was not able to locate your configuration file, and therefore is not
|
Wings was not able to locate your configuration file, and therefore is not
|
||||||
able to complete its boot process. Please ensure you have copied your instance
|
able to complete its boot process. Please ensure you have copied your instance
|
||||||
configuration file into the default location below.
|
configuration file into the default location below.
|
||||||
|
|
||||||
Default Location: /etc/pterodactyl/config.yml
|
Default Location: %s
|
||||||
|
|
||||||
[yellow]This is not a bug with this software. Please do not make a bug report
|
[yellow]This is not a bug with this software. Please do not make a bug report
|
||||||
for this issue, it will be closed.[reset]
|
for this issue, it will be closed.[reset]
|
||||||
|
|
||||||
`))
|
`), config.DefaultLocation)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
113
config/config.go
113
config/config.go
|
@ -12,6 +12,7 @@ import (
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"text/template"
|
"text/template"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
@ -20,6 +21,7 @@ import (
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/creasty/defaults"
|
"github.com/creasty/defaults"
|
||||||
"github.com/gbrlsnchs/jwt/v3"
|
"github.com/gbrlsnchs/jwt/v3"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
"gopkg.in/yaml.v2"
|
"gopkg.in/yaml.v2"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/system"
|
||||||
|
@ -87,7 +89,7 @@ type ApiConfiguration struct {
|
||||||
// Determines if functionality for allowing remote download of files into server directories
|
// Determines if functionality for allowing remote download of files into server directories
|
||||||
// is enabled on this instance. If set to "true" remote downloads will not be possible for
|
// is enabled on this instance. If set to "true" remote downloads will not be possible for
|
||||||
// servers.
|
// servers.
|
||||||
DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
|
DisableRemoteDownload bool `json:"-" yaml:"disable_remote_download"`
|
||||||
|
|
||||||
// The maximum size for files uploaded through the Panel in MB.
|
// The maximum size for files uploaded through the Panel in MB.
|
||||||
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
|
||||||
|
@ -121,23 +123,23 @@ type RemoteQueryConfiguration struct {
|
||||||
// SystemConfiguration defines basic system configuration settings.
|
// SystemConfiguration defines basic system configuration settings.
|
||||||
type SystemConfiguration struct {
|
type SystemConfiguration struct {
|
||||||
// The root directory where all of the pterodactyl data is stored at.
|
// The root directory where all of the pterodactyl data is stored at.
|
||||||
RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
|
RootDirectory string `default:"/var/lib/pterodactyl" json:"-" yaml:"root_directory"`
|
||||||
|
|
||||||
// Directory where logs for server installations and other wings events are logged.
|
// Directory where logs for server installations and other wings events are logged.
|
||||||
LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
|
LogDirectory string `default:"/var/log/pterodactyl" json:"-" yaml:"log_directory"`
|
||||||
|
|
||||||
// Directory where the server data is stored at.
|
// Directory where the server data is stored at.
|
||||||
Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
|
Data string `default:"/var/lib/pterodactyl/volumes" json:"-" yaml:"data"`
|
||||||
|
|
||||||
// Directory where server archives for transferring will be stored.
|
// Directory where server archives for transferring will be stored.
|
||||||
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
|
ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" json:"-" yaml:"archive_directory"`
|
||||||
|
|
||||||
// Directory where local backups will be stored on the machine.
|
// Directory where local backups will be stored on the machine.
|
||||||
BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
|
BackupDirectory string `default:"/var/lib/pterodactyl/backups" json:"-" yaml:"backup_directory"`
|
||||||
|
|
||||||
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
|
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
|
||||||
// should be created. This supports environments running docker-in-docker.
|
// should be created. This supports environments running docker-in-docker.
|
||||||
TmpDirectory string `default:"/tmp/pterodactyl" yaml:"tmp_directory"`
|
TmpDirectory string `default:"/tmp/pterodactyl" json:"-" yaml:"tmp_directory"`
|
||||||
|
|
||||||
// The user that should own all of the server files, and be used for containers.
|
// The user that should own all of the server files, and be used for containers.
|
||||||
Username string `default:"pterodactyl" yaml:"username"`
|
Username string `default:"pterodactyl" yaml:"username"`
|
||||||
|
@ -170,6 +172,25 @@ type SystemConfiguration struct {
|
||||||
Gid int `yaml:"gid"`
|
Gid int `yaml:"gid"`
|
||||||
} `yaml:"user"`
|
} `yaml:"user"`
|
||||||
|
|
||||||
|
// Passwd controls the mounting of a generated passwd files into containers started by Wings.
|
||||||
|
Passwd struct {
|
||||||
|
// Enable controls whether generated passwd files should be mounted into containers.
|
||||||
|
//
|
||||||
|
// By default this option is disabled and Wings will not mount any additional passwd
|
||||||
|
// files into containers.
|
||||||
|
Enable bool `yaml:"enabled" default:"false"`
|
||||||
|
|
||||||
|
// Directory is the directory on disk where the generated files will be stored.
|
||||||
|
// This directory may be temporary as it will be re-created whenever Wings is started.
|
||||||
|
//
|
||||||
|
// This path **WILL** be both written to by Wings and mounted into containers created by
|
||||||
|
// Wings. If you are running Wings itself in a container, this path will need to be mounted
|
||||||
|
// into the Wings container as the exact path on the host, which should match the value
|
||||||
|
// specified here. If you are using SELinux, you will need to make sure this file has the
|
||||||
|
// correct SELinux context in order for containers to use it.
|
||||||
|
Directory string `yaml:"directory" default:"/run/wings/etc"`
|
||||||
|
} `yaml:"passwd"`
|
||||||
|
|
||||||
// The amount of time in seconds that can elapse before a server's disk space calculation is
|
// The amount of time in seconds that can elapse before a server's disk space calculation is
|
||||||
// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
|
// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
|
||||||
// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
|
// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
|
||||||
|
@ -209,6 +230,8 @@ type SystemConfiguration struct {
|
||||||
Backups Backups `yaml:"backups"`
|
Backups Backups `yaml:"backups"`
|
||||||
|
|
||||||
Transfers Transfers `yaml:"transfers"`
|
Transfers Transfers `yaml:"transfers"`
|
||||||
|
|
||||||
|
OpenatMode string `default:"auto" yaml:"openat_mode"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type CrashDetection struct {
|
type CrashDetection struct {
|
||||||
|
@ -302,7 +325,7 @@ type Configuration struct {
|
||||||
|
|
||||||
// The location where the panel is running that this daemon should connect to
|
// The location where the panel is running that this daemon should connect to
|
||||||
// to collect data and send events.
|
// to collect data and send events.
|
||||||
PanelLocation string `json:"remote" yaml:"remote"`
|
PanelLocation string `json:"-" yaml:"remote"`
|
||||||
RemoteQuery RemoteQueryConfiguration `json:"remote_query" yaml:"remote_query"`
|
RemoteQuery RemoteQueryConfiguration `json:"remote_query" yaml:"remote_query"`
|
||||||
|
|
||||||
// AllowedMounts is a list of allowed host-system mount points.
|
// AllowedMounts is a list of allowed host-system mount points.
|
||||||
|
@ -319,6 +342,9 @@ type Configuration struct {
|
||||||
// is only required by users running Wings without SSL certificates and using internal IP
|
// is only required by users running Wings without SSL certificates and using internal IP
|
||||||
// addresses in order to connect. Most users should NOT enable this setting.
|
// addresses in order to connect. Most users should NOT enable this setting.
|
||||||
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
|
AllowCORSPrivateNetwork bool `json:"allow_cors_private_network" yaml:"allow_cors_private_network"`
|
||||||
|
|
||||||
|
// IgnorePanelConfigUpdates causes confiuration updates that are sent by the panel to be ignored.
|
||||||
|
IgnorePanelConfigUpdates bool `json:"ignore_panel_config_updates" yaml:"ignore_panel_config_updates"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAtPath creates a new struct and set the path where it should be stored.
|
// NewAtPath creates a new struct and set the path where it should be stored.
|
||||||
|
@ -490,6 +516,37 @@ func EnsurePterodactylUser() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ConfigurePasswd generates required passwd files for use with containers started by Wings.
|
||||||
|
func ConfigurePasswd() error {
|
||||||
|
passwd := _config.System.Passwd
|
||||||
|
if !passwd.Enable {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
v := []byte(fmt.Sprintf(
|
||||||
|
`root:x:0:
|
||||||
|
container:x:%d:
|
||||||
|
nogroup:x:65534:`,
|
||||||
|
_config.System.User.Gid,
|
||||||
|
))
|
||||||
|
if err := os.WriteFile(filepath.Join(passwd.Directory, "group"), v, 0o644); err != nil {
|
||||||
|
return fmt.Errorf("failed to write file to %s/group: %v", passwd.Directory, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
v = []byte(fmt.Sprintf(
|
||||||
|
`root:x:0:0::/root:/bin/sh
|
||||||
|
container:x:%d:%d::/home/container:/bin/sh
|
||||||
|
nobody:x:65534:65534::/var/empty:/bin/sh
|
||||||
|
`,
|
||||||
|
_config.System.User.Uid,
|
||||||
|
_config.System.User.Gid,
|
||||||
|
))
|
||||||
|
if err := os.WriteFile(filepath.Join(passwd.Directory, "passwd"), v, 0o644); err != nil {
|
||||||
|
return fmt.Errorf("failed to write file to %s/passwd: %v", passwd.Directory, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// FromFile reads the configuration from the provided file and stores it in the
|
// FromFile reads the configuration from the provided file and stores it in the
|
||||||
// global singleton for this instance.
|
// global singleton for this instance.
|
||||||
func FromFile(path string) error {
|
func FromFile(path string) error {
|
||||||
|
@ -554,6 +611,13 @@ func ConfigureDirectories() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if _config.System.Passwd.Enable {
|
||||||
|
log.WithField("path", _config.System.Passwd.Directory).Debug("ensuring passwd directory exists")
|
||||||
|
if err := os.MkdirAll(_config.System.Passwd.Directory, 0o755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -668,3 +732,36 @@ func getSystemName() (string, error) {
|
||||||
}
|
}
|
||||||
return release["ID"], nil
|
return release["ID"], nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
openat2 atomic.Bool
|
||||||
|
openat2Set atomic.Bool
|
||||||
|
)
|
||||||
|
|
||||||
|
func UseOpenat2() bool {
|
||||||
|
if openat2Set.Load() {
|
||||||
|
return openat2.Load()
|
||||||
|
}
|
||||||
|
defer openat2Set.Store(true)
|
||||||
|
|
||||||
|
c := Get()
|
||||||
|
openatMode := c.System.OpenatMode
|
||||||
|
switch openatMode {
|
||||||
|
case "openat2":
|
||||||
|
openat2.Store(true)
|
||||||
|
return true
|
||||||
|
case "openat":
|
||||||
|
openat2.Store(false)
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
fd, err := unix.Openat2(unix.AT_FDCWD, "/", &unix.OpenHow{})
|
||||||
|
if err != nil {
|
||||||
|
log.WithError(err).Warn("error occurred while checking for openat2 support, falling back to openat")
|
||||||
|
openat2.Store(false)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
_ = unix.Close(fd)
|
||||||
|
openat2.Store(true)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -4,8 +4,8 @@ import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
"github.com/docker/docker/api/types"
|
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
|
"github.com/docker/docker/api/types/registry"
|
||||||
"github.com/goccy/go-json"
|
"github.com/goccy/go-json"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -115,7 +115,7 @@ type RegistryConfiguration struct {
|
||||||
// Base64 returns the authentication for a given registry as a base64 encoded
|
// Base64 returns the authentication for a given registry as a base64 encoded
|
||||||
// string value.
|
// string value.
|
||||||
func (c RegistryConfiguration) Base64() (string, error) {
|
func (c RegistryConfiguration) Base64() (string, error) {
|
||||||
b, err := json.Marshal(types.AuthConfig{
|
b, err := json.Marshal(registry.AuthConfig{
|
||||||
Username: c.Username,
|
Username: c.Username,
|
||||||
Password: c.Password,
|
Password: c.Password,
|
||||||
})
|
})
|
||||||
|
|
|
@ -58,7 +58,7 @@ func (e *Environment) Attach(ctx context.Context) error {
|
||||||
|
|
||||||
// Set the stream again with the container.
|
// Set the stream again with the container.
|
||||||
if st, err := e.client.ContainerAttach(ctx, e.Id, opts); err != nil {
|
if st, err := e.client.ContainerAttach(ctx, e.Id, opts); err != nil {
|
||||||
return err
|
return errors.WrapIf(err, "environment/docker: error while attaching to container")
|
||||||
} else {
|
} else {
|
||||||
e.SetStream(&st)
|
e.SetStream(&st)
|
||||||
}
|
}
|
||||||
|
@ -143,7 +143,7 @@ func (e *Environment) Create() error {
|
||||||
if _, err := e.ContainerInspect(ctx); err == nil {
|
if _, err := e.ContainerInspect(ctx); err == nil {
|
||||||
return nil
|
return nil
|
||||||
} else if !client.IsErrNotFound(err) {
|
} else if !client.IsErrNotFound(err) {
|
||||||
return errors.Wrap(err, "environment/docker: failed to inspect container")
|
return errors.WrapIf(err, "environment/docker: failed to inspect container")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to pull the requested image before creating the container.
|
// Try to pull the requested image before creating the container.
|
||||||
|
|
|
@ -161,7 +161,7 @@ func (e *Environment) ExitState() (uint32, bool, error) {
|
||||||
if client.IsErrNotFound(err) {
|
if client.IsErrNotFound(err) {
|
||||||
return 1, false, nil
|
return 1, false, nil
|
||||||
}
|
}
|
||||||
return 0, false, err
|
return 0, false, errors.WrapIf(err, "environment/docker: failed to inspect container")
|
||||||
}
|
}
|
||||||
return uint32(c.State.ExitCode), c.State.OOMKilled, nil
|
return uint32(c.State.ExitCode), c.State.OOMKilled, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,7 +4,6 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
|
@ -103,7 +102,7 @@ func (e *Environment) Start(ctx context.Context) error {
|
||||||
// exists on the system, and rebuild the container if that is required for server booting to
|
// exists on the system, and rebuild the container if that is required for server booting to
|
||||||
// occur.
|
// occur.
|
||||||
if err := e.OnBeforeStart(ctx); err != nil {
|
if err := e.OnBeforeStart(ctx); err != nil {
|
||||||
return errors.WithStackIf(err)
|
return errors.WrapIf(err, "environment/docker: failed to run pre-boot process")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we cannot start & attach to the container in 30 seconds something has gone
|
// If we cannot start & attach to the container in 30 seconds something has gone
|
||||||
|
@ -119,7 +118,7 @@ func (e *Environment) Start(ctx context.Context) error {
|
||||||
// By explicitly attaching to the instance before we start it, we can immediately
|
// By explicitly attaching to the instance before we start it, we can immediately
|
||||||
// react to errors/output stopping/etc. when starting.
|
// react to errors/output stopping/etc. when starting.
|
||||||
if err := e.Attach(actx); err != nil {
|
if err := e.Attach(actx); err != nil {
|
||||||
return err
|
return errors.WrapIf(err, "environment/docker: failed to attach to container")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := e.client.ContainerStart(actx, e.Id, types.ContainerStartOptions{}); err != nil {
|
if err := e.client.ContainerStart(actx, e.Id, types.ContainerStartOptions{}); err != nil {
|
||||||
|
@ -143,48 +142,55 @@ func (e *Environment) Stop(ctx context.Context) error {
|
||||||
s := e.meta.Stop
|
s := e.meta.Stop
|
||||||
e.mu.RUnlock()
|
e.mu.RUnlock()
|
||||||
|
|
||||||
// A native "stop" as the Type field value will just skip over all of this
|
|
||||||
// logic and end up only executing the container stop command (which may or
|
|
||||||
// may not work as expected).
|
|
||||||
if s.Type == "" || s.Type == remote.ProcessStopSignal {
|
|
||||||
if s.Type == "" {
|
|
||||||
log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using termination procedure")
|
|
||||||
}
|
|
||||||
|
|
||||||
signal := os.Kill
|
|
||||||
// Handle a few common cases, otherwise just fall through and just pass along
|
|
||||||
// the os.Kill signal to the process.
|
|
||||||
switch strings.ToUpper(s.Value) {
|
|
||||||
case "SIGABRT":
|
|
||||||
signal = syscall.SIGABRT
|
|
||||||
case "SIGINT":
|
|
||||||
signal = syscall.SIGINT
|
|
||||||
case "SIGTERM":
|
|
||||||
signal = syscall.SIGTERM
|
|
||||||
}
|
|
||||||
return e.Terminate(ctx, signal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the process is already offline don't switch it back to stopping. Just leave it how
|
// If the process is already offline don't switch it back to stopping. Just leave it how
|
||||||
// it is and continue through to the stop handling for the process.
|
// it is and continue through to the stop handling for the process.
|
||||||
if e.st.Load() != environment.ProcessOfflineState {
|
if e.st.Load() != environment.ProcessOfflineState {
|
||||||
e.SetState(environment.ProcessStoppingState)
|
e.SetState(environment.ProcessStoppingState)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handle signal based actions
|
||||||
|
if s.Type == remote.ProcessStopSignal {
|
||||||
|
log.WithField("signal_value", s.Value).Debug("stopping server using signal")
|
||||||
|
|
||||||
|
// Handle some common signals - Default to SIGKILL
|
||||||
|
signal := "SIGKILL"
|
||||||
|
switch strings.ToUpper(s.Value) {
|
||||||
|
case "SIGABRT":
|
||||||
|
signal = "SIGABRT"
|
||||||
|
case "SIGINT", "C":
|
||||||
|
signal = "SIGINT"
|
||||||
|
case "SIGTERM":
|
||||||
|
signal = "SIGTERM"
|
||||||
|
case "SIGKILL":
|
||||||
|
signal = "SIGKILL"
|
||||||
|
default:
|
||||||
|
log.Info("Unrecognised signal requested, defaulting to SIGKILL")
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.SignalContainer(ctx, signal)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle command based stops
|
||||||
// Only attempt to send the stop command to the instance if we are actually attached to
|
// Only attempt to send the stop command to the instance if we are actually attached to
|
||||||
// the instance. If we are not for some reason, just send the container stop event.
|
// the instance. If we are not for some reason, just send the container stop event.
|
||||||
if e.IsAttached() && s.Type == remote.ProcessStopCommand {
|
if e.IsAttached() && s.Type == remote.ProcessStopCommand {
|
||||||
return e.SendCommand(s.Value)
|
return e.SendCommand(s.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Allow the stop action to run for however long it takes, similar to executing a command
|
if s.Type == "" {
|
||||||
// and using a different logic pathway to wait for the container to stop successfully.
|
log.WithField("container_id", e.Id).Warn("no stop configuration detected for environment, using native docker stop")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback to a native docker stop. As we aren't passing a signal to ContainerStop docker will
|
||||||
|
// attempt to stop the container using the default stop signal, SIGTERM, unless
|
||||||
|
// another signal was specified in the Dockerfile
|
||||||
//
|
//
|
||||||
// Using a negative timeout here will allow the container to stop gracefully,
|
// Using a negative timeout here will allow the container to stop gracefully,
|
||||||
// rather than forcefully terminating it, this value MUST be at least 1
|
// rather than forcefully terminating it. Value is in seconds, but -1 is
|
||||||
// second, otherwise it will be ignored.
|
// treated as indefinitely.
|
||||||
timeout := -1 * time.Second
|
timeout := -1
|
||||||
if err := e.client.ContainerStop(ctx, e.Id, &timeout); err != nil {
|
if err := e.client.ContainerStop(ctx, e.Id, container.StopOptions{Timeout: &timeout}); err != nil {
|
||||||
// If the container does not exist just mark the process as stopped and return without
|
// If the container does not exist just mark the process as stopped and return without
|
||||||
// an error.
|
// an error.
|
||||||
if client.IsErrNotFound(err) {
|
if client.IsErrNotFound(err) {
|
||||||
|
@ -224,7 +230,7 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
|
||||||
|
|
||||||
doTermination := func(s string) error {
|
doTermination := func(s string) error {
|
||||||
e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
|
e.log().WithField("step", s).WithField("duration", duration).Warn("container stop did not complete in time, terminating process...")
|
||||||
return e.Terminate(ctx, os.Kill)
|
return e.Terminate(ctx, "SIGKILL")
|
||||||
}
|
}
|
||||||
|
|
||||||
// We pass through the timed context for this stop action so that if one of the
|
// We pass through the timed context for this stop action so that if one of the
|
||||||
|
@ -268,8 +274,8 @@ func (e *Environment) WaitForStop(ctx context.Context, duration time.Duration, t
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Terminate forcefully terminates the container using the signal provided.
|
// Sends the specified signal to the container in an attempt to stop it.
|
||||||
func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
|
func (e *Environment) SignalContainer(ctx context.Context, signal string) error {
|
||||||
c, err := e.ContainerInspect(ctx)
|
c, err := e.ContainerInspect(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Treat missing containers as an okay error state, means it is obviously
|
// Treat missing containers as an okay error state, means it is obviously
|
||||||
|
@ -294,11 +300,27 @@ func (e *Environment) Terminate(ctx context.Context, signal os.Signal) error {
|
||||||
|
|
||||||
// We set it to stopping than offline to prevent crash detection from being triggered.
|
// We set it to stopping than offline to prevent crash detection from being triggered.
|
||||||
e.SetState(environment.ProcessStoppingState)
|
e.SetState(environment.ProcessStoppingState)
|
||||||
sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
|
if err := e.client.ContainerKill(ctx, e.Id, signal); err != nil && !client.IsErrNotFound(err) {
|
||||||
if err := e.client.ContainerKill(ctx, e.Id, sig); err != nil && !client.IsErrNotFound(err) {
|
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Terminate forcefully terminates the container using the signal provided.
|
||||||
|
// then sets its state to stopped.
|
||||||
|
func (e *Environment) Terminate(ctx context.Context, signal string) error {
|
||||||
|
|
||||||
|
// Send the signal to the container to kill it
|
||||||
|
if err := e.SignalContainer(ctx, signal); err != nil {
|
||||||
|
return errors.WithStack(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We expect Terminate to instantly kill the container
|
||||||
|
// so go ahead and mark it as dead and clean up
|
||||||
e.SetState(environment.ProcessOfflineState)
|
e.SetState(environment.ProcessOfflineState)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,7 +2,6 @@ package environment
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/events"
|
"github.com/pterodactyl/wings/events"
|
||||||
|
@ -72,7 +71,7 @@ type ProcessEnvironment interface {
|
||||||
|
|
||||||
// Terminate stops a running server instance using the provided signal. This function
|
// Terminate stops a running server instance using the provided signal. This function
|
||||||
// is a no-op if the server is already stopped.
|
// is a no-op if the server is already stopped.
|
||||||
Terminate(ctx context.Context, signal os.Signal) error
|
Terminate(ctx context.Context, signal string) error
|
||||||
|
|
||||||
// Destroys the environment removing any containers that were created (in Docker
|
// Destroys the environment removing any containers that were created (in Docker
|
||||||
// environments at least).
|
// environments at least).
|
||||||
|
|
|
@ -34,7 +34,7 @@ type Mount struct {
|
||||||
// Limits is the build settings for a given server that impact docker container
|
// Limits is the build settings for a given server that impact docker container
|
||||||
// creation and resource limits for a server instance.
|
// creation and resource limits for a server instance.
|
||||||
type Limits struct {
|
type Limits struct {
|
||||||
// The total amount of memory in megabytes that this server is allowed to
|
// The total amount of memory in mebibytes that this server is allowed to
|
||||||
// use on the host system.
|
// use on the host system.
|
||||||
MemoryLimit int64 `json:"memory_limit"`
|
MemoryLimit int64 `json:"memory_limit"`
|
||||||
|
|
||||||
|
@ -79,7 +79,7 @@ func (l Limits) MemoryOverheadMultiplier() float64 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l Limits) BoundedMemoryLimit() int64 {
|
func (l Limits) BoundedMemoryLimit() int64 {
|
||||||
return int64(math.Round(float64(l.MemoryLimit) * l.MemoryOverheadMultiplier() * 1_000_000))
|
return int64(math.Round(float64(l.MemoryLimit) * l.MemoryOverheadMultiplier() * 1024 * 1024))
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConvertedSwap returns the amount of swap available as a total in bytes. This
|
// ConvertedSwap returns the amount of swap available as a total in bytes. This
|
||||||
|
@ -90,7 +90,7 @@ func (l Limits) ConvertedSwap() int64 {
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
return (l.Swap * 1_000_000) + l.BoundedMemoryLimit()
|
return (l.Swap * 1024 * 1024) + l.BoundedMemoryLimit()
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessLimit returns the process limit for a container. This is currently
|
// ProcessLimit returns the process limit for a container. This is currently
|
||||||
|
@ -105,7 +105,7 @@ func (l Limits) AsContainerResources() container.Resources {
|
||||||
pids := l.ProcessLimit()
|
pids := l.ProcessLimit()
|
||||||
resources := container.Resources{
|
resources := container.Resources{
|
||||||
Memory: l.BoundedMemoryLimit(),
|
Memory: l.BoundedMemoryLimit(),
|
||||||
MemoryReservation: l.MemoryLimit * 1_000_000,
|
MemoryReservation: l.MemoryLimit * 1024 * 1024,
|
||||||
MemorySwap: l.ConvertedSwap(),
|
MemorySwap: l.ConvertedSwap(),
|
||||||
BlkioWeight: l.IoWeight,
|
BlkioWeight: l.IoWeight,
|
||||||
OomKillDisable: &l.OOMDisabled,
|
OomKillDisable: &l.OOMDisabled,
|
||||||
|
|
79
flake.lock
Normal file
79
flake.lock
Normal file
|
@ -0,0 +1,79 @@
|
||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"flake-parts": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs-lib": "nixpkgs-lib"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1719994518,
|
||||||
|
"narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=",
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "hercules-ci",
|
||||||
|
"repo": "flake-parts",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1721562059,
|
||||||
|
"narHash": "sha256-Tybxt65eyOARf285hMHIJ2uul8SULjFZbT9ZaEeUnP8=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "68c9ed8bbed9dfce253cc91560bf9043297ef2fe",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"ref": "nixos-unstable",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs-lib": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1719876945,
|
||||||
|
"narHash": "sha256-Fm2rDDs86sHy0/1jxTOKB1118Q0O3Uc7EC0iXvXKpbI=",
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://github.com/NixOS/nixpkgs/archive/5daf0514482af3f97abaefc78a6606365c9108e2.tar.gz"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"type": "tarball",
|
||||||
|
"url": "https://github.com/NixOS/nixpkgs/archive/5daf0514482af3f97abaefc78a6606365c9108e2.tar.gz"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"flake-parts": "flake-parts",
|
||||||
|
"nixpkgs": "nixpkgs",
|
||||||
|
"treefmt-nix": "treefmt-nix"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"treefmt-nix": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1721769617,
|
||||||
|
"narHash": "sha256-6Pqa0bi5nV74IZcENKYRToRNM5obo1EQ+3ihtunJ014=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "treefmt-nix",
|
||||||
|
"rev": "8db8970be1fb8be9c845af7ebec53b699fe7e009",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "treefmt-nix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
54
flake.nix
Normal file
54
flake.nix
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
{
|
||||||
|
description = "Wings";
|
||||||
|
|
||||||
|
inputs = {
|
||||||
|
flake-parts.url = "github:hercules-ci/flake-parts";
|
||||||
|
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||||
|
|
||||||
|
treefmt-nix = {
|
||||||
|
url = "github:numtide/treefmt-nix";
|
||||||
|
inputs.nixpkgs.follows = "nixpkgs";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
outputs = {...} @ inputs:
|
||||||
|
inputs.flake-parts.lib.mkFlake {inherit inputs;} {
|
||||||
|
systems = ["aarch64-darwin" "aarch64-linux" "x86_64-darwin" "x86_64-linux"];
|
||||||
|
|
||||||
|
imports = [
|
||||||
|
inputs.treefmt-nix.flakeModule
|
||||||
|
];
|
||||||
|
|
||||||
|
perSystem = {system, ...}: let
|
||||||
|
pkgs = import inputs.nixpkgs {inherit system;};
|
||||||
|
in {
|
||||||
|
devShells.default = pkgs.mkShell {
|
||||||
|
buildInputs = with pkgs; [
|
||||||
|
go_1_22
|
||||||
|
gofumpt
|
||||||
|
golangci-lint
|
||||||
|
gotools
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
treefmt = {
|
||||||
|
projectRootFile = "flake.nix";
|
||||||
|
|
||||||
|
programs = {
|
||||||
|
alejandra.enable = true;
|
||||||
|
deadnix.enable = true;
|
||||||
|
gofumpt = {
|
||||||
|
enable = true;
|
||||||
|
extra = true;
|
||||||
|
};
|
||||||
|
shellcheck.enable = true;
|
||||||
|
shfmt = {
|
||||||
|
enable = true;
|
||||||
|
indent_size = 0; # 0 causes shfmt to use tabs
|
||||||
|
};
|
||||||
|
yamlfmt.enable = true;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
};
|
||||||
|
}
|
167
go.mod
167
go.mod
|
@ -1,124 +1,137 @@
|
||||||
module github.com/pterodactyl/wings
|
module github.com/pterodactyl/wings
|
||||||
|
|
||||||
go 1.18
|
go 1.21
|
||||||
|
|
||||||
require (
|
require (
|
||||||
emperror.dev/errors v0.8.1
|
emperror.dev/errors v0.8.1
|
||||||
github.com/AlecAivazis/survey/v2 v2.3.6
|
github.com/AlecAivazis/survey/v2 v2.3.7
|
||||||
github.com/Jeffail/gabs/v2 v2.6.1
|
github.com/Jeffail/gabs/v2 v2.7.0
|
||||||
github.com/NYTimes/logrotate v1.0.0
|
github.com/NYTimes/logrotate v1.0.0
|
||||||
github.com/acobaugh/osrelease v0.1.0
|
github.com/acobaugh/osrelease v0.1.0
|
||||||
github.com/apex/log v1.9.0
|
github.com/apex/log v1.9.0
|
||||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
|
||||||
github.com/beevik/etree v1.1.0
|
github.com/beevik/etree v1.3.0
|
||||||
github.com/buger/jsonparser v1.1.1
|
github.com/buger/jsonparser v1.1.1
|
||||||
github.com/cenkalti/backoff/v4 v4.1.3
|
github.com/cenkalti/backoff/v4 v4.3.0
|
||||||
github.com/creasty/defaults v1.6.0
|
github.com/creasty/defaults v1.7.0
|
||||||
github.com/docker/docker v20.10.18+incompatible
|
github.com/docker/docker v25.0.4+incompatible
|
||||||
github.com/docker/go-connections v0.4.0
|
github.com/docker/go-connections v0.5.0
|
||||||
github.com/fatih/color v1.13.0
|
github.com/fatih/color v1.16.0
|
||||||
github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
|
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf
|
||||||
github.com/gabriel-vasile/mimetype v1.4.1
|
github.com/gabriel-vasile/mimetype v1.4.3
|
||||||
github.com/gammazero/workerpool v1.1.3
|
github.com/gammazero/workerpool v1.1.3
|
||||||
github.com/gbrlsnchs/jwt/v3 v3.0.1
|
github.com/gbrlsnchs/jwt/v3 v3.0.1
|
||||||
github.com/gin-gonic/gin v1.8.1
|
github.com/gin-gonic/gin v1.9.1
|
||||||
github.com/glebarez/sqlite v1.4.8
|
github.com/glebarez/sqlite v1.11.0
|
||||||
github.com/go-co-op/gocron v1.17.0
|
github.com/go-co-op/gocron v1.37.0
|
||||||
github.com/goccy/go-json v0.9.11
|
github.com/goccy/go-json v0.10.2
|
||||||
github.com/google/uuid v1.3.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/gorilla/websocket v1.5.0
|
github.com/gorilla/websocket v1.5.1
|
||||||
github.com/iancoleman/strcase v0.2.0
|
github.com/iancoleman/strcase v0.3.0
|
||||||
github.com/icza/dyno v0.0.0-20220812133438-f0b6f8a18845
|
github.com/icza/dyno v0.0.0-20230330125955-09f820a8d9c0
|
||||||
github.com/juju/ratelimit v1.0.2
|
github.com/juju/ratelimit v1.0.2
|
||||||
github.com/karrick/godirwalk v1.17.0
|
github.com/klauspost/compress v1.17.8
|
||||||
github.com/klauspost/compress v1.15.11
|
github.com/klauspost/pgzip v1.2.6
|
||||||
github.com/klauspost/pgzip v1.2.5
|
github.com/magiconair/properties v1.8.7
|
||||||
github.com/magiconair/properties v1.8.6
|
|
||||||
github.com/mattn/go-colorable v0.1.13
|
github.com/mattn/go-colorable v0.1.13
|
||||||
github.com/mholt/archiver/v4 v4.0.0-alpha.7
|
github.com/mholt/archiver/v4 v4.0.0-alpha.8
|
||||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||||
github.com/pkg/sftp v1.13.5
|
github.com/pkg/sftp v1.13.6
|
||||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
|
||||||
github.com/spf13/cobra v1.5.0
|
github.com/spf13/cobra v1.8.0
|
||||||
github.com/stretchr/testify v1.8.0
|
github.com/stretchr/testify v1.9.0
|
||||||
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
|
golang.org/x/crypto v0.22.0
|
||||||
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0
|
golang.org/x/sync v0.7.0
|
||||||
|
golang.org/x/sys v0.19.0
|
||||||
gopkg.in/ini.v1 v1.67.0
|
gopkg.in/ini.v1 v1.67.0
|
||||||
gopkg.in/yaml.v2 v2.4.0
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
gorm.io/gorm v1.23.10
|
gorm.io/gorm v1.25.9
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
github.com/Microsoft/hcsshim v0.12.2 // indirect
|
||||||
github.com/Microsoft/hcsshim v0.9.4 // indirect
|
github.com/andybalholm/brotli v1.1.0 // indirect
|
||||||
github.com/andybalholm/brotli v1.0.4 // indirect
|
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/bodgit/sevenzip v1.5.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
github.com/bodgit/windows v1.0.1 // indirect
|
||||||
github.com/containerd/fifo v1.0.0 // indirect
|
github.com/bytedance/sonic v1.11.3 // indirect
|
||||||
|
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
|
||||||
|
github.com/chenzhuoyu/iasm v0.9.1 // indirect
|
||||||
|
github.com/containerd/log v0.1.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
github.com/distribution/reference v0.6.0 // indirect
|
||||||
github.com/docker/go-metrics v0.0.1 // indirect
|
|
||||||
github.com/docker/go-units v0.5.0 // indirect
|
github.com/docker/go-units v0.5.0 // indirect
|
||||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/gammazero/deque v0.2.0 // indirect
|
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||||
|
github.com/gammazero/deque v0.2.1 // indirect
|
||||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||||
github.com/glebarez/go-sqlite v1.19.1 // indirect
|
github.com/glebarez/go-sqlite v1.22.0 // indirect
|
||||||
github.com/go-playground/locales v0.14.0 // indirect
|
github.com/go-logr/logr v1.4.1 // indirect
|
||||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/go-playground/validator/v10 v10.11.1 // indirect
|
github.com/go-playground/locales v0.14.1 // indirect
|
||||||
|
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||||
|
github.com/go-playground/validator/v10 v10.19.0 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
github.com/golang/snappy v0.0.4 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.0.1 // indirect
|
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||||
|
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||||
github.com/jinzhu/now v1.1.5 // indirect
|
github.com/jinzhu/now v1.1.5 // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||||
|
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
|
||||||
github.com/kr/fs v0.1.0 // indirect
|
github.com/kr/fs v0.1.0 // indirect
|
||||||
github.com/leodido/go-urn v1.2.1 // indirect
|
github.com/leodido/go-urn v1.4.0 // indirect
|
||||||
github.com/magefile/mage v1.14.0 // indirect
|
github.com/magefile/mage v1.15.0 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
|
|
||||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||||
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/morikuni/aec v1.0.0 // indirect
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
|
github.com/ncruces/go-strftime v0.1.9 // indirect
|
||||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
|
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
|
||||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||||
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
|
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||||
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
github.com/pelletier/go-toml/v2 v2.2.0 // indirect
|
||||||
github.com/pierrec/lz4/v4 v4.1.17 // indirect
|
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/prometheus/client_golang v1.13.0 // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||||
github.com/prometheus/client_model v0.2.0 // indirect
|
|
||||||
github.com/prometheus/common v0.37.0 // indirect
|
|
||||||
github.com/prometheus/procfs v0.8.0 // indirect
|
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
|
|
||||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
github.com/therootcompany/xz v1.0.1 // indirect
|
github.com/therootcompany/xz v1.0.1 // indirect
|
||||||
github.com/ugorji/go/codec v1.2.7 // indirect
|
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||||
github.com/ulikunitz/xz v0.5.10 // indirect
|
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||||
go.uber.org/atomic v1.10.0 // indirect
|
github.com/ulikunitz/xz v0.5.12 // indirect
|
||||||
go.uber.org/multierr v1.8.0 // indirect
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
|
||||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
go.opentelemetry.io/otel v1.25.0 // indirect
|
||||||
golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
|
||||||
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect
|
go.opentelemetry.io/otel/metric v1.25.0 // indirect
|
||||||
golang.org/x/term v0.0.0-20220919170432-7a66f970e087 // indirect
|
go.opentelemetry.io/otel/sdk v1.24.0 // indirect
|
||||||
golang.org/x/text v0.3.7 // indirect
|
go.opentelemetry.io/otel/trace v1.25.0 // indirect
|
||||||
|
go.uber.org/atomic v1.11.0 // indirect
|
||||||
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
|
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||||
|
golang.org/x/arch v0.7.0 // indirect
|
||||||
|
golang.org/x/mod v0.17.0 // indirect
|
||||||
|
golang.org/x/net v0.24.0 // indirect
|
||||||
|
golang.org/x/term v0.19.0 // indirect
|
||||||
|
golang.org/x/text v0.14.0 // indirect
|
||||||
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
|
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
|
||||||
golang.org/x/tools v0.1.12 // indirect
|
golang.org/x/tools v0.20.0 // indirect
|
||||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
||||||
google.golang.org/protobuf v1.28.1 // indirect
|
google.golang.org/protobuf v1.33.0 // indirect
|
||||||
modernc.org/libc v1.20.0 // indirect
|
gotest.tools/v3 v3.0.2 // indirect
|
||||||
modernc.org/mathutil v1.5.0 // indirect
|
modernc.org/libc v1.49.3 // indirect
|
||||||
modernc.org/memory v1.4.0 // indirect
|
modernc.org/mathutil v1.6.0 // indirect
|
||||||
modernc.org/sqlite v1.19.1 // indirect
|
modernc.org/memory v1.8.0 // indirect
|
||||||
|
modernc.org/sqlite v1.29.6 // indirect
|
||||||
)
|
)
|
||||||
|
|
|
@ -2,6 +2,7 @@ package cron
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"net"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
|
|
||||||
|
@ -17,9 +18,9 @@ type activityCron struct {
|
||||||
max int
|
max int
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run executes the cronjob and ensures we fetch and send all of the stored activity to the
|
// Run executes the cronjob and ensures we fetch and send all the stored activity to the
|
||||||
// Panel instance. Once activity is sent it is deleted from the local database instance. Any
|
// Panel instance. Once activity is sent it is deleted from the local database instance. Any
|
||||||
// SFTP specific events are not handled in this cron, they're handled seperately to account
|
// SFTP specific events are not handled in this cron, they're handled separately to account
|
||||||
// for de-duplication and event merging.
|
// for de-duplication and event merging.
|
||||||
func (ac *activityCron) Run(ctx context.Context) error {
|
func (ac *activityCron) Run(ctx context.Context) error {
|
||||||
// Don't execute this cron if there is currently one running. Once this task is completed
|
// Don't execute this cron if there is currently one running. Once this task is completed
|
||||||
|
@ -34,7 +35,6 @@ func (ac *activityCron) Run(ctx context.Context) error {
|
||||||
Where("event NOT LIKE ?", "server:sftp.%").
|
Where("event NOT LIKE ?", "server:sftp.%").
|
||||||
Limit(ac.max).
|
Limit(ac.max).
|
||||||
Find(&activity)
|
Find(&activity)
|
||||||
|
|
||||||
if tx.Error != nil {
|
if tx.Error != nil {
|
||||||
return errors.WithStack(tx.Error)
|
return errors.WithStack(tx.Error)
|
||||||
}
|
}
|
||||||
|
@ -42,15 +42,42 @@ func (ac *activityCron) Run(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := ac.manager.Client().SendActivityLogs(ctx, activity); err != nil {
|
// ids to delete from the database.
|
||||||
|
ids := make([]int, 0, len(activity))
|
||||||
|
// activities to send to the panel.
|
||||||
|
activities := make([]models.Activity, 0, len(activity))
|
||||||
|
for _, v := range activity {
|
||||||
|
// Delete any activity that has an invalid IP address. This is a fix for
|
||||||
|
// a bug that truncated the last octet of an IPv6 address in the database.
|
||||||
|
if ip := net.ParseIP(v.IP); ip == nil {
|
||||||
|
ids = append(ids, v.ID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
activities = append(activities, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ids) > 0 {
|
||||||
|
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
|
||||||
|
if tx.Error != nil {
|
||||||
|
return errors.WithStack(tx.Error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(activities) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ac.manager.Client().SendActivityLogs(ctx, activities); err != nil {
|
||||||
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
|
return errors.WrapIf(err, "cron: failed to send activity events to Panel")
|
||||||
}
|
}
|
||||||
|
|
||||||
var ids []int
|
// Add all the successful activities to the list of IDs to delete.
|
||||||
for _, v := range activity {
|
ids = make([]int, len(activities))
|
||||||
ids = append(ids, v.ID)
|
for i, v := range activities {
|
||||||
|
ids[i] = v.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Delete all the activities that were sent to the Panel (or that were invalid).
|
||||||
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
|
tx = database.Instance().WithContext(ctx).Where("id IN ?", ids).Delete(&models.Activity{})
|
||||||
if tx.Error != nil {
|
if tx.Error != nil {
|
||||||
return errors.WithStack(tx.Error)
|
return errors.WithStack(tx.Error)
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
package models
|
package models
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"gorm.io/gorm"
|
"gorm.io/gorm"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/system"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Event string
|
type Event string
|
||||||
|
@ -57,7 +57,9 @@ func (a Activity) SetUser(u string) *Activity {
|
||||||
// is trimmed down to remove any extraneous data, and the timestamp is set to the current
|
// is trimmed down to remove any extraneous data, and the timestamp is set to the current
|
||||||
// system time and then stored as UTC.
|
// system time and then stored as UTC.
|
||||||
func (a *Activity) BeforeCreate(_ *gorm.DB) error {
|
func (a *Activity) BeforeCreate(_ *gorm.DB) error {
|
||||||
a.IP = system.TrimIPSuffix(a.IP)
|
if ip, _, err := net.SplitHostPort(strings.TrimSpace(a.IP)); err == nil {
|
||||||
|
a.IP = ip
|
||||||
|
}
|
||||||
if a.Timestamp.IsZero() {
|
if a.Timestamp.IsZero() {
|
||||||
a.Timestamp = time.Now()
|
a.Timestamp = time.Now()
|
||||||
}
|
}
|
||||||
|
|
21
internal/ufs/LICENSE
Normal file
21
internal/ufs/LICENSE
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
17
internal/ufs/README.md
Normal file
17
internal/ufs/README.md
Normal file
|
@ -0,0 +1,17 @@
|
||||||
|
# Filesystem
|
||||||
|
|
||||||
|
## Licensing
|
||||||
|
|
||||||
|
Most code in this package is licensed under `MIT` with some exceptions.
|
||||||
|
|
||||||
|
The following files are licensed under `BSD-3-Clause` due to them being copied
|
||||||
|
verbatim or derived from [Go](https://go.dev)'s source code.
|
||||||
|
|
||||||
|
- [`file_posix.go`](./file_posix.go)
|
||||||
|
- [`mkdir_unix.go`](./mkdir_unix.go)
|
||||||
|
- [`path_unix.go`](./path_unix.go)
|
||||||
|
- [`removeall_unix.go`](./removeall_unix.go)
|
||||||
|
- [`stat_unix.go`](./stat_unix.go)
|
||||||
|
- [`walk.go`](./walk.go)
|
||||||
|
|
||||||
|
These changes are not associated with nor endorsed by The Go Authors.
|
12
internal/ufs/doc.go
Normal file
12
internal/ufs/doc.go
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
// Package ufs provides an abstraction layer for performing I/O on filesystems.
|
||||||
|
// This package is designed to be used in-place of standard `os` package I/O
|
||||||
|
// calls, and is not designed to be used as a generic filesystem abstraction
|
||||||
|
// like the `io/fs` package.
|
||||||
|
//
|
||||||
|
// The primary use-case of this package was to provide a "chroot-like" `os`
|
||||||
|
// wrapper, so we can safely sandbox I/O operations within a directory and
|
||||||
|
// use untrusted arbitrary paths.
|
||||||
|
package ufs
|
120
internal/ufs/error.go
Normal file
120
internal/ufs/error.go
Normal file
|
@ -0,0 +1,120 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
iofs "io/fs"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrIsDirectory is an error for when an operation that operates only on
|
||||||
|
// files is given a path to a directory.
|
||||||
|
ErrIsDirectory = errors.New("is a directory")
|
||||||
|
// ErrNotDirectory is an error for when an operation that operates only on
|
||||||
|
// directories is given a path to a file.
|
||||||
|
ErrNotDirectory = errors.New("not a directory")
|
||||||
|
// ErrBadPathResolution is an error for when a sand-boxed filesystem
|
||||||
|
// resolves a given path to a forbidden location.
|
||||||
|
ErrBadPathResolution = errors.New("bad path resolution")
|
||||||
|
// ErrNotRegular is an error for when an operation that operates only on
|
||||||
|
// regular files is passed something other than a regular file.
|
||||||
|
ErrNotRegular = errors.New("not a regular file")
|
||||||
|
|
||||||
|
// ErrClosed is an error for when an entry was accessed after being closed.
|
||||||
|
ErrClosed = iofs.ErrClosed
|
||||||
|
// ErrInvalid is an error for when an invalid argument was used.
|
||||||
|
ErrInvalid = iofs.ErrInvalid
|
||||||
|
// ErrExist is an error for when an entry already exists.
|
||||||
|
ErrExist = iofs.ErrExist
|
||||||
|
// ErrNotExist is an error for when an entry does not exist.
|
||||||
|
ErrNotExist = iofs.ErrNotExist
|
||||||
|
// ErrPermission is an error for when the required permissions to perform an
|
||||||
|
// operation are missing.
|
||||||
|
ErrPermission = iofs.ErrPermission
|
||||||
|
)
|
||||||
|
|
||||||
|
// LinkError records an error during a link or symlink or rename
|
||||||
|
// system call and the paths that caused it.
|
||||||
|
type LinkError = os.LinkError
|
||||||
|
|
||||||
|
// PathError records an error and the operation and file path that caused it.
|
||||||
|
type PathError = iofs.PathError
|
||||||
|
|
||||||
|
// SyscallError records an error from a specific system call.
|
||||||
|
type SyscallError = os.SyscallError
|
||||||
|
|
||||||
|
// NewSyscallError returns, as an error, a new SyscallError
|
||||||
|
// with the given system call name and error details.
|
||||||
|
// As a convenience, if err is nil, NewSyscallError returns nil.
|
||||||
|
func NewSyscallError(syscall string, err error) error {
|
||||||
|
return os.NewSyscallError(syscall, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertErrorType converts errors into our custom errors to ensure consistent
|
||||||
|
// error values.
|
||||||
|
func convertErrorType(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var pErr *PathError
|
||||||
|
switch {
|
||||||
|
case errors.As(err, &pErr):
|
||||||
|
switch {
|
||||||
|
// File exists
|
||||||
|
case errors.Is(pErr.Err, unix.EEXIST):
|
||||||
|
return &PathError{
|
||||||
|
Op: pErr.Op,
|
||||||
|
Path: pErr.Path,
|
||||||
|
Err: ErrExist,
|
||||||
|
}
|
||||||
|
// Is a directory
|
||||||
|
case errors.Is(pErr.Err, unix.EISDIR):
|
||||||
|
return &PathError{
|
||||||
|
Op: pErr.Op,
|
||||||
|
Path: pErr.Path,
|
||||||
|
Err: ErrIsDirectory,
|
||||||
|
}
|
||||||
|
// Not a directory
|
||||||
|
case errors.Is(pErr.Err, unix.ENOTDIR):
|
||||||
|
return &PathError{
|
||||||
|
Op: pErr.Op,
|
||||||
|
Path: pErr.Path,
|
||||||
|
Err: ErrNotDirectory,
|
||||||
|
}
|
||||||
|
// No such file or directory
|
||||||
|
case errors.Is(pErr.Err, unix.ENOENT):
|
||||||
|
return &PathError{
|
||||||
|
Op: pErr.Op,
|
||||||
|
Path: pErr.Path,
|
||||||
|
Err: ErrNotExist,
|
||||||
|
}
|
||||||
|
// Operation not permitted
|
||||||
|
case errors.Is(pErr.Err, unix.EPERM):
|
||||||
|
return &PathError{
|
||||||
|
Op: pErr.Op,
|
||||||
|
Path: pErr.Path,
|
||||||
|
Err: ErrPermission,
|
||||||
|
}
|
||||||
|
// Invalid cross-device link
|
||||||
|
case errors.Is(pErr.Err, unix.EXDEV):
|
||||||
|
return &PathError{
|
||||||
|
Op: pErr.Op,
|
||||||
|
Path: pErr.Path,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
}
|
||||||
|
// Too many levels of symbolic links
|
||||||
|
case errors.Is(pErr.Err, unix.ELOOP):
|
||||||
|
return &PathError{
|
||||||
|
Op: pErr.Op,
|
||||||
|
Path: pErr.Path,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
179
internal/ufs/file.go
Normal file
179
internal/ufs/file.go
Normal file
|
@ -0,0 +1,179 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
iofs "io/fs"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DirEntry is an entry read from a directory.
|
||||||
|
type DirEntry = iofs.DirEntry
|
||||||
|
|
||||||
|
// File describes readable and/or writable file from a Filesystem.
|
||||||
|
type File interface {
|
||||||
|
// Name returns the base name of the file.
|
||||||
|
Name() string
|
||||||
|
|
||||||
|
// Stat returns the FileInfo structure describing the file.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Stat() (FileInfo, error)
|
||||||
|
|
||||||
|
// ReadDir reads the contents of the directory associated with the file f
|
||||||
|
// and returns a slice of DirEntry values in directory order.
|
||||||
|
// Subsequent calls on the same file will yield later DirEntry records in the directory.
|
||||||
|
//
|
||||||
|
// If n > 0, ReadDir returns at most n DirEntry records.
|
||||||
|
// In this case, if ReadDir returns an empty slice, it will return an error explaining why.
|
||||||
|
// At the end of a directory, the error is io.EOF.
|
||||||
|
//
|
||||||
|
// If n <= 0, ReadDir returns all the DirEntry records remaining in the directory.
|
||||||
|
// When it succeeds, it returns a nil error (not io.EOF).
|
||||||
|
ReadDir(n int) ([]DirEntry, error)
|
||||||
|
|
||||||
|
// Readdirnames reads the contents of the directory associated with file
|
||||||
|
// and returns a slice of up to n names of files in the directory,
|
||||||
|
// in directory order. Subsequent calls on the same file will yield
|
||||||
|
// further names.
|
||||||
|
//
|
||||||
|
// If n > 0, Readdirnames returns at most n names. In this case, if
|
||||||
|
// Readdirnames returns an empty slice, it will return a non-nil error
|
||||||
|
// explaining why. At the end of a directory, the error is io.EOF.
|
||||||
|
//
|
||||||
|
// If n <= 0, Readdirnames returns all the names from the directory in
|
||||||
|
// a single slice. In this case, if Readdirnames succeeds (reads all
|
||||||
|
// the way to the end of the directory), it returns the slice and a
|
||||||
|
// nil error. If it encounters an error before the end of the
|
||||||
|
// directory, Readdirnames returns the names read until that point and
|
||||||
|
// a non-nil error.
|
||||||
|
Readdirnames(n int) (names []string, err error)
|
||||||
|
|
||||||
|
// Fd returns the integer Unix file descriptor referencing the open file.
|
||||||
|
// If f is closed, the file descriptor becomes invalid.
|
||||||
|
// If f is garbage collected, a finalizer may close the file descriptor,
|
||||||
|
// making it invalid; see runtime.SetFinalizer for more information on when
|
||||||
|
// a finalizer might be run. On Unix systems this will cause the SetDeadline
|
||||||
|
// methods to stop working.
|
||||||
|
// Because file descriptors can be reused, the returned file descriptor may
|
||||||
|
// only be closed through the Close method of f, or by its finalizer during
|
||||||
|
// garbage collection. Otherwise, during garbage collection the finalizer
|
||||||
|
// may close an unrelated file descriptor with the same (reused) number.
|
||||||
|
//
|
||||||
|
// As an alternative, see the f.SyscallConn method.
|
||||||
|
Fd() uintptr
|
||||||
|
|
||||||
|
// Truncate changes the size of the file.
|
||||||
|
// It does not change the I/O offset.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Truncate(size int64) error
|
||||||
|
|
||||||
|
io.Closer
|
||||||
|
|
||||||
|
io.Reader
|
||||||
|
io.ReaderAt
|
||||||
|
io.ReaderFrom
|
||||||
|
|
||||||
|
io.Writer
|
||||||
|
io.WriterAt
|
||||||
|
|
||||||
|
io.Seeker
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileInfo describes a file and is returned by Stat and Lstat.
|
||||||
|
type FileInfo = iofs.FileInfo
|
||||||
|
|
||||||
|
// FileMode represents a file's mode and permission bits.
|
||||||
|
// The bits have the same definition on all systems, so that
|
||||||
|
// information about files can be moved from one system
|
||||||
|
// to another portably. Not all bits apply to all systems.
|
||||||
|
// The only required bit is ModeDir for directories.
|
||||||
|
type FileMode = iofs.FileMode
|
||||||
|
|
||||||
|
// The defined file mode bits are the most significant bits of the FileMode.
|
||||||
|
// The nine least-significant bits are the standard Unix rwxrwxrwx permissions.
|
||||||
|
// The values of these bits should be considered part of the public API and
|
||||||
|
// may be used in wire protocols or disk representations: they must not be
|
||||||
|
// changed, although new bits might be added.
|
||||||
|
const (
|
||||||
|
// ModeDir represents a directory.
|
||||||
|
// d: is a directory
|
||||||
|
ModeDir = iofs.ModeDir
|
||||||
|
// ModeAppend represents an append-only file.
|
||||||
|
// a: append-only
|
||||||
|
ModeAppend = iofs.ModeAppend
|
||||||
|
// ModeExclusive represents an exclusive file.
|
||||||
|
// l: exclusive use
|
||||||
|
ModeExclusive = iofs.ModeExclusive
|
||||||
|
// ModeTemporary .
|
||||||
|
// T: temporary file; Plan 9 only.
|
||||||
|
ModeTemporary = iofs.ModeTemporary
|
||||||
|
// ModeSymlink .
|
||||||
|
// L: symbolic link.
|
||||||
|
ModeSymlink = iofs.ModeSymlink
|
||||||
|
// ModeDevice .
|
||||||
|
// D: device file.
|
||||||
|
ModeDevice = iofs.ModeDevice
|
||||||
|
// ModeNamedPipe .
|
||||||
|
// p: named pipe (FIFO)
|
||||||
|
ModeNamedPipe = iofs.ModeNamedPipe
|
||||||
|
// ModeSocket .
|
||||||
|
// S: Unix domain socket.
|
||||||
|
ModeSocket = iofs.ModeSocket
|
||||||
|
// ModeSetuid .
|
||||||
|
// u: setuid
|
||||||
|
ModeSetuid = iofs.ModeSetuid
|
||||||
|
// ModeSetgid .
|
||||||
|
// g: setgid
|
||||||
|
ModeSetgid = iofs.ModeSetgid
|
||||||
|
// ModeCharDevice .
|
||||||
|
// c: Unix character device, when ModeDevice is set
|
||||||
|
ModeCharDevice = iofs.ModeCharDevice
|
||||||
|
// ModeSticky .
|
||||||
|
// t: sticky
|
||||||
|
ModeSticky = iofs.ModeSticky
|
||||||
|
// ModeIrregular .
|
||||||
|
// ?: non-regular file; nothing else is known about this file.
|
||||||
|
ModeIrregular = iofs.ModeIrregular
|
||||||
|
|
||||||
|
// ModeType .
|
||||||
|
ModeType = iofs.ModeType
|
||||||
|
|
||||||
|
// ModePerm .
|
||||||
|
// Unix permission bits, 0o777.
|
||||||
|
ModePerm = iofs.ModePerm
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// O_RDONLY opens the file read-only.
|
||||||
|
O_RDONLY = unix.O_RDONLY
|
||||||
|
// O_WRONLY opens the file write-only.
|
||||||
|
O_WRONLY = unix.O_WRONLY
|
||||||
|
// O_RDWR opens the file read-write.
|
||||||
|
O_RDWR = unix.O_RDWR
|
||||||
|
// O_APPEND appends data to the file when writing.
|
||||||
|
O_APPEND = unix.O_APPEND
|
||||||
|
// O_CREATE creates a new file if it doesn't exist.
|
||||||
|
O_CREATE = unix.O_CREAT
|
||||||
|
// O_EXCL is used with O_CREATE, file must not exist.
|
||||||
|
O_EXCL = unix.O_EXCL
|
||||||
|
// O_SYNC open for synchronous I/O.
|
||||||
|
O_SYNC = unix.O_SYNC
|
||||||
|
// O_TRUNC truncates regular writable file when opened.
|
||||||
|
O_TRUNC = unix.O_TRUNC
|
||||||
|
// O_DIRECTORY opens a directory only. If the entry is not a directory an
|
||||||
|
// error will be returned.
|
||||||
|
O_DIRECTORY = unix.O_DIRECTORY
|
||||||
|
// O_NOFOLLOW opens the exact path given without following symlinks.
|
||||||
|
O_NOFOLLOW = unix.O_NOFOLLOW
|
||||||
|
O_CLOEXEC = unix.O_CLOEXEC
|
||||||
|
O_LARGEFILE = unix.O_LARGEFILE
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
AT_SYMLINK_NOFOLLOW = unix.AT_SYMLINK_NOFOLLOW
|
||||||
|
AT_REMOVEDIR = unix.AT_REMOVEDIR
|
||||||
|
AT_EMPTY_PATH = unix.AT_EMPTY_PATH
|
||||||
|
)
|
49
internal/ufs/file_posix.go
Normal file
49
internal/ufs/file_posix.go
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was copied from `go/src/os/file_posix.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix || (js && wasm) || wasip1 || windows
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ignoringEINTR makes a function call and repeats it if it returns an
|
||||||
|
// EINTR error. This appears to be required even though we install all
|
||||||
|
// signal handlers with SA_RESTART: see https://go.dev/issue/22838,
|
||||||
|
// https://go.dev/issue/38033, https://go.dev/issue/38836,
|
||||||
|
// https://go.dev/issue/40846. Also, https://go.dev/issue/20400 and
|
||||||
|
// https://go.dev/issue/36644 are issues in which a signal handler is
|
||||||
|
// installed without setting SA_RESTART. None of these are the common case,
|
||||||
|
// but there are enough of them that it seems that we can't avoid
|
||||||
|
// an EINTR loop.
|
||||||
|
func ignoringEINTR(fn func() error) error {
|
||||||
|
for {
|
||||||
|
err := fn()
|
||||||
|
if err != unix.EINTR {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
|
||||||
|
func syscallMode(i FileMode) (o FileMode) {
|
||||||
|
o |= i.Perm()
|
||||||
|
if i&ModeSetuid != 0 {
|
||||||
|
o |= unix.S_ISUID
|
||||||
|
}
|
||||||
|
if i&ModeSetgid != 0 {
|
||||||
|
o |= unix.S_ISGID
|
||||||
|
}
|
||||||
|
if i&ModeSticky != 0 {
|
||||||
|
o |= unix.S_ISVTX
|
||||||
|
}
|
||||||
|
// No mapping for Go's ModeTemporary (plan9 only).
|
||||||
|
return
|
||||||
|
}
|
168
internal/ufs/filesystem.go
Normal file
168
internal/ufs/filesystem.go
Normal file
|
@ -0,0 +1,168 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Filesystem represents a filesystem capable of performing I/O operations.
|
||||||
|
type Filesystem interface {
|
||||||
|
// Chmod changes the mode of the named file to mode.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the mode of the link's target.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// A different subset of the mode bits are used, depending on the
|
||||||
|
// operating system.
|
||||||
|
//
|
||||||
|
// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
|
||||||
|
// ModeSticky are used.
|
||||||
|
//
|
||||||
|
// On Windows, only the 0200 bit (owner writable) of mode is used; it
|
||||||
|
// controls whether the file's read-only attribute is set or cleared.
|
||||||
|
// The other bits are currently unused. For compatibility with Go 1.12
|
||||||
|
// and earlier, use a non-zero mode. Use mode 0400 for a read-only
|
||||||
|
// file and 0600 for a readable+writable file.
|
||||||
|
//
|
||||||
|
// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
|
||||||
|
// and ModeTemporary are used.
|
||||||
|
Chmod(name string, mode FileMode) error
|
||||||
|
|
||||||
|
// Chown changes the numeric uid and gid of the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the uid and gid of the link's target.
|
||||||
|
// A uid or gid of -1 means to not change that value.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
|
||||||
|
// EPLAN9 error, wrapped in *PathError.
|
||||||
|
Chown(name string, uid, gid int) error
|
||||||
|
|
||||||
|
// Lchown changes the numeric uid and gid of the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, it changes the uid and gid of the link itself.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
//
|
||||||
|
// On Windows, it always returns the syscall.EWINDOWS error, wrapped
|
||||||
|
// in *PathError.
|
||||||
|
Lchown(name string, uid, gid int) error
|
||||||
|
|
||||||
|
// Chtimes changes the access and modification times of the named
|
||||||
|
// file, similar to the Unix utime() or utimes() functions.
|
||||||
|
//
|
||||||
|
// The underlying filesystem may truncate or round the values to a
|
||||||
|
// less precise time unit.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Chtimes(name string, atime, mtime time.Time) error
|
||||||
|
|
||||||
|
// Create creates or truncates the named file. If the file already exists,
|
||||||
|
// it is truncated.
|
||||||
|
//
|
||||||
|
// If the file does not exist, it is created with mode 0666
|
||||||
|
// (before umask). If successful, methods on the returned File can
|
||||||
|
// be used for I/O; the associated file descriptor has mode O_RDWR.
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Create(name string) (File, error)
|
||||||
|
|
||||||
|
// Mkdir creates a new directory with the specified name and permission
|
||||||
|
// bits (before umask).
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Mkdir(name string, perm FileMode) error
|
||||||
|
|
||||||
|
// MkdirAll creates a directory named path, along with any necessary
|
||||||
|
// parents, and returns nil, or else returns an error.
|
||||||
|
//
|
||||||
|
// The permission bits perm (before umask) are used for all
|
||||||
|
// directories that MkdirAll creates.
|
||||||
|
// If path is already a directory, MkdirAll does nothing
|
||||||
|
// and returns nil.
|
||||||
|
MkdirAll(path string, perm FileMode) error
|
||||||
|
|
||||||
|
// Open opens the named file for reading.
|
||||||
|
//
|
||||||
|
// If successful, methods on the returned file can be used for reading; the
|
||||||
|
// associated file descriptor has mode O_RDONLY.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Open(name string) (File, error)
|
||||||
|
|
||||||
|
// OpenFile is the generalized open call; most users will use Open
|
||||||
|
// or Create instead. It opens the named file with specified flag
|
||||||
|
// (O_RDONLY etc.).
|
||||||
|
//
|
||||||
|
// If the file does not exist, and the O_CREATE flag
|
||||||
|
// is passed, it is created with mode perm (before umask). If successful,
|
||||||
|
// methods on the returned File can be used for I/O.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
OpenFile(name string, flag int, perm FileMode) (File, error)
|
||||||
|
|
||||||
|
// ReadDir reads the named directory,
|
||||||
|
//
|
||||||
|
// returning all its directory entries sorted by filename.
|
||||||
|
// If an error occurs reading the directory, ReadDir returns the entries it
|
||||||
|
// was able to read before the error, along with the error.
|
||||||
|
ReadDir(name string) ([]DirEntry, error)
|
||||||
|
|
||||||
|
// Remove removes the named file or (empty) directory.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Remove(name string) error
|
||||||
|
|
||||||
|
// RemoveAll removes path and any children it contains.
|
||||||
|
//
|
||||||
|
// It removes everything it can but returns the first error
|
||||||
|
// it encounters. If the path does not exist, RemoveAll
|
||||||
|
// returns nil (no error).
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
RemoveAll(path string) error
|
||||||
|
|
||||||
|
// Rename renames (moves) oldpath to newpath.
|
||||||
|
//
|
||||||
|
// If newpath already exists and is not a directory, Rename replaces it.
|
||||||
|
// OS-specific restrictions may apply when oldpath and newpath are in different directories.
|
||||||
|
// Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *LinkError.
|
||||||
|
Rename(oldname, newname string) error
|
||||||
|
|
||||||
|
// Stat returns a FileInfo describing the named file.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Stat(name string) (FileInfo, error)
|
||||||
|
|
||||||
|
// Lstat returns a FileInfo describing the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, the returned FileInfo
|
||||||
|
// describes the symbolic link. Lstat makes no attempt to follow the link.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
Lstat(name string) (FileInfo, error)
|
||||||
|
|
||||||
|
// Symlink creates newname as a symbolic link to oldname.
|
||||||
|
//
|
||||||
|
// On Windows, a symlink to a non-existent oldname creates a file symlink;
|
||||||
|
// if oldname is later created as a directory the symlink will not work.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *LinkError.
|
||||||
|
Symlink(oldname, newname string) error
|
||||||
|
|
||||||
|
// WalkDir walks the file tree rooted at root, calling fn for each file or
|
||||||
|
// directory in the tree, including root.
|
||||||
|
//
|
||||||
|
// All errors that arise visiting files and directories are filtered by fn:
|
||||||
|
// see the [WalkDirFunc] documentation for details.
|
||||||
|
//
|
||||||
|
// The files are walked in lexical order, which makes the output deterministic
|
||||||
|
// but requires WalkDir to read an entire directory into memory before proceeding
|
||||||
|
// to walk that directory.
|
||||||
|
//
|
||||||
|
// WalkDir does not follow symbolic links found in directories,
|
||||||
|
// but if root itself is a symbolic link, its target will be walked.
|
||||||
|
WalkDir(root string, fn WalkDirFunc) error
|
||||||
|
}
|
159
internal/ufs/fs_quota.go
Normal file
159
internal/ufs/fs_quota.go
Normal file
|
@ -0,0 +1,159 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Quota wraps a UnixFS with usage accounting so write operations can be
// rejected once a configurable size limit is reached.
type Quota struct {
	// fs is the underlying filesystem that runs the actual I/O operations.
	*UnixFS

	// limit is the size limit of the filesystem.
	//
	// limit is atomic to allow the limit to be safely changed after the
	// filesystem was created.
	//
	// A limit of `-1` disables any write operation from being performed.
	// A limit of `0` disables any limit checking.
	limit atomic.Int64

	// usage is the current usage of the filesystem.
	//
	// If usage is set to `-1`, it hasn't been calculated yet.
	usage atomic.Int64
}
|
||||||
|
|
||||||
|
func NewQuota(fs *UnixFS, limit int64) *Quota {
|
||||||
|
qfs := Quota{UnixFS: fs}
|
||||||
|
qfs.limit.Store(limit)
|
||||||
|
return &qfs
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the filesystem.
|
||||||
|
func (fs *Quota) Close() (err error) {
|
||||||
|
err = fs.UnixFS.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit returns the current size limit of the filesystem. A value of -1
// blocks all writes; 0 means unlimited.
func (fs *Quota) Limit() int64 {
	return fs.limit.Load()
}
|
||||||
|
|
||||||
|
// SetLimit sets a new size limit for the filesystem, returning the previous
// limit.
func (fs *Quota) SetLimit(newLimit int64) int64 {
	return fs.limit.Swap(newLimit)
}
|
||||||
|
|
||||||
|
// Usage returns the current tracked usage of the filesystem. A value of -1
// means the usage has not been calculated yet.
func (fs *Quota) Usage() int64 {
	return fs.usage.Load()
}
|
||||||
|
|
||||||
|
// SetUsage replaces the tracked usage total of the filesystem, returning the
// previous value.
func (fs *Quota) SetUsage(newUsage int64) int64 {
	return fs.usage.Swap(newUsage)
}
|
||||||
|
|
||||||
|
// Add adds `i` to the tracked usage total.
|
||||||
|
func (fs *Quota) Add(i int64) int64 {
|
||||||
|
usage := fs.Usage()
|
||||||
|
|
||||||
|
// If adding `i` to the usage will put us below 0, cap it. (`i` can be negative)
|
||||||
|
if usage+i < 0 {
|
||||||
|
fs.usage.Store(0)
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return fs.usage.Add(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanFit checks if the given size can fit in the filesystem without exceeding
|
||||||
|
// the limit of the filesystem.
|
||||||
|
func (fs *Quota) CanFit(size int64) bool {
|
||||||
|
// Get the size limit of the filesystem.
|
||||||
|
limit := fs.Limit()
|
||||||
|
switch limit {
|
||||||
|
case -1:
|
||||||
|
// A limit of -1 means no write operations are allowed.
|
||||||
|
return false
|
||||||
|
case 0:
|
||||||
|
// A limit of 0 means unlimited.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Any other limit is a value we need to check.
|
||||||
|
usage := fs.Usage()
|
||||||
|
if usage == -1 {
|
||||||
|
// We don't know what the current usage is yet.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the current usage + the requested size are under the limit of the
|
||||||
|
// filesystem, allow it.
|
||||||
|
if usage+size <= limit {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Welp, the size would exceed the limit of the filesystem, deny it.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes the named file or (empty) directory, subtracting the file's
// size from the tracked quota usage when a regular file is removed.
func (fs *Quota) Remove(name string) error {
	// For information on why this interface is used here, check its
	// documentation.
	s, err := fs.RemoveStat(name)
	if err != nil {
		return err
	}

	// Don't reduce the quota's usage as `name` is not a regular file.
	if !s.Mode().IsRegular() {
		return nil
	}

	// Remove the size of the deleted file from the quota usage.
	fs.Add(-s.Size())
	return nil
}
|
||||||
|
|
||||||
|
// RemoveAll removes path and any children it contains.
//
// It removes everything it can but returns the first error
// it encounters. If the path does not exist, RemoveAll
// returns nil (no error).
//
// If there is an error, it will be of type *PathError.
//
// Quota usage is reduced for every regular file removed, because the
// package-level removeAll helper ends up calling Quota's unlinkat override.
func (fs *Quota) RemoveAll(name string) error {
	name, err := fs.unsafePath(name)
	if err != nil {
		return err
	}
	// While removeAll internally checks this, I want to make sure we check it
	// and return the proper error so our tests can ensure that this will never
	// be a possibility.
	if name == "." {
		return &PathError{
			Op:   "removeall",
			Path: name,
			Err:  ErrBadPathResolution,
		}
	}
	return fs.removeAll(name)
}
|
||||||
|
|
||||||
|
// removeAll delegates to the package-level removeAll helper, passing fs so
// the helper dispatches to Quota's unlinkat override (which keeps usage
// accounting up to date).
func (fs *Quota) removeAll(path string) error {
	return removeAll(fs, path)
}
|
||||||
|
|
||||||
|
// unlinkat overrides UnixFS.unlinkat to subtract a regular file's size from
// the tracked quota usage before unlinking it. flags != 0 (AT_REMOVEDIR)
// removals target directories and do not affect usage.
func (fs *Quota) unlinkat(dirfd int, name string, flags int) error {
	if flags == 0 {
		// Stat first so we know how much usage to subtract. A stat error is
		// deliberately ignored here; the unlink below will surface it.
		s, err := fs.Lstatat(dirfd, name)
		if err == nil && s.Mode().IsRegular() {
			// NOTE(review): usage is decremented even if the unlink below
			// fails, which can make the tracked total drift low — confirm
			// this is acceptable.
			fs.Add(-s.Size())
		}
	}
	return fs.UnixFS.unlinkat(dirfd, name, flags)
}
|
825
internal/ufs/fs_unix.go
Normal file
825
internal/ufs/fs_unix.go
Normal file
|
@ -0,0 +1,825 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UnixFS is a filesystem that uses the unix package to make io calls.
//
// This is used for proper sand-boxing and full control over the exact syscalls
// being performed.
type UnixFS struct {
	// basePath is the base path for file operations to take place in.
	basePath string

	// dirfd holds the file descriptor of BasePath and is used to ensure
	// operations are restricted into descendants of BasePath.
	//
	// dirfd is atomic so Close can invalidate it; a stored value of -1 means
	// the filesystem has been closed.
	dirfd atomic.Int64

	// useOpenat2 controls whether the `openat2` syscall is used instead of the
	// older `openat` syscall.
	useOpenat2 bool
}
|
||||||
|
|
||||||
|
// NewUnixFS creates a new sandboxed unix filesystem. BasePath is used as the
// sandbox path, operations on BasePath itself are not allowed, but any
// operations on its descendants are. Symlinks pointing outside BasePath are
// checked and prevented from enabling an escape in a non-raceable manner.
func NewUnixFS(basePath string, useOpenat2 bool) (*UnixFS, error) {
	basePath = strings.TrimSuffix(basePath, "/")
	// We don't need Openat2, if we are given a basePath that is already unsafe
	// I give up on trying to sandbox it.
	//
	// NOTE(review): AT_EMPTY_PATH is being passed where a dirfd is expected;
	// this presumably only works because basePath is absolute (so the dirfd
	// is ignored) — confirm callers always pass an absolute path.
	dirfd, err := unix.Openat(AT_EMPTY_PATH, basePath, O_DIRECTORY|O_RDONLY, 0)
	if err != nil {
		return nil, convertErrorType(err)
	}

	fs := &UnixFS{
		basePath:   basePath,
		useOpenat2: useOpenat2,
	}
	fs.dirfd.Store(int64(dirfd))
	return fs, nil
}
|
||||||
|
|
||||||
|
// BasePath returns the base path of the UnixFS sandbox, file operations
// pointing outside this path are prohibited and will be blocked by all
// operations implemented by UnixFS.
func (fs *UnixFS) BasePath() string {
	return fs.basePath
}
|
||||||
|
|
||||||
|
// Close releases the file descriptor used to sandbox operations within the
|
||||||
|
// base path of the filesystem.
|
||||||
|
func (fs *UnixFS) Close() error {
|
||||||
|
// Once closed, change dirfd to something invalid to detect when it has been
|
||||||
|
// closed.
|
||||||
|
defer func() {
|
||||||
|
fs.dirfd.Store(-1)
|
||||||
|
}()
|
||||||
|
return unix.Close(int(fs.dirfd.Load()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chmod changes the mode of the named file to mode.
//
// If the file is a symbolic link, it changes the mode of the link's target.
// If there is an error, it will be of type *PathError.
//
// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
// ModeSticky are used.
func (fs *UnixFS) Chmod(name string, mode FileMode) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return err
	}
	return convertErrorType(unix.Fchmodat(dirfd, name, uint32(mode), 0))
}
|
||||||
|
|
||||||
|
// Chown changes the numeric uid and gid of the named file.
//
// If the file is a symbolic link, it changes the uid and gid of the link's target.
// A uid or gid of -1 means to not change that value.
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Chown(name string, uid, gid int) error {
	// flags == 0 follows symlinks, matching os.Chown semantics.
	return fs.fchown(name, uid, gid, 0)
}
|
||||||
|
|
||||||
|
// Lchown changes the numeric uid and gid of the named file.
//
// If the file is a symbolic link, it changes the uid and gid of the link itself.
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Lchown(name string, uid, gid int) error {
	// With AT_SYMLINK_NOFOLLOW, Fchownat acts like Lchown but allows us to
	// pass a dirfd.
	return fs.fchown(name, uid, gid, AT_SYMLINK_NOFOLLOW)
}
|
||||||
|
|
||||||
|
// fchown is a re-usable Fchownat syscall used by Chown and Lchown. flags is
// either 0 (follow symlinks) or AT_SYMLINK_NOFOLLOW (operate on the link
// itself).
func (fs *UnixFS) fchown(name string, uid, gid, flags int) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return err
	}
	return convertErrorType(unix.Fchownat(dirfd, name, uid, gid, flags))
}
|
||||||
|
|
||||||
|
// Chownat is like Chown but allows passing an existing directory file
// descriptor rather than needing to resolve one.
func (fs *UnixFS) Chownat(dirfd int, name string, uid, gid int) error {
	return convertErrorType(unix.Fchownat(dirfd, name, uid, gid, 0))
}
|
||||||
|
|
||||||
|
// Lchownat is like Lchown but allows passing an existing directory file
// descriptor rather than needing to resolve one.
func (fs *UnixFS) Lchownat(dirfd int, name string, uid, gid int) error {
	return convertErrorType(unix.Fchownat(dirfd, name, uid, gid, AT_SYMLINK_NOFOLLOW))
}
|
||||||
|
|
||||||
|
// Chtimes changes the access and modification times of the named
// file, similar to the Unix utime() or utimes() functions.
//
// The underlying filesystem may truncate or round the values to a
// less precise time unit. A zero time.Time leaves the corresponding
// timestamp unchanged (see Chtimesat).
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Chtimes(name string, atime, mtime time.Time) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return err
	}
	return fs.Chtimesat(dirfd, name, atime, mtime)
}
|
||||||
|
|
||||||
|
// Chtimesat is like Chtimes but allows passing an existing directory file
|
||||||
|
// descriptor rather than needing to resolve one.
|
||||||
|
func (fs *UnixFS) Chtimesat(dirfd int, name string, atime, mtime time.Time) error {
|
||||||
|
var utimes [2]unix.Timespec
|
||||||
|
set := func(i int, t time.Time) {
|
||||||
|
if t.IsZero() {
|
||||||
|
utimes[i] = unix.Timespec{Sec: unix.UTIME_OMIT, Nsec: unix.UTIME_OMIT}
|
||||||
|
} else {
|
||||||
|
utimes[i] = unix.NsecToTimespec(t.UnixNano())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
set(0, atime)
|
||||||
|
set(1, mtime)
|
||||||
|
// This does support `AT_SYMLINK_NOFOLLOW` as well if needed.
|
||||||
|
if err := unix.UtimesNanoAt(dirfd, name, utimes[0:], 0); err != nil {
|
||||||
|
return convertErrorType(&PathError{Op: "chtimes", Path: name, Err: err})
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates or truncates the named file. If the file already exists,
// it is truncated.
//
// If the file does not exist, it is created with mode 0644 (before umask);
// note this is more restrictive than os.Create's 0666. If successful,
// methods on the returned File can be used for writing; the associated
// file descriptor has mode O_WRONLY.
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Create(name string) (File, error) {
	return fs.OpenFile(name, O_CREATE|O_WRONLY|O_TRUNC, 0o644)
}
|
||||||
|
|
||||||
|
// Mkdir creates a new directory with the specified name and permission
// bits (before umask).
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Mkdir(name string, mode FileMode) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return err
	}
	return fs.Mkdirat(dirfd, name, mode)
}
|
||||||
|
|
||||||
|
// Mkdirat is like Mkdir but creates the directory relative to an
// already-open directory file descriptor, skipping path resolution.
func (fs *UnixFS) Mkdirat(dirfd int, name string, mode FileMode) error {
	return convertErrorType(unix.Mkdirat(dirfd, name, uint32(mode)))
}
|
||||||
|
|
||||||
|
// MkdirAll creates a directory named path, along with any necessary
// parents, and returns nil, or else returns an error.
//
// The permission bits perm (before umask) are used for all
// directories that MkdirAll creates.
// If path is already a directory, MkdirAll does nothing
// and returns nil.
func (fs *UnixFS) MkdirAll(name string, mode FileMode) error {
	// Ensure name is somewhat clean before continuing.
	name, err := fs.unsafePath(name)
	if err != nil {
		return err
	}
	return fs.mkdirAll(name, mode)
}
|
||||||
|
|
||||||
|
// Open opens the named file for reading; it is shorthand for
// OpenFile(name, O_RDONLY, 0).
//
// If successful, methods on the returned file can be used for reading; the
// associated file descriptor has mode O_RDONLY.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Open(name string) (File, error) {
	return fs.OpenFile(name, O_RDONLY, 0)
}
|
||||||
|
|
||||||
|
// OpenFile is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.).
//
// If the file does not exist, and the O_CREATE flag
// is passed, it is created with mode perm (before umask). If successful,
// methods on the returned File can be used for I/O.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) OpenFile(name string, flag int, mode FileMode) (File, error) {
	fd, err := fs.openFile(name, flag, mode)
	if err != nil {
		return nil, err
	}
	// Do not close `fd` here, it is passed to a file that needs the fd, the
	// caller of this function is responsible for calling Close() on the File
	// to release the file descriptor.
	return os.NewFile(uintptr(fd), name), nil
}
|
||||||
|
|
||||||
|
// openFile resolves name within the sandbox and opens it, returning the raw
// file descriptor. Ownership of the descriptor passes to the caller.
func (fs *UnixFS) openFile(name string, flag int, mode FileMode) (int, error) {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return 0, err
	}
	return fs.openat(dirfd, name, flag, mode)
}
|
||||||
|
|
||||||
|
// OpenFileat is like OpenFile but opens name relative to an already-open
// directory file descriptor, skipping path resolution.
func (fs *UnixFS) OpenFileat(dirfd int, name string, flag int, mode FileMode) (File, error) {
	fd, err := fs.openat(dirfd, name, flag, mode)
	if err != nil {
		return nil, err
	}
	// Do not close `fd` here, it is passed to a file that needs the fd, the
	// caller of this function is responsible for calling Close() on the File
	// to release the file descriptor.
	return os.NewFile(uintptr(fd), name), nil
}
|
||||||
|
|
||||||
|
// ReadDir reads the named directory,
//
// returning all its directory entries sorted by filename.
// If an error occurs reading the directory, ReadDir returns the entries it
// was able to read before the error, along with the error.
func (fs *UnixFS) ReadDir(path string) ([]DirEntry, error) {
	dirfd, name, closeFd, err := fs.safePath(path)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return nil, err
	}
	fd, err := fs.openat(dirfd, name, O_DIRECTORY|O_RDONLY, 0)
	if err != nil {
		return nil, err
	}
	// The directory fd is only needed while its entries are read.
	defer unix.Close(fd)
	return fs.readDir(fd, name, nil)
}
|
||||||
|
|
||||||
|
// RemoveStat is a combination of Stat and Remove, it is used to more
// efficiently remove a file when the caller needs to stat it before
// removing it.
//
// This optimized function exists for our QuotaFS abstraction, which needs
// to track writes to a filesystem. When removing a file, the QuotaFS needs
// to know if the entry is a file and if so, how large it is. Because we
// need to Stat a file in order to get its mode and size, we will already
// know if the entry needs to be removed by using Unlink or Rmdir. The
// standard `Remove` method just tries both Unlink and Rmdir (in that order)
// as it ends up usually being faster and more efficient than calling Stat +
// the proper operation in the first place.
//
// On a removal error the FileInfo from the successful Lstat is still
// returned alongside the error.
func (fs *UnixFS) RemoveStat(name string) (FileInfo, error) {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return nil, err
	}

	// Lstat name, we use Lstat as Unlink doesn't care about symlinks.
	s, err := fs.Lstatat(dirfd, name)
	if err != nil {
		return nil, err
	}

	// The stat result tells us whether to unlink or rmdir, avoiding the
	// try-both approach used by Remove.
	if s.IsDir() {
		err = fs.unlinkat(dirfd, name, AT_REMOVEDIR) // Rmdir
	} else {
		err = fs.unlinkat(dirfd, name, 0)
	}
	if err != nil {
		return s, convertErrorType(&PathError{Op: "remove", Path: name, Err: err})
	}
	return s, nil
}
|
||||||
|
|
||||||
|
// Remove removes the named file or (empty) directory.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Remove(name string) error {
	dirfd, name, closeFd, err := fs.safePath(name)
	// closeFd is safe to call even when safePath returns an error, so it is
	// deferred before the error check to guarantee the dirfd is released.
	defer closeFd()
	if err != nil {
		return err
	}

	// Prevent trying to Remove the base directory.
	if name == "." {
		return &PathError{
			Op:   "remove",
			Path: name,
			Err:  ErrBadPathResolution,
		}
	}

	// System call interface forces us to know
	// whether name is a file or directory.
	// Try both: it is cheaper on average than
	// doing a Stat plus the right one.
	err = fs.unlinkat(dirfd, name, 0)
	if err == nil {
		return nil
	}
	err1 := fs.unlinkat(dirfd, name, AT_REMOVEDIR) // Rmdir
	if err1 == nil {
		return nil
	}

	// Both failed: figure out which error to return.
	// OS X and Linux differ on whether unlink(dir)
	// returns EISDIR, so can't use that. However,
	// both agree that rmdir(file) returns ENOTDIR,
	// so we can use that to decide which error is real.
	// Rmdir might also return ENOTDIR if given a bad
	// file path, like /etc/passwd/foo, but in that case,
	// both errors will be ENOTDIR, so it's okay to
	// use the error from unlink.
	if err1 != unix.ENOTDIR {
		err = err1
	}
	return convertErrorType(&PathError{Op: "remove", Path: name, Err: err})
}
|
||||||
|
|
||||||
|
// RemoveAll removes path and any children it contains.
//
// It removes everything it can but returns the first error
// it encounters. If the path does not exist, RemoveAll
// returns nil (no error).
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) RemoveAll(name string) error {
	name, err := fs.unsafePath(name)
	if err != nil {
		return err
	}
	// While removeAll internally checks this, I want to make sure we check it
	// and return the proper error so our tests can ensure that this will never
	// be a possibility.
	if name == "." {
		return &PathError{
			Op:   "removeall",
			Path: name,
			Err:  ErrBadPathResolution,
		}
	}
	return fs.removeAll(name)
}
|
||||||
|
|
||||||
|
// unlinkat wraps unix.Unlinkat, retrying the syscall when it is interrupted
// by a signal (EINTR).
func (fs *UnixFS) unlinkat(dirfd int, name string, flags int) error {
	return ignoringEINTR(func() error {
		return unix.Unlinkat(dirfd, name, flags)
	})
}
|
||||||
|
|
||||||
|
// Rename renames (moves) oldpath to newpath.
|
||||||
|
//
|
||||||
|
// If newpath already exists and is not a directory, Rename replaces it.
|
||||||
|
// OS-specific restrictions may apply when oldpath and newpath are in different directories.
|
||||||
|
// Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *LinkError.
|
||||||
|
func (fs *UnixFS) Rename(oldpath, newpath string) error {
|
||||||
|
// Simple case: both paths are the same.
|
||||||
|
if oldpath == newpath {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
olddirfd, oldname, closeFd, err := fs.safePath(oldpath)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Ensure that we are not trying to rename the base directory itself.
|
||||||
|
// While unix.Renameat ends up throwing a "device or resource busy" error,
|
||||||
|
// that doesn't mean we are protecting the system properly.
|
||||||
|
if oldname == "." {
|
||||||
|
return convertErrorType(&PathError{
|
||||||
|
Op: "rename",
|
||||||
|
Path: oldname,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// Stat the old target to return proper errors.
|
||||||
|
if _, err := fs.Lstatat(olddirfd, oldname); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
newdirfd, newname, closeFd2, err := fs.safePath(newpath)
|
||||||
|
if err != nil {
|
||||||
|
closeFd2()
|
||||||
|
if !errors.Is(err, ErrNotExist) {
|
||||||
|
return convertErrorType(err)
|
||||||
|
}
|
||||||
|
var pathErr *PathError
|
||||||
|
if !errors.As(err, &pathErr) {
|
||||||
|
return convertErrorType(err)
|
||||||
|
}
|
||||||
|
if err := fs.MkdirAll(pathErr.Path, 0o755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
newdirfd, newname, closeFd2, err = fs.safePath(newpath)
|
||||||
|
defer closeFd2()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
defer closeFd2()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that we are not trying to rename the base directory itself.
|
||||||
|
// While unix.Renameat ends up throwing a "device or resource busy" error,
|
||||||
|
// that doesn't mean we are protecting the system properly.
|
||||||
|
if newname == "." {
|
||||||
|
return convertErrorType(&PathError{
|
||||||
|
Op: "rename",
|
||||||
|
Path: newname,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// Stat the new target to return proper errors.
|
||||||
|
_, err = fs.Lstatat(newdirfd, newname)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
return convertErrorType(&PathError{
|
||||||
|
Op: "rename",
|
||||||
|
Path: newname,
|
||||||
|
Err: ErrExist,
|
||||||
|
})
|
||||||
|
case !errors.Is(err, ErrNotExist):
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return unix.Renameat(olddirfd, oldname, newdirfd, newname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat returns a FileInfo describing the named file, following symbolic
// links.
//
// If there is an error, it will be of type *PathError.
func (fs *UnixFS) Stat(name string) (FileInfo, error) {
	return fs.fstat(name, 0)
}
|
||||||
|
|
||||||
|
// Statat is like Stat but allows passing an existing directory file
|
||||||
|
// descriptor rather than needing to resolve one.
|
||||||
|
func (fs *UnixFS) Statat(dirfd int, name string) (FileInfo, error) {
|
||||||
|
return fs.fstatat(dirfd, name, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lstat returns a FileInfo describing the named file.
|
||||||
|
//
|
||||||
|
// If the file is a symbolic link, the returned FileInfo
|
||||||
|
// describes the symbolic link. Lstat makes no attempt to follow the link.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *PathError.
|
||||||
|
func (fs *UnixFS) Lstat(name string) (FileInfo, error) {
|
||||||
|
return fs.fstat(name, AT_SYMLINK_NOFOLLOW)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lstatat is like Lstat but allows passing an existing directory file
|
||||||
|
// descriptor rather than needing to resolve one.
|
||||||
|
func (fs *UnixFS) Lstatat(dirfd int, name string) (FileInfo, error) {
|
||||||
|
return fs.fstatat(dirfd, name, AT_SYMLINK_NOFOLLOW)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) fstat(name string, flags int) (FileInfo, error) {
|
||||||
|
dirfd, name, closeFd, err := fs.safePath(name)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fs.fstatat(dirfd, name, flags)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) fstatat(dirfd int, name string, flags int) (FileInfo, error) {
|
||||||
|
var s fileStat
|
||||||
|
if err := ignoringEINTR(func() error {
|
||||||
|
return unix.Fstatat(dirfd, name, &s.sys, flags)
|
||||||
|
}); err != nil {
|
||||||
|
return nil, &PathError{Op: "stat", Path: name, Err: err}
|
||||||
|
}
|
||||||
|
fillFileStatFromSys(&s, name)
|
||||||
|
return &s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Symlink creates newname as a symbolic link to oldname.
|
||||||
|
//
|
||||||
|
// On Windows, a symlink to a non-existent oldname creates a file symlink;
|
||||||
|
// if oldname is later created as a directory the symlink will not work.
|
||||||
|
//
|
||||||
|
// If there is an error, it will be of type *LinkError.
|
||||||
|
func (fs *UnixFS) Symlink(oldpath, newpath string) error {
|
||||||
|
dirfd, newpath, closeFd, err := fs.safePath(newpath)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ignoringEINTR(func() error {
|
||||||
|
// We aren't concerned with oldpath here as a symlink can point anywhere
|
||||||
|
// it wants.
|
||||||
|
return unix.Symlinkat(oldpath, dirfd, newpath)
|
||||||
|
}); err != nil {
|
||||||
|
return &LinkError{Op: "symlink", Old: oldpath, New: newpath, Err: err}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Touch will attempt to open a file for reading and/or writing. If the file
|
||||||
|
// does not exist it will be created, and any missing parent directories will
|
||||||
|
// also be created. The opened file may be truncated, only if `flag` has
|
||||||
|
// O_TRUNC set.
|
||||||
|
func (fs *UnixFS) Touch(path string, flag int, mode FileMode) (File, error) {
|
||||||
|
if flag&O_CREATE == 0 {
|
||||||
|
flag |= O_CREATE
|
||||||
|
}
|
||||||
|
dirfd, name, closeFd, err := fs.safePath(path)
|
||||||
|
defer closeFd()
|
||||||
|
if err == nil {
|
||||||
|
return fs.OpenFileat(dirfd, name, flag, mode)
|
||||||
|
}
|
||||||
|
if !errors.Is(err, ErrNotExist) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var pathErr *PathError
|
||||||
|
if !errors.As(err, &pathErr) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := fs.MkdirAll(pathErr.Path, 0o755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Try to open the file one more time after creating its parent directories.
|
||||||
|
return fs.OpenFile(path, flag, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WalkDir walks the file tree rooted at root, calling fn for each file or
|
||||||
|
// directory in the tree, including root.
|
||||||
|
//
|
||||||
|
// All errors that arise visiting files and directories are filtered by fn:
|
||||||
|
// see the [WalkDirFunc] documentation for details.
|
||||||
|
//
|
||||||
|
// The files are walked in lexical order, which makes the output deterministic
|
||||||
|
// but requires WalkDir to read an entire directory into memory before proceeding
|
||||||
|
// to walk that directory.
|
||||||
|
//
|
||||||
|
// WalkDir does not follow symbolic links found in directories,
|
||||||
|
// but if root itself is a symbolic link, its target will be walked.
|
||||||
|
func (fs *UnixFS) WalkDir(root string, fn WalkDirFunc) error {
|
||||||
|
return WalkDir(fs, root, fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// openat is a wrapper around both unix.Openat and unix.Openat2. If the UnixFS
|
||||||
|
// was configured to enable openat2 support, unix.Openat2 will be used instead
|
||||||
|
// of unix.Openat due to having better security properties for our use-case.
|
||||||
|
func (fs *UnixFS) openat(dirfd int, name string, flag int, mode FileMode) (int, error) {
|
||||||
|
if flag&O_NOFOLLOW == 0 {
|
||||||
|
flag |= O_NOFOLLOW
|
||||||
|
}
|
||||||
|
|
||||||
|
var fd int
|
||||||
|
for {
|
||||||
|
var err error
|
||||||
|
if fs.useOpenat2 {
|
||||||
|
fd, err = fs._openat2(dirfd, name, uint64(flag), uint64(syscallMode(mode)))
|
||||||
|
} else {
|
||||||
|
fd, err = fs._openat(dirfd, name, flag, uint32(syscallMode(mode)))
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// We have to check EINTR here, per issues https://go.dev/issue/11180 and https://go.dev/issue/39237.
|
||||||
|
if err == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return 0, convertErrorType(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we are not using openat2, do additional path checking. This assumes
|
||||||
|
// that openat2 is using `RESOLVE_BENEATH` to avoid the same security
|
||||||
|
// issue.
|
||||||
|
if !fs.useOpenat2 {
|
||||||
|
var finalPath string
|
||||||
|
finalPath, err := filepath.EvalSymlinks(filepath.Join("/proc/self/fd/", strconv.Itoa(dirfd)))
|
||||||
|
if err != nil {
|
||||||
|
return fd, convertErrorType(err)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
if !errors.Is(err, ErrNotExist) {
|
||||||
|
return fd, fmt.Errorf("failed to evaluate symlink: %w", convertErrorType(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
// The target of one of the symlinks (EvalSymlinks is recursive)
|
||||||
|
// does not exist. So get the path that does not exist and use
|
||||||
|
// that for further validation instead.
|
||||||
|
var pErr *PathError
|
||||||
|
if ok := errors.As(err, &pErr); !ok {
|
||||||
|
return fd, fmt.Errorf("failed to evaluate symlink: %w", convertErrorType(err))
|
||||||
|
}
|
||||||
|
finalPath = pErr.Path
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the path is within our root.
|
||||||
|
if !fs.unsafeIsPathInsideOfBase(finalPath) {
|
||||||
|
return fd, convertErrorType(&PathError{
|
||||||
|
Op: "openat",
|
||||||
|
Path: name,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fd, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// _openat is a wrapper around unix.Openat. This method should never be directly
|
||||||
|
// called, use `openat` instead.
|
||||||
|
func (fs *UnixFS) _openat(dirfd int, name string, flag int, mode uint32) (int, error) {
|
||||||
|
// Ensure the O_CLOEXEC flag is set.
|
||||||
|
// Go sets this in the os package, but since we are directly using unix
|
||||||
|
// we need to set it ourselves.
|
||||||
|
if flag&O_CLOEXEC == 0 {
|
||||||
|
flag |= O_CLOEXEC
|
||||||
|
}
|
||||||
|
// O_LARGEFILE is set by Openat for us automatically.
|
||||||
|
fd, err := unix.Openat(dirfd, name, flag, mode)
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
return fd, nil
|
||||||
|
case err == unix.EINTR:
|
||||||
|
return 0, err
|
||||||
|
case err == unix.EAGAIN:
|
||||||
|
return 0, err
|
||||||
|
default:
|
||||||
|
return 0, &PathError{Op: "openat", Path: name, Err: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// _openat2 is a wonderful syscall that supersedes the `openat` syscall. It has
|
||||||
|
// improved validation and security characteristics that weren't available or
|
||||||
|
// considered when `openat` was originally implemented. As such, it is only
|
||||||
|
// present in Kernel 5.6 and above.
|
||||||
|
//
|
||||||
|
// This method should never be directly called, use `openat` instead.
|
||||||
|
func (fs *UnixFS) _openat2(dirfd int, name string, flag uint64, mode uint64) (int, error) {
|
||||||
|
// Ensure the O_CLOEXEC flag is set.
|
||||||
|
// Go sets this when using the os package, but since we are directly using
|
||||||
|
// the unix package we need to set it ourselves.
|
||||||
|
if flag&O_CLOEXEC == 0 {
|
||||||
|
flag |= O_CLOEXEC
|
||||||
|
}
|
||||||
|
// Ensure the O_LARGEFILE flag is set.
|
||||||
|
// Go sets this for unix.Open, unix.Openat, but not unix.Openat2.
|
||||||
|
if flag&O_LARGEFILE == 0 {
|
||||||
|
flag |= O_LARGEFILE
|
||||||
|
}
|
||||||
|
fd, err := unix.Openat2(dirfd, name, &unix.OpenHow{
|
||||||
|
Flags: flag,
|
||||||
|
Mode: mode,
|
||||||
|
// This is the bread and butter of preventing a symlink escape, without
|
||||||
|
// this option, we have to handle path validation fully on our own.
|
||||||
|
//
|
||||||
|
// This is why using Openat2 over Openat is preferred if available.
|
||||||
|
Resolve: unix.RESOLVE_BENEATH,
|
||||||
|
})
|
||||||
|
switch {
|
||||||
|
case err == nil:
|
||||||
|
return fd, nil
|
||||||
|
case err == unix.EINTR:
|
||||||
|
return 0, err
|
||||||
|
case err == unix.EAGAIN:
|
||||||
|
return 0, err
|
||||||
|
default:
|
||||||
|
return 0, &PathError{Op: "openat2", Path: name, Err: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) SafePath(path string) (int, string, func(), error) {
|
||||||
|
return fs.safePath(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *UnixFS) safePath(path string) (dirfd int, file string, closeFd func(), err error) {
|
||||||
|
// Default closeFd to a NO-OP.
|
||||||
|
closeFd = func() {}
|
||||||
|
|
||||||
|
// Use unsafePath to clean the path and strip BasePath if path is absolute.
|
||||||
|
var name string
|
||||||
|
name, err = fs.unsafePath(path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if dirfd was closed, this will happen if (*UnixFS).Close()
|
||||||
|
// was called.
|
||||||
|
fsDirfd := int(fs.dirfd.Load())
|
||||||
|
if fsDirfd == -1 {
|
||||||
|
err = ErrClosed
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split the parent from the last element in the path, this gives us the
|
||||||
|
// "file name" and the full path to its parent.
|
||||||
|
var dir string
|
||||||
|
dir, file = filepath.Split(name)
|
||||||
|
// If dir is empty then name is not nested.
|
||||||
|
if dir == "" {
|
||||||
|
// We don't need to set closeFd here as it will default to a NO-OP and
|
||||||
|
// `fs.dirfd` is re-used until the filesystem is no-longer needed.
|
||||||
|
dirfd = fsDirfd
|
||||||
|
|
||||||
|
// Return dirfd, name, an empty closeFd func, and no error
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dir will usually contain a trailing slash as filepath.Split doesn't
|
||||||
|
// trim slashes.
|
||||||
|
dir = strings.TrimSuffix(dir, "/")
|
||||||
|
dirfd, err = fs.openat(fsDirfd, dir, O_DIRECTORY|O_RDONLY, 0)
|
||||||
|
if dirfd != 0 {
|
||||||
|
// Set closeFd to close the newly opened directory file descriptor.
|
||||||
|
closeFd = func() { _ = unix.Close(dirfd) }
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return dirfd, name, the closeFd func, and err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// unsafePath prefixes the given path and prefixes it with the filesystem's
|
||||||
|
// base path, cleaning the result. The path returned by this function may not
|
||||||
|
// be inside the filesystem's base path, additional checks are required to
|
||||||
|
// safely use paths returned by this function.
|
||||||
|
func (fs *UnixFS) unsafePath(path string) (string, error) {
|
||||||
|
// Calling filepath.Clean on the joined directory will resolve it to the
|
||||||
|
// absolute path, removing any ../ type of resolution arguments, and leaving
|
||||||
|
// us with a direct path link.
|
||||||
|
//
|
||||||
|
// This will also trim the existing root path off the beginning of the path
|
||||||
|
// passed to the function since that can get a bit messy.
|
||||||
|
r := filepath.Clean(filepath.Join(fs.basePath, strings.TrimPrefix(path, fs.basePath)))
|
||||||
|
|
||||||
|
if fs.unsafeIsPathInsideOfBase(r) {
|
||||||
|
// This is kinda ironic isn't it.
|
||||||
|
// We do this as we are operating with dirfds and `*at` syscalls which
|
||||||
|
// behave differently if given an absolute path.
|
||||||
|
//
|
||||||
|
// First trim the BasePath, then trim any leading slashes.
|
||||||
|
r = strings.TrimPrefix(strings.TrimPrefix(r, fs.basePath), "/")
|
||||||
|
// If the path is empty then return "." as the path is pointing to the
|
||||||
|
// root.
|
||||||
|
if r == "" {
|
||||||
|
return ".", nil
|
||||||
|
}
|
||||||
|
return r, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", &PathError{
|
||||||
|
Op: "safePath",
|
||||||
|
Path: path,
|
||||||
|
Err: ErrBadPathResolution,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unsafeIsPathInsideOfBase checks if the given path is inside the filesystem's
|
||||||
|
// base path.
|
||||||
|
func (fs *UnixFS) unsafeIsPathInsideOfBase(path string) bool {
|
||||||
|
return strings.HasPrefix(
|
||||||
|
strings.TrimSuffix(path, "/")+"/",
|
||||||
|
fs.basePath+"/",
|
||||||
|
)
|
||||||
|
}
|
255
internal/ufs/fs_unix_test.go
Normal file
255
internal/ufs/fs_unix_test.go
Normal file
|
@ -0,0 +1,255 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
|
)
|
||||||
|
|
||||||
|
type testUnixFS struct {
|
||||||
|
*ufs.UnixFS
|
||||||
|
|
||||||
|
TmpDir string
|
||||||
|
Root string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *testUnixFS) Cleanup() {
|
||||||
|
_ = fs.Close()
|
||||||
|
_ = os.RemoveAll(fs.TmpDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTestUnixFS() (*testUnixFS, error) {
|
||||||
|
tmpDir, err := os.MkdirTemp(os.TempDir(), "ufs")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
root := filepath.Join(tmpDir, "root")
|
||||||
|
if err := os.Mkdir(root, 0o755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// TODO: test both disabled and enabled.
|
||||||
|
fs, err := ufs.NewUnixFS(root, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tfs := &testUnixFS{
|
||||||
|
UnixFS: fs,
|
||||||
|
TmpDir: tmpDir,
|
||||||
|
Root: root,
|
||||||
|
}
|
||||||
|
return tfs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnixFS_Remove(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
fs, err := newTestUnixFS()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fs.Cleanup()
|
||||||
|
|
||||||
|
t.Run("base directory", func(t *testing.T) {
|
||||||
|
// Try to remove the base directory.
|
||||||
|
if err := fs.Remove(""); !errors.Is(err, ufs.ErrBadPathResolution) {
|
||||||
|
t.Errorf("expected an a bad path resolution error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("path traversal", func(t *testing.T) {
|
||||||
|
// Try to remove the base directory.
|
||||||
|
if err := fs.RemoveAll("../root"); !errors.Is(err, ufs.ErrBadPathResolution) {
|
||||||
|
t.Errorf("expected an a bad path resolution error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnixFS_RemoveAll(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
fs, err := newTestUnixFS()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fs.Cleanup()
|
||||||
|
|
||||||
|
t.Run("base directory", func(t *testing.T) {
|
||||||
|
// Try to remove the base directory.
|
||||||
|
if err := fs.RemoveAll(""); !errors.Is(err, ufs.ErrBadPathResolution) {
|
||||||
|
t.Errorf("expected an a bad path resolution error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("path traversal", func(t *testing.T) {
|
||||||
|
// Try to remove the base directory.
|
||||||
|
if err := fs.RemoveAll("../root"); !errors.Is(err, ufs.ErrBadPathResolution) {
|
||||||
|
t.Errorf("expected an a bad path resolution error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnixFS_Rename(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
fs, err := newTestUnixFS()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fs.Cleanup()
|
||||||
|
|
||||||
|
t.Run("rename base directory", func(t *testing.T) {
|
||||||
|
// Try to rename the base directory.
|
||||||
|
if err := fs.Rename("", "yeet"); !errors.Is(err, ufs.ErrBadPathResolution) {
|
||||||
|
t.Errorf("expected an a bad path resolution error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("rename over base directory", func(t *testing.T) {
|
||||||
|
// Create a directory that we are going to try and move over top of the
|
||||||
|
// existing base directory.
|
||||||
|
if err := fs.Mkdir("overwrite_dir", 0o755); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to rename over the base directory.
|
||||||
|
if err := fs.Rename("overwrite_dir", ""); !errors.Is(err, ufs.ErrBadPathResolution) {
|
||||||
|
t.Errorf("expected an a bad path resolution error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("directory rename", func(t *testing.T) {
|
||||||
|
// Create a directory to rename to something else.
|
||||||
|
if err := fs.Mkdir("test_directory", 0o755); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to rename "test_directory" to "directory".
|
||||||
|
if err := fs.Rename("test_directory", "directory"); err != nil {
|
||||||
|
t.Errorf("expected no error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, "directory")); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("file rename", func(t *testing.T) {
|
||||||
|
// Create a directory to rename to something else.
|
||||||
|
if f, err := fs.Create("test_file"); err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
_ = f.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to rename "test_file" to "file".
|
||||||
|
if err := fs.Rename("test_file", "file"); err != nil {
|
||||||
|
t.Errorf("expected no error, but got: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, "file")); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnixFS_Touch(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
fs, err := newTestUnixFS()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fs.Cleanup()
|
||||||
|
|
||||||
|
t.Run("base directory", func(t *testing.T) {
|
||||||
|
path := "i_touched_a_file"
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("existing parent directory", func(t *testing.T) {
|
||||||
|
dir := "some_parent_directory"
|
||||||
|
if err := fs.Mkdir(dir, 0o755); err != nil {
|
||||||
|
t.Errorf("error creating parent directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
path := filepath.Join(dir, "i_touched_a_file")
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("error touching file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("non-existent parent directory", func(t *testing.T) {
|
||||||
|
path := "some_other_directory/i_touched_a_file"
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("error touching file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("non-existent parent directories", func(t *testing.T) {
|
||||||
|
path := "some_other_directory/some_directory/i_touched_a_file"
|
||||||
|
f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("error touching file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
|
||||||
|
// Sanity check
|
||||||
|
if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
|
||||||
|
t.Errorf("Lstat errored when performing sanity check: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
27
internal/ufs/go.LICENSE
Normal file
27
internal/ufs/go.LICENSE
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
67
internal/ufs/mkdir_unix.go
Normal file
67
internal/ufs/mkdir_unix.go
Normal file
|
@ -0,0 +1,67 @@
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was derived from `go/src/os/path.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mkdirAll is a recursive Mkdir implementation that properly handles symlinks.
|
||||||
|
func (fs *UnixFS) mkdirAll(name string, mode FileMode) error {
|
||||||
|
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
|
||||||
|
dir, err := fs.Lstat(name)
|
||||||
|
if err == nil {
|
||||||
|
if dir.Mode()&ModeSymlink != 0 {
|
||||||
|
// If the final path is a symlink, resolve its target and use that
|
||||||
|
// to check instead.
|
||||||
|
dir, err = fs.Stat(name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if dir.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return convertErrorType(&PathError{Op: "mkdir", Path: name, Err: unix.ENOTDIR})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slow path: make sure parent exists and then call Mkdir for path.
|
||||||
|
i := len(name)
|
||||||
|
for i > 0 && name[i-1] == '/' { // Skip trailing path separator.
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
|
||||||
|
j := i
|
||||||
|
for j > 0 && name[j-1] != '/' { // Scan backward over element.
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
|
||||||
|
if j > 1 {
|
||||||
|
// Create parent.
|
||||||
|
err = fs.mkdirAll(name[:j-1], mode)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parent now exists; invoke Mkdir and use its result.
|
||||||
|
err = fs.Mkdir(name, mode)
|
||||||
|
if err != nil {
|
||||||
|
// Handle arguments like "foo/." by
|
||||||
|
// double-checking that directory doesn't exist.
|
||||||
|
dir, err1 := fs.Lstat(name)
|
||||||
|
if err1 == nil && dir.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
80
internal/ufs/path_unix.go
Normal file
80
internal/ufs/path_unix.go
Normal file
|
@ -0,0 +1,80 @@
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was copied from `go/src/os/path.go`
|
||||||
|
// and `go/src/os/path_unix.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// basename removes trailing slashes and the leading directory name from path name.
func basename(name string) string {
	// Strip trailing slashes, but never reduce the string below one byte.
	end := len(name)
	for end > 1 && name[end-1] == '/' {
		end--
	}
	name = name[:end]
	// Drop everything up to and including the last remaining slash.
	for i := len(name) - 2; i >= 0; i-- {
		if name[i] == '/' {
			name = name[i+1:]
			break
		}
	}
	return name
}
|
||||||
|
|
||||||
|
// endsWithDot reports whether the final component of path is ".".
func endsWithDot(path string) bool {
	if path == "." {
		return true
	}
	n := len(path)
	// A trailing "." preceded by a path separator (e.g. "foo/.") also counts.
	return n >= 2 && path[n-1] == '.' && os.IsPathSeparator(path[n-2])
}
|
||||||
|
|
||||||
|
// splitPath returns the base name and parent directory.
func splitPath(path string) (string, string) {
	// Collapse runs of leading slashes down to a single slash.
	for len(path) > 1 && path[0] == '/' && path[1] == '/' {
		path = path[1:]
	}

	// Strip trailing slashes, never going below one byte.
	end := len(path)
	for end > 1 && path[end-1] == '/' {
		end--
	}
	path = path[:end]

	// Locate the final slash; everything after it is the base name.
	for i := len(path) - 2; i >= 0; i-- {
		if path[i] != '/' {
			continue
		}
		dir := path[:i]
		if i == 0 {
			// Keep the root slash as the parent.
			dir = path[:1]
		}
		return dir, path[i+1:]
	}

	// No slash found: the path is relative from "here".
	return ".", path
}
|
117
internal/ufs/quota_writer.go
Normal file
117
internal/ufs/quota_writer.go
Normal file
|
@ -0,0 +1,117 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CountedWriter is a writer that counts the amount of data written to the
|
||||||
|
// underlying writer.
|
||||||
|
type CountedWriter struct {
|
||||||
|
File
|
||||||
|
|
||||||
|
counter atomic.Int64
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCountedWriter returns a new countedWriter that counts the amount of bytes
|
||||||
|
// written to the underlying writer.
|
||||||
|
func NewCountedWriter(f File) *CountedWriter {
|
||||||
|
return &CountedWriter{File: f}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesWritten returns the amount of bytes that have been written to the
|
||||||
|
// underlying writer.
|
||||||
|
func (w *CountedWriter) BytesWritten() int64 {
|
||||||
|
return w.counter.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the error from the writer if any. If the error is an EOF, nil
|
||||||
|
// will be returned.
|
||||||
|
func (w *CountedWriter) Error() error {
|
||||||
|
if errors.Is(w.err, io.EOF) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return w.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write writes bytes to the underlying writer while tracking the total amount
|
||||||
|
// of bytes written.
|
||||||
|
func (w *CountedWriter) Write(p []byte) (int, error) {
|
||||||
|
if w.err != nil {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write is a very simple operation for us to handle.
|
||||||
|
n, err := w.File.Write(p)
|
||||||
|
w.counter.Add(int64(n))
|
||||||
|
w.err = err
|
||||||
|
|
||||||
|
// TODO: is this how we actually want to handle errors with this?
|
||||||
|
if err == io.EOF {
|
||||||
|
return n, io.EOF
|
||||||
|
} else {
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *CountedWriter) ReadFrom(r io.Reader) (n int64, err error) {
|
||||||
|
cr := NewCountedReader(r)
|
||||||
|
n, err = w.File.ReadFrom(cr)
|
||||||
|
w.counter.Add(n)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountedReader is a reader that counts the amount of data read from the
|
||||||
|
// underlying reader.
|
||||||
|
type CountedReader struct {
|
||||||
|
reader io.Reader
|
||||||
|
|
||||||
|
counter atomic.Int64
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ io.Reader = (*CountedReader)(nil)
|
||||||
|
|
||||||
|
// NewCountedReader returns a new countedReader that counts the amount of bytes
|
||||||
|
// read from the underlying reader.
|
||||||
|
func NewCountedReader(r io.Reader) *CountedReader {
|
||||||
|
return &CountedReader{reader: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesRead returns the amount of bytes that have been read from the underlying
|
||||||
|
// reader.
|
||||||
|
func (r *CountedReader) BytesRead() int64 {
|
||||||
|
return r.counter.Load()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the error from the reader if any. If the error is an EOF, nil
|
||||||
|
// will be returned.
|
||||||
|
func (r *CountedReader) Error() error {
|
||||||
|
if errors.Is(r.err, io.EOF) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return r.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read reads bytes from the underlying reader while tracking the total amount
|
||||||
|
// of bytes read.
|
||||||
|
func (r *CountedReader) Read(p []byte) (int, error) {
|
||||||
|
if r.err != nil {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
n, err := r.reader.Read(p)
|
||||||
|
r.counter.Add(int64(n))
|
||||||
|
r.err = err
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
return n, io.EOF
|
||||||
|
} else {
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
}
|
207
internal/ufs/removeall_unix.go
Normal file
207
internal/ufs/removeall_unix.go
Normal file
|
@ -0,0 +1,207 @@
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was derived from `go/src/os/removeall_at.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// unixFS is the subset of *UnixFS behavior required by the removeAll
// routines, presumably so the recursion can be exercised against a test
// double — confirm against the package's tests.
type unixFS interface {
	// Open opens the named file for reading.
	Open(name string) (File, error)
	// Remove removes the named file or (empty) directory.
	Remove(name string) error
	// unlinkat removes path relative to the directory referred to by dirfd.
	unlinkat(dirfd int, path string, flags int) error
}
|
||||||
|
|
||||||
|
// removeAll removes path and any children it contains by delegating to the
// package-level removeAll through the unixFS interface.
func (fs *UnixFS) removeAll(path string) error {
	return removeAll(fs, path)
}
|
||||||
|
|
||||||
|
// removeAll removes path and everything it contains. It is derived from the
// stdlib implementation in go/src/os/removeall_at.go, but performs all
// filesystem operations through fs.
func removeAll(fs unixFS, path string) error {
	if path == "" {
		// fail silently to retain compatibility with previous behavior
		// of RemoveAll. See issue https://go.dev/issue/28830.
		return nil
	}

	// The rmdir system call does not permit removing ".",
	// so we don't permit it either.
	if endsWithDot(path) {
		return &PathError{Op: "removeall", Path: path, Err: unix.EINVAL}
	}

	// Simple case: if Remove works, we're done.
	err := fs.Remove(path)
	if err == nil || errors.Is(err, ErrNotExist) {
		return nil
	}

	// RemoveAll recurses by deleting the path base from
	// its parent directory
	parentDir, base := splitPath(path)

	parent, err := fs.Open(parentDir)
	if errors.Is(err, ErrNotExist) {
		// If parent does not exist, base cannot exist. Fail silently
		return nil
	}
	if err != nil {
		return err
	}
	defer parent.Close()

	if err := removeAllFrom(fs, parent, base); err != nil {
		// Re-qualify the error with the parent directory so the caller
		// sees the full failing path, not just the basename.
		if pathErr, ok := err.(*PathError); ok {
			pathErr.Path = parentDir + string(os.PathSeparator) + pathErr.Path
			err = pathErr
		}
		return convertErrorType(err)
	}
	return nil
}
|
||||||
|
|
||||||
|
// removeAllFrom removes the entry base — and, if it is a directory,
// everything beneath it — relative to the already-open parent directory.
// Derived from the stdlib removeAllFrom in go/src/os/removeall_at.go.
func removeAllFrom(fs unixFS, parent File, base string) error {
	parentFd := int(parent.Fd())
	// Simple case: if Unlink (aka remove) works, we're done.
	err := fs.unlinkat(parentFd, base, 0)
	if err == nil || errors.Is(err, ErrNotExist) {
		return nil
	}

	// EISDIR means that we have a directory, and we need to
	// remove its contents.
	// EPERM or EACCES means that we don't have write permission on
	// the parent directory, but this entry might still be a directory
	// whose contents need to be removed.
	// Otherwise, just return the error.
	if err != unix.EISDIR && err != unix.EPERM && err != unix.EACCES {
		return &PathError{Op: "unlinkat", Path: base, Err: err}
	}

	// Is this a directory we need to recurse into?
	var statInfo unix.Stat_t
	statErr := ignoringEINTR(func() error {
		return unix.Fstatat(parentFd, base, &statInfo, AT_SYMLINK_NOFOLLOW)
	})
	if statErr != nil {
		if errors.Is(statErr, ErrNotExist) {
			return nil
		}
		return &PathError{Op: "fstatat", Path: base, Err: statErr}
	}
	if statInfo.Mode&unix.S_IFMT != unix.S_IFDIR {
		// Not a directory; return the error from the unix.Unlinkat.
		return &PathError{Op: "unlinkat", Path: base, Err: err}
	}

	// Remove the directory's entries.
	var recurseErr error
	for {
		const reqSize = 1024
		var respSize int

		// Open the directory to recurse into
		file, err := openFdAt(parentFd, base)
		if err != nil {
			if errors.Is(err, ErrNotExist) {
				return nil
			}
			recurseErr = &PathError{Op: "openfdat", Path: base, Err: err}
			break
		}

		for {
			numErr := 0

			names, readErr := file.Readdirnames(reqSize)
			// Errors other than EOF should stop us from continuing.
			if readErr != nil && readErr != io.EOF {
				_ = file.Close()
				if errors.Is(readErr, ErrNotExist) {
					return nil
				}
				return &PathError{Op: "readdirnames", Path: base, Err: readErr}
			}

			respSize = len(names)
			for _, name := range names {
				err := removeAllFrom(fs, file, name)
				if err != nil {
					// Qualify the child's error with this directory's name.
					if pathErr, ok := err.(*PathError); ok {
						pathErr.Path = base + string(os.PathSeparator) + pathErr.Path
					}
					numErr++
					// Only the first error encountered is ultimately reported.
					if recurseErr == nil {
						recurseErr = err
					}
				}
			}

			// If we can delete any entry, break to start new iteration.
			// Otherwise, we discard current names, get next entries and try deleting them.
			if numErr != reqSize {
				break
			}
		}

		// Removing files from the directory may have caused
		// the OS to reshuffle it. Simply calling Readdirnames
		// again may skip some entries. The only reliable way
		// to avoid this is to close and re-open the
		// directory. See issue https://go.dev/issue/20841.
		_ = file.Close()

		// Finish when the end of the directory is reached
		if respSize < reqSize {
			break
		}
	}

	// Remove the directory itself.
	unlinkErr := fs.unlinkat(parentFd, base, AT_REMOVEDIR)
	if unlinkErr == nil || errors.Is(unlinkErr, ErrNotExist) {
		return nil
	}

	if recurseErr != nil {
		return recurseErr
	}
	return &PathError{Op: "unlinkat", Path: base, Err: unlinkErr}
}
|
||||||
|
|
||||||
|
// openFdAt opens path relative to the directory in fd.
|
||||||
|
// Other than that this should act like openFileNolog.
|
||||||
|
// This acts like openFileNolog rather than OpenFile because
|
||||||
|
// we are going to (try to) remove the file.
|
||||||
|
// The contents of this file are not relevant for test caching.
|
||||||
|
func openFdAt(dirfd int, name string) (File, error) {
|
||||||
|
var fd int
|
||||||
|
for {
|
||||||
|
var err error
|
||||||
|
fd, err = unix.Openat(dirfd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW, 0)
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// See comment in openFileNolog.
|
||||||
|
if err == unix.EINTR {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// This is stupid, os.NewFile immediately casts `fd` to an `int`, but wants
|
||||||
|
// it to be passed as a `uintptr`.
|
||||||
|
return os.NewFile(uintptr(fd), name), nil
|
||||||
|
}
|
67
internal/ufs/stat_unix.go
Normal file
67
internal/ufs/stat_unix.go
Normal file
|
@ -0,0 +1,67 @@
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was copied from `go/src/os/stat_linux.go`
|
||||||
|
// and `go/src/os/types_unix.go`.
|
||||||
|
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fileStat is the concrete FileInfo implementation backed by a raw
// unix.Stat_t; see fillFileStatFromSys for how its fields are derived.
type fileStat struct {
	// name is the base name of the file.
	name string
	// size is the file length in bytes.
	size int64
	// mode is the Go FileMode translated from sys.Mode.
	mode FileMode
	// modTime is the file's last modification time.
	modTime time.Time
	// sys is the raw stat result this struct was filled from.
	sys unix.Stat_t
}

// Compile-time assertion that fileStat implements FileInfo.
var _ FileInfo = (*fileStat)(nil)
|
||||||
|
|
||||||
|
// Size returns the file's length in bytes.
func (fs *fileStat) Size() int64 { return fs.size }

// Mode returns the file's mode and permission bits.
func (fs *fileStat) Mode() FileMode { return fs.mode }

// ModTime returns the file's last modification time.
func (fs *fileStat) ModTime() time.Time { return fs.modTime }

// Sys returns the underlying *unix.Stat_t.
func (fs *fileStat) Sys() any { return &fs.sys }

// Name returns the base name of the file.
func (fs *fileStat) Name() string { return fs.name }

// IsDir reports whether the file is a directory.
func (fs *fileStat) IsDir() bool { return fs.Mode().IsDir() }
|
||||||
|
|
||||||
|
// fillFileStatFromSys populates fs's name, size, modification time, and mode
// fields from the raw unix.Stat_t already stored in fs.sys. It is copied from
// the stdlib fillFileStatFromSys in go/src/os/stat_linux.go.
func fillFileStatFromSys(fs *fileStat, name string) {
	fs.name = basename(name)
	fs.size = fs.sys.Size
	fs.modTime = time.Unix(fs.sys.Mtim.Unix())
	// Permission bits first; type bits are translated below.
	fs.mode = FileMode(fs.sys.Mode & 0o777)
	switch fs.sys.Mode & unix.S_IFMT {
	case unix.S_IFBLK:
		fs.mode |= ModeDevice
	case unix.S_IFCHR:
		fs.mode |= ModeDevice | ModeCharDevice
	case unix.S_IFDIR:
		fs.mode |= ModeDir
	case unix.S_IFIFO:
		fs.mode |= ModeNamedPipe
	case unix.S_IFLNK:
		fs.mode |= ModeSymlink
	case unix.S_IFREG:
		// nothing to do
	case unix.S_IFSOCK:
		fs.mode |= ModeSocket
	}
	// Carry over the setgid/setuid/sticky bits.
	if fs.sys.Mode&unix.S_ISGID != 0 {
		fs.mode |= ModeSetgid
	}
	if fs.sys.Mode&unix.S_ISUID != 0 {
		fs.mode |= ModeSetuid
	}
	if fs.sys.Mode&unix.S_ISVTX != 0 {
		fs.mode |= ModeSticky
	}
}
|
123
internal/ufs/walk.go
Normal file
123
internal/ufs/walk.go
Normal file
|
@ -0,0 +1,123 @@
|
||||||
|
// SPDX-License-Identifier: BSD-3-Clause
|
||||||
|
|
||||||
|
// Code in this file was derived from `go/src/io/fs/walk.go`.
|
||||||
|
|
||||||
|
// Copyright 2020 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the `go.LICENSE` file.
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
iofs "io/fs"
|
||||||
|
"path"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SkipDir is used as a return value from [WalkDirFunc] to indicate that
|
||||||
|
// the directory named in the call is to be skipped. It is not returned
|
||||||
|
// as an error by any function.
|
||||||
|
var SkipDir = iofs.SkipDir
|
||||||
|
|
||||||
|
// SkipAll is used as a return value from [WalkDirFunc] to indicate that
|
||||||
|
// all remaining files and directories are to be skipped. It is not returned
|
||||||
|
// as an error by any function.
|
||||||
|
var SkipAll = iofs.SkipAll
|
||||||
|
|
||||||
|
// WalkDirFunc is the type of the function called by [WalkDir] to visit
|
||||||
|
// each file or directory.
|
||||||
|
//
|
||||||
|
// The path argument contains the argument to [WalkDir] as a prefix.
|
||||||
|
// That is, if WalkDir is called with root argument "dir" and finds a file
|
||||||
|
// named "a" in that directory, the walk function will be called with
|
||||||
|
// argument "dir/a".
|
||||||
|
//
|
||||||
|
// The d argument is the [DirEntry] for the named path.
|
||||||
|
//
|
||||||
|
// The error result returned by the function controls how [WalkDir]
|
||||||
|
// continues. If the function returns the special value [SkipDir], WalkDir
|
||||||
|
// skips the current directory (path if d.IsDir() is true, otherwise
|
||||||
|
// path's parent directory). If the function returns the special value
|
||||||
|
// [SkipAll], WalkDir skips all remaining files and directories. Otherwise,
|
||||||
|
// if the function returns a non-nil error, WalkDir stops entirely and
|
||||||
|
// returns that error.
|
||||||
|
//
|
||||||
|
// The err argument reports an error related to path, signaling that
|
||||||
|
// [WalkDir] will not walk into that directory. The function can decide how
|
||||||
|
// to handle that error; as described earlier, returning the error will
|
||||||
|
// cause WalkDir to stop walking the entire tree.
|
||||||
|
//
|
||||||
|
// [WalkDir] calls the function with a non-nil err argument in two cases.
|
||||||
|
//
|
||||||
|
// First, if the initial [Stat] on the root directory fails, WalkDir
|
||||||
|
// calls the function with path set to root, d set to nil, and err set to
|
||||||
|
// the error from [fs.Stat].
|
||||||
|
//
|
||||||
|
// Second, if a directory's ReadDir method (see [ReadDirFile]) fails, WalkDir calls the
|
||||||
|
// function with path set to the directory's path, d set to an
|
||||||
|
// [DirEntry] describing the directory, and err set to the error from
|
||||||
|
// ReadDir. In this second case, the function is called twice with the
|
||||||
|
// path of the directory: the first call is before the directory read is
|
||||||
|
// attempted and has err set to nil, giving the function a chance to
|
||||||
|
// return [SkipDir] or [SkipAll] and avoid the ReadDir entirely. The second call
|
||||||
|
// is after a failed ReadDir and reports the error from ReadDir.
|
||||||
|
// (If ReadDir succeeds, there is no second call.)
|
||||||
|
type WalkDirFunc func(path string, d DirEntry, err error) error
|
||||||
|
|
||||||
|
// WalkDir walks the file tree rooted at root, calling fn for each file or
|
||||||
|
// directory in the tree, including root.
|
||||||
|
//
|
||||||
|
// All errors that arise visiting files and directories are filtered by fn:
|
||||||
|
// see the [WalkDirFunc] documentation for details.
|
||||||
|
//
|
||||||
|
// The files are walked in lexical order, which makes the output deterministic
|
||||||
|
// but requires WalkDir to read an entire directory into memory before proceeding
|
||||||
|
// to walk that directory.
|
||||||
|
//
|
||||||
|
// WalkDir does not follow symbolic links found in directories,
|
||||||
|
// but if root itself is a symbolic link, its target will be walked.
|
||||||
|
func WalkDir(fs Filesystem, root string, fn WalkDirFunc) error {
|
||||||
|
info, err := fs.Stat(root)
|
||||||
|
if err != nil {
|
||||||
|
err = fn(root, nil, err)
|
||||||
|
} else {
|
||||||
|
err = walkDir(fs, root, iofs.FileInfoToDirEntry(info), fn)
|
||||||
|
}
|
||||||
|
if err == SkipDir || err == SkipAll {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// walkDir recursively descends path, calling walkDirFn.
func walkDir(fs Filesystem, name string, d DirEntry, walkDirFn WalkDirFunc) error {
	// First call reports the entry itself. SkipDir on a directory means
	// "do not descend" and is swallowed here.
	if err := walkDirFn(name, d, nil); err != nil || !d.IsDir() {
		if err == SkipDir && d.IsDir() {
			// Successfully skipped directory.
			err = nil
		}
		return err
	}

	dirs, err := fs.ReadDir(name)
	if err != nil {
		// Second call, to report ReadDir error.
		err = walkDirFn(name, d, err)
		if err != nil {
			if err == SkipDir && d.IsDir() {
				err = nil
			}
			return err
		}
	}

	for _, d1 := range dirs {
		name1 := path.Join(name, d1.Name())
		if err := walkDir(fs, name1, d1, walkDirFn); err != nil {
			// SkipDir from a child skips the rest of this directory;
			// SkipAll propagates up to WalkDir, which converts it to nil.
			if err == SkipDir {
				break
			}
			return err
		}
	}
	return nil
}
|
298
internal/ufs/walk_unix.go
Normal file
298
internal/ufs/walk_unix.go
Normal file
|
@ -0,0 +1,298 @@
|
||||||
|
// SPDX-License-Identifier: BSD-2-Clause
|
||||||
|
|
||||||
|
// Some code in this file was derived from https://github.com/karrick/godirwalk.
|
||||||
|
|
||||||
|
//go:build unix
|
||||||
|
|
||||||
|
package ufs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
iofs "io/fs"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WalkDiratFunc is the type of function called by WalkDirat for each visited
// entry. It mirrors WalkDirFunc, but additionally receives the open file
// descriptor of the entry's parent directory and the entry's path relative
// to the walk root.
type WalkDiratFunc func(dirfd int, name, relative string, d DirEntry, err error) error

// WalkDirat walks the tree rooted at name, resolved relative to dirfd,
// calling fn for each file or directory including the root. A dirfd of 0
// falls back to the filesystem's own root descriptor.
func (fs *UnixFS) WalkDirat(dirfd int, name string, fn WalkDiratFunc) error {
	if dirfd == 0 {
		// TODO: proper validation, ideally a dedicated function.
		dirfd = int(fs.dirfd.Load())
	}
	info, err := fs.Lstatat(dirfd, name)
	if err != nil {
		err = fn(dirfd, name, name, nil, err)
	} else {
		// One scratch buffer is shared by the entire walk.
		b := newScratchBuffer()
		err = fs.walkDir(b, dirfd, name, name, iofs.FileInfoToDirEntry(info), fn)
	}
	// SkipDir and SkipAll are control signals, not failures.
	if err == SkipDir || err == SkipAll {
		return nil
	}
	return err
}
|
||||||
|
|
||||||
|
// walkDir recursively descends into the directory entry name (resolved
// relative to parentfd), invoking walkDirFn for each entry. relative is the
// entry's path relative to the walk root; b is the shared getdents scratch
// buffer. SkipDir/SkipAll handling mirrors walk.go's walkDir.
func (fs *UnixFS) walkDir(b []byte, parentfd int, name, relative string, d DirEntry, walkDirFn WalkDiratFunc) error {
	if err := walkDirFn(parentfd, name, relative, d, nil); err != nil || !d.IsDir() {
		if err == SkipDir && d.IsDir() {
			// Successfully skipped directory.
			err = nil
		}
		return err
	}

	dirfd, err := fs.openat(parentfd, name, O_DIRECTORY|O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer unix.Close(dirfd)

	dirs, err := fs.readDir(dirfd, name, b)
	if err != nil {
		// Second call, to report ReadDir error.
		err = walkDirFn(dirfd, name, relative, d, err)
		if err != nil {
			if err == SkipDir && d.IsDir() {
				err = nil
			}
			return err
		}
	}

	for _, d1 := range dirs {
		// TODO: the path.Join on this line may actually be partially incorrect.
		// If we are not walking starting at the root, relative will contain the
		// name of the directory we are starting the walk from, which will be
		// relative to the root of the filesystem instead of from where the walk
		// was initiated from.
		//
		// ref; https://github.com/pterodactyl/panel/issues/5030
		if err := fs.walkDir(b, dirfd, d1.Name(), path.Join(relative, d1.Name()), d1, walkDirFn); err != nil {
			if err == SkipDir {
				break
			}
			return err
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// ReadDirMap .
|
||||||
|
// TODO: document
|
||||||
|
func ReadDirMap[T any](fs *UnixFS, path string, fn func(DirEntry) (T, error)) ([]T, error) {
|
||||||
|
dirfd, name, closeFd, err := fs.safePath(path)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fd, err := fs.openat(dirfd, name, O_DIRECTORY|O_RDONLY, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer unix.Close(fd)
|
||||||
|
|
||||||
|
entries, err := fs.readDir(fd, ".", nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
out := make([]T, len(entries))
|
||||||
|
for i, e := range entries {
|
||||||
|
idx := i
|
||||||
|
e := e
|
||||||
|
v, err := fn(e)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out[idx] = v
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// nameOffset is a compile time constant
const nameOffset = int(unsafe.Offsetof(unix.Dirent{}.Name))

// nameFromDirent returns the entry name stored in de as a byte slice that
// aliases de.Name's backing array; the result is only valid while de is.
func nameFromDirent(de *unix.Dirent) (name []byte) {
	// Because this GOOS' syscall.Dirent does not provide a field that specifies
	// the name length, this function must first calculate the max possible name
	// length, and then search for the NULL byte.
	ml := int(de.Reclen) - nameOffset

	// Convert syscall.Dirent.Name, which is array of int8, to []byte, by
	// overwriting Cap, Len, and Data slice header fields to the max possible
	// name length computed above, and finding the terminating NULL byte.
	//
	// TODO: is there an alternative to the deprecated SliceHeader?
	// SliceHeader was mainly deprecated due to it being misused for avoiding
	// allocations when converting a byte slice to a string, ref;
	// https://go.dev/issue/53003
	// NOTE(review): unsafe.Slice (Go 1.17+) is the supported replacement for
	// this SliceHeader manipulation and would let the reflect import go.
	sh := (*reflect.SliceHeader)(unsafe.Pointer(&name))
	sh.Cap = ml
	sh.Len = ml
	sh.Data = uintptr(unsafe.Pointer(&de.Name[0]))

	if index := bytes.IndexByte(name, 0); index >= 0 {
		// Found NULL byte; set slice's cap and len accordingly.
		sh.Cap = index
		sh.Len = index
		return
	}

	// NOTE: This branch is not expected, but included for defensive
	// programming, and provides a hard stop on the name based on the structure
	// field array size.
	sh.Cap = len(de.Name)
	sh.Len = sh.Cap
	return
}
|
||||||
|
|
||||||
|
// modeTypeFromDirent converts a syscall defined constant, which is in purview
// of OS, to a constant defined by Go, assumed by this project to be stable.
//
// When the syscall constant is not recognized, this function falls back to a
// Stat on the file system.
func (fs *UnixFS) modeTypeFromDirent(fd int, de *unix.Dirent, osDirname, osBasename string) (FileMode, error) {
	switch de.Type {
	case unix.DT_REG:
		// Regular file: no type bits set.
		return 0, nil
	case unix.DT_DIR:
		return ModeDir, nil
	case unix.DT_LNK:
		return ModeSymlink, nil
	case unix.DT_CHR:
		return ModeDevice | ModeCharDevice, nil
	case unix.DT_BLK:
		return ModeDevice, nil
	case unix.DT_FIFO:
		return ModeNamedPipe, nil
	case unix.DT_SOCK:
		return ModeSocket, nil
	default:
		// If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), then
		// resolve actual mode by reading file information.
		return fs.modeType(fd, filepath.Join(osDirname, osBasename))
	}
}
|
||||||
|
|
||||||
|
// modeType returns the mode type of the file system entry identified by
|
||||||
|
// osPathname by calling os.LStat function, to intentionally not follow symbolic
|
||||||
|
// links.
|
||||||
|
//
|
||||||
|
// Even though os.LStat provides all file mode bits, we want to ensure same
|
||||||
|
// values returned to caller regardless of whether we obtained file mode bits
|
||||||
|
// from syscall or stat call. Therefore, mask out the additional file mode bits
|
||||||
|
// that are provided by stat but not by the syscall, so users can rely on their
|
||||||
|
// values.
|
||||||
|
func (fs *UnixFS) modeType(dirfd int, name string) (os.FileMode, error) {
|
||||||
|
fi, err := fs.Lstatat(dirfd, name)
|
||||||
|
if err == nil {
|
||||||
|
return fi.Mode() & ModeType, nil
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// minimumScratchBufferSize is the smallest buffer handed to getdents(2):
// one OS page.
var minimumScratchBufferSize = os.Getpagesize()

// newScratchBuffer allocates a page-sized scratch buffer for readDir.
func newScratchBuffer() []byte {
	return make([]byte, minimumScratchBufferSize)
}
|
||||||
|
|
||||||
|
// readDir reads every entry of the already-open directory fd via raw
// unix.Getdents calls, skipping ".", "..", and deleted entries. name is the
// directory's path, used to resolve entry mode types and stored on each
// returned dirent. b, if at least one page in size, is used as the syscall
// scratch buffer; otherwise a fresh page-sized buffer is allocated.
func (fs *UnixFS) readDir(fd int, name string, b []byte) ([]DirEntry, error) {
	scratchBuffer := b
	if scratchBuffer == nil || len(scratchBuffer) < minimumScratchBufferSize {
		scratchBuffer = newScratchBuffer()
	}

	var entries []DirEntry
	var workBuffer []byte

	var sde unix.Dirent
	for {
		// When the work buffer is drained, refill it with the next batch
		// of raw dirent bytes from the kernel.
		if len(workBuffer) == 0 {
			n, err := unix.Getdents(fd, scratchBuffer)
			if err != nil {
				if err == unix.EINTR {
					continue
				}
				return nil, convertErrorType(err)
			}
			if n <= 0 {
				// end of directory: normal exit
				return entries, nil
			}
			workBuffer = scratchBuffer[:n] // trim work buffer to number of bytes read
		}

		// "Go is like C, except that you just put `unsafe` all over the place".
		copy((*[unsafe.Sizeof(unix.Dirent{})]byte)(unsafe.Pointer(&sde))[:], workBuffer)
		workBuffer = workBuffer[sde.Reclen:] // advance buffer for next iteration through loop

		if sde.Ino == 0 {
			continue // inode set to 0 indicates an entry that was marked as deleted
		}

		nameSlice := nameFromDirent(&sde)
		nameLength := len(nameSlice)

		// Skip empty names and the "." / ".." pseudo-entries.
		if nameLength == 0 || (nameSlice[0] == '.' && (nameLength == 1 || (nameLength == 2 && nameSlice[1] == '.'))) {
			continue
		}

		childName := string(nameSlice)
		mt, err := fs.modeTypeFromDirent(fd, &sde, name, childName)
		if err != nil {
			return nil, convertErrorType(err)
		}
		entries = append(entries, &dirent{name: childName, path: name, modeType: mt, dirfd: fd, fs: fs})
	}
}
|
||||||
|
|
||||||
|
// dirent stores the name and file system mode type of discovered file system
// entries.
type dirent struct {
	// name is the entry's base name within path.
	name string
	// path is the directory the entry was read from.
	path string
	// modeType holds the entry's type bits (ModeDir, ModeSymlink, ...).
	modeType FileMode

	// dirfd is the open descriptor of the directory the entry was read
	// from; Info and Open resolve the entry relative to it.
	dirfd int
	// fs is the owning filesystem; when nil, Info and Open return nil
	// results.
	fs *UnixFS
}
|
||||||
|
|
||||||
|
// Name returns the entry's base name.
func (de dirent) Name() string {
	return de.name
}

// IsDir reports whether the entry describes a directory.
func (de dirent) IsDir() bool {
	return de.modeType&ModeDir != 0
}

// Type returns the type bits of the entry's file mode.
func (de dirent) Type() FileMode {
	return de.modeType
}

// Info lstat's the entry relative to the directory descriptor it was read
// from.
// NOTE(review): returns (nil, nil) when fs is unset — callers must tolerate
// a nil FileInfo without an error.
func (de dirent) Info() (FileInfo, error) {
	if de.fs == nil {
		return nil, nil
	}
	return de.fs.Lstatat(de.dirfd, de.name)
}

// Open opens the entry read-only relative to the directory descriptor it
// was read from.
// NOTE(review): returns (nil, nil) when fs is unset.
func (de dirent) Open() (File, error) {
	if de.fs == nil {
		return nil, nil
	}
	return de.fs.OpenFileat(de.dirfd, de.name, O_RDONLY, 0)
}

// reset releases memory held by entry err and name, and resets mode type to 0.
func (de *dirent) reset() {
	de.name = ""
	de.path = ""
	de.modeType = 0
}
|
|
@ -2,8 +2,6 @@ package parser
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -29,24 +27,14 @@ var configMatchRegex = regexp.MustCompile(`{{\s?config\.([\w.-]+)\s?}}`)
|
||||||
// matching:
|
// matching:
|
||||||
//
|
//
|
||||||
// <Root>
|
// <Root>
|
||||||
// <Property value="testing"/>
|
//
|
||||||
|
// <Property value="testing"/>
|
||||||
|
//
|
||||||
// </Root>
|
// </Root>
|
||||||
//
|
//
|
||||||
// noinspection RegExpRedundantEscape
|
// noinspection RegExpRedundantEscape
|
||||||
var xmlValueMatchRegex = regexp.MustCompile(`^\[([\w]+)='(.*)'\]$`)
|
var xmlValueMatchRegex = regexp.MustCompile(`^\[([\w]+)='(.*)'\]$`)
|
||||||
|
|
||||||
// Gets the []byte representation of a configuration file to be passed through to other
|
|
||||||
// handler functions. If the file does not currently exist, it will be created.
|
|
||||||
func readFileBytes(path string) ([]byte, error) {
|
|
||||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
return io.ReadAll(file)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gets the value of a key based on the value type defined.
|
// Gets the value of a key based on the value type defined.
|
||||||
func (cfr *ConfigurationFileReplacement) getKeyValue(value string) interface{} {
|
func (cfr *ConfigurationFileReplacement) getKeyValue(value string) interface{} {
|
||||||
if cfr.ReplaceWith.Type() == jsonparser.Boolean {
|
if cfr.ReplaceWith.Type() == jsonparser.Boolean {
|
||||||
|
|
252
parser/parser.go
252
parser/parser.go
|
@ -2,8 +2,8 @@ package parser
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"os"
|
"bytes"
|
||||||
"path/filepath"
|
"io"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
@ -18,6 +18,7 @@ import (
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// The file parsing options that are available for a server configuration file.
|
// The file parsing options that are available for a server configuration file.
|
||||||
|
@ -74,6 +75,26 @@ func (cv *ReplaceValue) String() string {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Bytes returns the raw byte representation of the replacement value based
// on its JSON type. Strings are unescaped; null and unrecognized types are
// rendered as the placeholders "<nil>" and "<invalid>" respectively.
func (cv *ReplaceValue) Bytes() []byte {
	switch cv.Type() {
	case jsonparser.String:
		// Small values are unescaped into the stack buffer to avoid a
		// heap allocation.
		var stackbuf [64]byte
		bU, err := jsonparser.Unescape(cv.value, stackbuf[:])
		if err != nil {
			// NOTE(review): a malformed escape sequence panics rather than
			// returning an error — confirm callers recover or expect this.
			panic(errors.Wrap(err, "parser: could not parse value"))
		}
		return bU
	case jsonparser.Null:
		return []byte("<nil>")
	case jsonparser.Boolean:
		return cv.value
	case jsonparser.Number:
		return cv.value
	default:
		return []byte("<invalid>")
	}
}
|
||||||
|
|
||||||
type ConfigurationParser string
|
type ConfigurationParser string
|
||||||
|
|
||||||
func (cp ConfigurationParser) String() string {
|
func (cp ConfigurationParser) String() string {
|
||||||
|
@ -167,11 +188,12 @@ func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a given configuration file and updates all of the values within as defined
|
// Parse parses a given configuration file and updates all the values within
|
||||||
// in the API response from the Panel.
|
// as defined in the API response from the Panel.
|
||||||
func (f *ConfigurationFile) Parse(path string, internal bool) error {
|
func (f *ConfigurationFile) Parse(file ufs.File) error {
|
||||||
log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
|
//log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
|
||||||
|
|
||||||
|
// What the fuck is going on here?
|
||||||
if mb, err := json.Marshal(config.Get()); err != nil {
|
if mb, err := json.Marshal(config.Get()); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
|
@ -182,56 +204,24 @@ func (f *ConfigurationFile) Parse(path string, internal bool) error {
|
||||||
|
|
||||||
switch f.Parser {
|
switch f.Parser {
|
||||||
case Properties:
|
case Properties:
|
||||||
err = f.parsePropertiesFile(path)
|
err = f.parsePropertiesFile(file)
|
||||||
break
|
|
||||||
case File:
|
case File:
|
||||||
err = f.parseTextFile(path)
|
err = f.parseTextFile(file)
|
||||||
break
|
|
||||||
case Yaml, "yml":
|
case Yaml, "yml":
|
||||||
err = f.parseYamlFile(path)
|
err = f.parseYamlFile(file)
|
||||||
break
|
|
||||||
case Json:
|
case Json:
|
||||||
err = f.parseJsonFile(path)
|
err = f.parseJsonFile(file)
|
||||||
break
|
|
||||||
case Ini:
|
case Ini:
|
||||||
err = f.parseIniFile(path)
|
err = f.parseIniFile(file)
|
||||||
break
|
|
||||||
case Xml:
|
case Xml:
|
||||||
err = f.parseXmlFile(path)
|
err = f.parseXmlFile(file)
|
||||||
break
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
|
||||||
// File doesn't exist, we tried creating it, and same error is returned? Pretty
|
|
||||||
// sure this pathway is impossible, but if not, abort here.
|
|
||||||
if internal {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
b := strings.TrimSuffix(path, filepath.Base(path))
|
|
||||||
if err := os.MkdirAll(b, 0o755); err != nil {
|
|
||||||
return errors.WithMessage(err, "failed to create base directory for missing configuration file")
|
|
||||||
} else {
|
|
||||||
if _, err := os.Create(path); err != nil {
|
|
||||||
return errors.WithMessage(err, "failed to create missing configuration file")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return f.Parse(path, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses an xml file.
|
// Parses an xml file.
|
||||||
func (f *ConfigurationFile) parseXmlFile(path string) error {
|
func (f *ConfigurationFile) parseXmlFile(file ufs.File) error {
|
||||||
doc := etree.NewDocument()
|
doc := etree.NewDocument()
|
||||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
if _, err := doc.ReadFrom(file); err != nil {
|
if _, err := doc.ReadFrom(file); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -291,41 +281,27 @@ func (f *ConfigurationFile) parseXmlFile(path string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If you don't truncate the file you'll end up duplicating the data in there (or just appending
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
// to the end of the file. We don't want to do that.
|
return err
|
||||||
|
}
|
||||||
if err := file.Truncate(0); err != nil {
|
if err := file.Truncate(0); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Move the cursor to the start of the file to avoid weird spacing issues.
|
|
||||||
file.Seek(0, 0)
|
|
||||||
|
|
||||||
// Ensure the XML is indented properly.
|
// Ensure the XML is indented properly.
|
||||||
doc.Indent(2)
|
doc.Indent(2)
|
||||||
|
|
||||||
// Truncate the file before attempting to write the changes.
|
// Write the XML to the file.
|
||||||
if err := os.Truncate(path, 0); err != nil {
|
if _, err := doc.WriteTo(file); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
// Write the XML to the file.
|
|
||||||
_, err = doc.WriteTo(file)
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses an ini file.
|
// Parses an ini file.
|
||||||
func (f *ConfigurationFile) parseIniFile(path string) error {
|
func (f *ConfigurationFile) parseIniFile(file ufs.File) error {
|
||||||
// Ini package can't handle a non-existent file, so handle that automatically here
|
// Wrap the file in a NopCloser so the ini package doesn't close the file.
|
||||||
// by creating it if not exists. Then, immediately close the file since we will use
|
cfg, err := ini.Load(io.NopCloser(file))
|
||||||
// other methods to write the new contents.
|
|
||||||
file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
file.Close()
|
|
||||||
|
|
||||||
cfg, err := ini.Load(path)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -388,14 +364,24 @@ func (f *ConfigurationFile) parseIniFile(path string) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return cfg.SaveTo(path)
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := cfg.WriteTo(file); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a json file updating any matching key/value pairs. If a match is not found, the
|
// Parses a json file updating any matching key/value pairs. If a match is not found, the
|
||||||
// value is set regardless in the file. See the commentary in parseYamlFile for more details
|
// value is set regardless in the file. See the commentary in parseYamlFile for more details
|
||||||
// about what is happening during this process.
|
// about what is happening during this process.
|
||||||
func (f *ConfigurationFile) parseJsonFile(path string) error {
|
func (f *ConfigurationFile) parseJsonFile(file ufs.File) error {
|
||||||
b, err := readFileBytes(path)
|
b, err := io.ReadAll(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -405,14 +391,24 @@ func (f *ConfigurationFile) parseJsonFile(path string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
output := []byte(data.StringIndent("", " "))
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
return os.WriteFile(path, output, 0o644)
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, bytes.NewReader(data.BytesIndent("", " "))); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a yaml file and updates any matching key/value pairs before persisting
|
// Parses a yaml file and updates any matching key/value pairs before persisting
|
||||||
// it back to the disk.
|
// it back to the disk.
|
||||||
func (f *ConfigurationFile) parseYamlFile(path string) error {
|
func (f *ConfigurationFile) parseYamlFile(file ufs.File) error {
|
||||||
b, err := readFileBytes(path)
|
b, err := io.ReadAll(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -443,35 +439,56 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return os.WriteFile(path, marshaled, 0o644)
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, bytes.NewReader(marshaled)); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a text file using basic find and replace. This is a highly inefficient method of
|
// Parses a text file using basic find and replace. This is a highly inefficient method of
|
||||||
// scanning a file and performing a replacement. You should attempt to use anything other
|
// scanning a file and performing a replacement. You should attempt to use anything other
|
||||||
// than this function where possible.
|
// than this function where possible.
|
||||||
func (f *ConfigurationFile) parseTextFile(path string) error {
|
func (f *ConfigurationFile) parseTextFile(file ufs.File) error {
|
||||||
input, err := os.ReadFile(path)
|
b := bytes.NewBuffer(nil)
|
||||||
if err != nil {
|
s := bufio.NewScanner(file)
|
||||||
return err
|
var replaced bool
|
||||||
}
|
for s.Scan() {
|
||||||
|
line := s.Bytes()
|
||||||
lines := strings.Split(string(input), "\n")
|
replaced = false
|
||||||
for i, line := range lines {
|
|
||||||
for _, replace := range f.Replace {
|
for _, replace := range f.Replace {
|
||||||
// If this line doesn't match what we expect for the replacement, move on to the next
|
// If this line doesn't match what we expect for the replacement, move on to the next
|
||||||
// line. Otherwise, update the line to have the replacement value.
|
// line. Otherwise, update the line to have the replacement value.
|
||||||
if !strings.HasPrefix(line, replace.Match) {
|
if !bytes.HasPrefix(line, []byte(replace.Match)) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
b.Write(replace.ReplaceWith.Bytes())
|
||||||
lines[i] = replace.ReplaceWith.String()
|
replaced = true
|
||||||
}
|
}
|
||||||
|
if !replaced {
|
||||||
|
b.Write(line)
|
||||||
|
}
|
||||||
|
b.WriteByte('\n')
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := os.WriteFile(path, []byte(strings.Join(lines, "\n")), 0o644); err != nil {
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := file.Truncate(0); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, b); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -501,31 +518,29 @@ func (f *ConfigurationFile) parseTextFile(path string) error {
|
||||||
//
|
//
|
||||||
// @see https://github.com/pterodactyl/panel/issues/2308 (original)
|
// @see https://github.com/pterodactyl/panel/issues/2308 (original)
|
||||||
// @see https://github.com/pterodactyl/panel/issues/3009 ("bug" introduced as result)
|
// @see https://github.com/pterodactyl/panel/issues/3009 ("bug" introduced as result)
|
||||||
func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
func (f *ConfigurationFile) parsePropertiesFile(file ufs.File) error {
|
||||||
var s strings.Builder
|
b, err := io.ReadAll(file)
|
||||||
// Open the file and attempt to load any comments that currenty exist at the start
|
if err != nil {
|
||||||
// of the file. This is kind of a hack, but should work for a majority of users for
|
return err
|
||||||
// the time being.
|
|
||||||
if fd, err := os.Open(path); err != nil {
|
|
||||||
return errors.Wrap(err, "parser: could not open file for reading")
|
|
||||||
} else {
|
|
||||||
scanner := bufio.NewScanner(fd)
|
|
||||||
// Scan until we hit a line that is not a comment that actually has content
|
|
||||||
// on it. Keep appending the comments until that time.
|
|
||||||
for scanner.Scan() {
|
|
||||||
text := scanner.Text()
|
|
||||||
if len(text) > 0 && text[0] != '#' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s.WriteString(text + "\n")
|
|
||||||
}
|
|
||||||
_ = fd.Close()
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return errors.WithStackIf(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
p, err := properties.LoadFile(path, properties.UTF8)
|
s := bytes.NewBuffer(nil)
|
||||||
|
scanner := bufio.NewScanner(bytes.NewReader(b))
|
||||||
|
// Scan until we hit a line that is not a comment that actually has content
|
||||||
|
// on it. Keep appending the comments until that time.
|
||||||
|
for scanner.Scan() {
|
||||||
|
text := scanner.Bytes()
|
||||||
|
if len(text) > 0 && text[0] != '#' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
s.Write(text)
|
||||||
|
s.WriteByte('\n')
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
return errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p, err := properties.Load(b, properties.UTF8)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "parser: could not load properties file for configuration update")
|
return errors.Wrap(err, "parser: could not load properties file for configuration update")
|
||||||
}
|
}
|
||||||
|
@ -563,17 +578,16 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
|
||||||
s.WriteString(key + "=" + strings.Trim(strconv.QuoteToASCII(value), "\"") + "\n")
|
s.WriteString(key + "=" + strings.Trim(strconv.QuoteToASCII(value), "\"") + "\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Open the file for writing.
|
if _, err := file.Seek(0, io.SeekStart); err != nil {
|
||||||
w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer w.Close()
|
if err := file.Truncate(0); err != nil {
|
||||||
|
return err
|
||||||
// Write the data to the file.
|
|
||||||
if _, err := w.Write([]byte(s.String())); err != nil {
|
|
||||||
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Write the data to the file.
|
||||||
|
if _, err := io.Copy(file, s); err != nil {
|
||||||
|
return errors.Wrap(err, "parser: failed to write properties file to disk")
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,20 +20,58 @@ import (
|
||||||
"github.com/pterodactyl/wings/server"
|
"github.com/pterodactyl/wings/server"
|
||||||
)
|
)
|
||||||
|
|
||||||
var client = &http.Client{
|
var client *http.Client
|
||||||
Timeout: time.Hour * 12,
|
|
||||||
// Disallow any redirect on an HTTP call. This is a security requirement: do not modify
|
func init() {
|
||||||
// this logic without first ensuring that the new target location IS NOT within the current
|
dialer := &net.Dialer{
|
||||||
// instance's local network.
|
LocalAddr: nil,
|
||||||
//
|
}
|
||||||
// This specific error response just causes the client to not follow the redirect and
|
|
||||||
// returns the actual redirect response to the caller. Not perfect, but simple and most
|
trnspt := http.DefaultTransport.(*http.Transport).Clone()
|
||||||
// people won't be using URLs that redirect anyways hopefully?
|
trnspt.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||||
//
|
c, err := dialer.DialContext(ctx, network, addr)
|
||||||
// We'll re-evaluate this down the road if needed.
|
if err != nil {
|
||||||
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
return nil, errors.WithStack(err)
|
||||||
return http.ErrUseLastResponse
|
}
|
||||||
},
|
|
||||||
|
ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
|
||||||
|
if err != nil {
|
||||||
|
return c, errors.WithStack(err)
|
||||||
|
}
|
||||||
|
ip := net.ParseIP(ipStr)
|
||||||
|
if ip == nil {
|
||||||
|
return c, errors.WithStack(ErrInvalidIPAddress)
|
||||||
|
}
|
||||||
|
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
|
||||||
|
return c, errors.WithStack(ErrInternalResolution)
|
||||||
|
}
|
||||||
|
for _, block := range internalRanges {
|
||||||
|
if !block.Contains(ip) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return c, errors.WithStack(ErrInternalResolution)
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
client = &http.Client{
|
||||||
|
Timeout: time.Hour * 12,
|
||||||
|
|
||||||
|
Transport: trnspt,
|
||||||
|
|
||||||
|
// Disallow any redirect on an HTTP call. This is a security requirement: do not modify
|
||||||
|
// this logic without first ensuring that the new target location IS NOT within the current
|
||||||
|
// instance's local network.
|
||||||
|
//
|
||||||
|
// This specific error response just causes the client to not follow the redirect and
|
||||||
|
// returns the actual redirect response to the caller. Not perfect, but simple and most
|
||||||
|
// people won't be using URLs that redirect anyways hopefully?
|
||||||
|
//
|
||||||
|
// We'll re-evaluate this down the road if needed.
|
||||||
|
CheckRedirect: func(req *http.Request, via []*http.Request) error {
|
||||||
|
return http.ErrUseLastResponse
|
||||||
|
},
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var instance = &Downloader{
|
var instance = &Downloader{
|
||||||
|
@ -143,12 +181,6 @@ func (dl *Download) Execute() error {
|
||||||
dl.cancelFunc = &cancel
|
dl.cancelFunc = &cancel
|
||||||
defer dl.Cancel()
|
defer dl.Cancel()
|
||||||
|
|
||||||
// Always ensure that we're checking the destination for the download to avoid a malicious
|
|
||||||
// user from accessing internal network resources.
|
|
||||||
if err := dl.isExternalNetwork(ctx); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point we have verified the destination is not within the local network, so we can
|
// At this point we have verified the destination is not within the local network, so we can
|
||||||
// now make a request to that URL and pull down the file, saving it to the server's data
|
// now make a request to that URL and pull down the file, saving it to the server's data
|
||||||
// directory.
|
// directory.
|
||||||
|
@ -167,13 +199,8 @@ func (dl *Download) Execute() error {
|
||||||
return errors.New("downloader: got bad response status from endpoint: " + res.Status)
|
return errors.New("downloader: got bad response status from endpoint: " + res.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there is a Content-Length header on this request go ahead and check that we can
|
if res.ContentLength < 1 {
|
||||||
// even write the whole file before beginning this process. If there is no header present
|
return errors.New("downloader: request is missing ContentLength")
|
||||||
// we'll just have to give it a spin and see how it goes.
|
|
||||||
if res.ContentLength > 0 {
|
|
||||||
if err := dl.server.Filesystem().HasSpaceFor(res.ContentLength); err != nil {
|
|
||||||
return errors.WrapIf(err, "downloader: failed to write file: not enough space")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if dl.req.UseHeader {
|
if dl.req.UseHeader {
|
||||||
|
@ -200,8 +227,10 @@ func (dl *Download) Execute() error {
|
||||||
p := dl.Path()
|
p := dl.Path()
|
||||||
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
|
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
|
||||||
|
|
||||||
|
// Write the file while tracking the progress, Write will check that the
|
||||||
|
// size of the file won't exceed the disk limit.
|
||||||
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
|
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
|
||||||
if err := dl.server.Filesystem().Writefile(p, r); err != nil {
|
if err := dl.server.Filesystem().Write(p, r, res.ContentLength, 0o644); err != nil {
|
||||||
return errors.WrapIf(err, "downloader: failed to write file to server directory")
|
return errors.WrapIf(err, "downloader: failed to write file to server directory")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -246,59 +275,6 @@ func (dl *Download) counter(contentLength int64) *Counter {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verifies that a given download resolves to a location not within the current local
|
|
||||||
// network for the machine. If the final destination of a resource is within the local
|
|
||||||
// network an ErrInternalResolution error is returned.
|
|
||||||
func (dl *Download) isExternalNetwork(ctx context.Context) error {
|
|
||||||
dialer := &net.Dialer{
|
|
||||||
LocalAddr: nil,
|
|
||||||
}
|
|
||||||
|
|
||||||
host := dl.req.URL.Host
|
|
||||||
|
|
||||||
// This cluster-fuck of math and integer shit converts an integer IP into a proper IPv4.
|
|
||||||
// For example: 16843009 would become 1.1.1.1
|
|
||||||
//if i, err := strconv.ParseInt(host, 10, 64); err == nil {
|
|
||||||
// host = strconv.FormatInt((i>>24)&0xFF, 10) + "." + strconv.FormatInt((i>>16)&0xFF, 10) + "." + strconv.FormatInt((i>>8)&0xFF, 10) + "." + strconv.FormatInt(i&0xFF, 10)
|
|
||||||
//}
|
|
||||||
|
|
||||||
if _, _, err := net.SplitHostPort(host); err != nil {
|
|
||||||
if !strings.Contains(err.Error(), "missing port in address") {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
switch dl.req.URL.Scheme {
|
|
||||||
case "http":
|
|
||||||
host += ":80"
|
|
||||||
case "https":
|
|
||||||
host += ":443"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
c, err := dialer.DialContext(ctx, "tcp", host)
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
_ = c.Close()
|
|
||||||
|
|
||||||
ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
ip := net.ParseIP(ipStr)
|
|
||||||
if ip == nil {
|
|
||||||
return errors.WithStack(ErrInvalidIPAddress)
|
|
||||||
}
|
|
||||||
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
|
|
||||||
return errors.WithStack(ErrInternalResolution)
|
|
||||||
}
|
|
||||||
for _, block := range internalRanges {
|
|
||||||
if block.Contains(ip) {
|
|
||||||
return errors.WithStack(ErrInternalResolution)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Downloader represents a global downloader that keeps track of all currently processing downloads
|
// Downloader represents a global downloader that keeps track of all currently processing downloads
|
||||||
// for the machine.
|
// for the machine.
|
||||||
type Downloader struct {
|
type Downloader struct {
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/router/middleware"
|
"github.com/pterodactyl/wings/router/middleware"
|
||||||
"github.com/pterodactyl/wings/router/tokens"
|
"github.com/pterodactyl/wings/router/tokens"
|
||||||
|
@ -19,12 +20,14 @@ func getDownloadBackup(c *gin.Context) {
|
||||||
client := middleware.ExtractApiClient(c)
|
client := middleware.ExtractApiClient(c)
|
||||||
manager := middleware.ExtractManager(c)
|
manager := middleware.ExtractManager(c)
|
||||||
|
|
||||||
|
// Get the payload from the token.
|
||||||
token := tokens.BackupPayload{}
|
token := tokens.BackupPayload{}
|
||||||
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
|
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get the server using the UUID from the token.
|
||||||
if _, ok := manager.Get(token.ServerUuid); !ok || !token.IsUniqueRequest() {
|
if _, ok := manager.Get(token.ServerUuid); !ok || !token.IsUniqueRequest() {
|
||||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||||
"error": "The requested resource was not found on this server.",
|
"error": "The requested resource was not found on this server.",
|
||||||
|
@ -32,6 +35,14 @@ func getDownloadBackup(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Validate that the BackupUuid field is actually a UUID and not some random characters or a
|
||||||
|
// file path.
|
||||||
|
if _, err := uuid.Parse(token.BackupUuid); err != nil {
|
||||||
|
middleware.CaptureAndAbort(c, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Locate the backup on the local disk.
|
||||||
b, st, err := backup.LocateLocal(client, token.BackupUuid)
|
b, st, err := backup.LocateLocal(client, token.BackupUuid)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
@ -45,6 +56,8 @@ func getDownloadBackup(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The use of `os` here is safe as backups are not stored within server
|
||||||
|
// accessible directories.
|
||||||
f, err := os.Open(b.Path())
|
f, err := os.Open(b.Path())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
|
@ -76,26 +89,19 @@ func getDownloadFile(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p, _ := s.Filesystem().SafePath(token.FilePath)
|
f, st, err := s.Filesystem().File(token.FilePath)
|
||||||
st, err := os.Stat(p)
|
|
||||||
// If there is an error or we're somehow trying to download a directory, just
|
|
||||||
// respond with the appropriate error.
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
} else if st.IsDir() {
|
}
|
||||||
|
defer f.Close()
|
||||||
|
if st.IsDir() {
|
||||||
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
|
||||||
"error": "The requested resource was not found on this server.",
|
"error": "The requested resource was not found on this server.",
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
f, err := os.Open(p)
|
|
||||||
if err != nil {
|
|
||||||
middleware.CaptureAndAbort(c, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
|
||||||
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
|
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
|
||||||
c.Header("Content-Type", "application/octet-stream")
|
c.Header("Content-Type", "application/octet-stream")
|
||||||
|
|
|
@ -227,19 +227,19 @@ func deleteServer(c *gin.Context) {
|
||||||
//
|
//
|
||||||
// In addition, servers with large amounts of files can take some time to finish deleting,
|
// In addition, servers with large amounts of files can take some time to finish deleting,
|
||||||
// so we don't want to block the HTTP call while waiting on this.
|
// so we don't want to block the HTTP call while waiting on this.
|
||||||
go func(p string) {
|
go func(s *server.Server) {
|
||||||
|
fs := s.Filesystem()
|
||||||
|
p := fs.Path()
|
||||||
|
_ = fs.UnixFS().Close()
|
||||||
if err := os.RemoveAll(p); err != nil {
|
if err := os.RemoveAll(p); err != nil {
|
||||||
log.WithFields(log.Fields{"path": p, "error": err}).Warn("failed to remove server files during deletion process")
|
log.WithFields(log.Fields{"path": p, "error": err}).Warn("failed to remove server files during deletion process")
|
||||||
}
|
}
|
||||||
}(s.Filesystem().Path())
|
}(s)
|
||||||
|
|
||||||
middleware.ExtractManager(c).Remove(func(server *server.Server) bool {
|
middleware.ExtractManager(c).Remove(func(server *server.Server) bool {
|
||||||
return server.ID() == s.ID()
|
return server.ID() == s.ID()
|
||||||
})
|
})
|
||||||
|
|
||||||
// Deallocate the reference to this server.
|
|
||||||
s = nil
|
|
||||||
|
|
||||||
c.Status(http.StatusNoContent)
|
c.Status(http.StatusNoContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -30,7 +30,7 @@ import (
|
||||||
// getServerFileContents returns the contents of a file on the server.
|
// getServerFileContents returns the contents of a file on the server.
|
||||||
func getServerFileContents(c *gin.Context) {
|
func getServerFileContents(c *gin.Context) {
|
||||||
s := middleware.ExtractServer(c)
|
s := middleware.ExtractServer(c)
|
||||||
p := "/" + strings.TrimLeft(c.Query("file"), "/")
|
p := strings.TrimLeft(c.Query("file"), "/")
|
||||||
f, st, err := s.Filesystem().File(p)
|
f, st, err := s.Filesystem().File(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
|
@ -129,7 +129,6 @@ func putServerRenameFiles(c *gin.Context) {
|
||||||
}
|
}
|
||||||
if err := fs.Rename(pf, pt); err != nil {
|
if err := fs.Rename(pf, pt); err != nil {
|
||||||
// Return nil if the error is an is not exists.
|
// Return nil if the error is an is not exists.
|
||||||
// NOTE: os.IsNotExist() does not work if the error is wrapped.
|
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
s.Log().WithField("error", err).
|
s.Log().WithField("error", err).
|
||||||
WithField("from_path", pf).
|
WithField("from_path", pf).
|
||||||
|
@ -239,7 +238,16 @@ func postServerWriteFile(c *gin.Context) {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil {
|
|
||||||
|
// A content length of -1 means the actual length is unknown.
|
||||||
|
if c.Request.ContentLength == -1 {
|
||||||
|
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||||
|
"error": "Missing Content-Length",
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.Filesystem().Write(f, c.Request.Body, c.Request.ContentLength, 0o644); err != nil {
|
||||||
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
|
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
|
||||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
|
||||||
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
|
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
|
||||||
|
@ -589,15 +597,9 @@ func postServerUploadFiles(c *gin.Context) {
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, header := range headers {
|
for _, header := range headers {
|
||||||
p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename))
|
|
||||||
if err != nil {
|
|
||||||
middleware.CaptureAndAbort(c, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// We run this in a different method so I can use defer without any of
|
// We run this in a different method so I can use defer without any of
|
||||||
// the consequences caused by calling it in a loop.
|
// the consequences caused by calling it in a loop.
|
||||||
if err := handleFileUpload(p, s, header); err != nil {
|
if err := handleFileUpload(filepath.Join(directory, header.Filename), s, header); err != nil {
|
||||||
middleware.CaptureAndAbort(c, err)
|
middleware.CaptureAndAbort(c, err)
|
||||||
return
|
return
|
||||||
} else {
|
} else {
|
||||||
|
@ -619,7 +621,8 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
|
||||||
if err := s.Filesystem().IsIgnored(p); err != nil {
|
if err := s.Filesystem().IsIgnored(p); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := s.Filesystem().Writefile(p, file); err != nil {
|
|
||||||
|
if err := s.Filesystem().Write(p, file, header.Size, 0o644); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
|
@ -63,11 +64,11 @@ func postServerTransfer(c *gin.Context) {
|
||||||
if s.Environment.State() != environment.ProcessOfflineState {
|
if s.Environment.State() != environment.ProcessOfflineState {
|
||||||
if err := s.Environment.WaitForStop(
|
if err := s.Environment.WaitForStop(
|
||||||
s.Context(),
|
s.Context(),
|
||||||
time.Minute,
|
time.Second*15,
|
||||||
false,
|
false,
|
||||||
); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
|
); err != nil && !strings.Contains(strings.ToLower(err.Error()), "no such container") {
|
||||||
notifyPanelOfFailure()
|
s.SetTransferring(false)
|
||||||
s.Log().WithError(err).Error("failed to stop server for transfer")
|
middleware.CaptureAndAbort(c, errors.Wrap(err, "failed to stop server for transfer"))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -58,7 +58,7 @@ func getServerWebsocket(c *gin.Context) {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
break
|
break
|
||||||
case <-s.Context().Done():
|
case <-s.Context().Done():
|
||||||
handler.Connection.WriteControl(ws.CloseMessage, ws.FormatCloseMessage(ws.CloseGoingAway, "server deleted"), time.Now().Add(time.Second*5))
|
_ = handler.Connection.WriteControl(ws.CloseMessage, ws.FormatCloseMessage(ws.CloseGoingAway, "server deleted"), time.Now().Add(time.Second*5))
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
@ -83,7 +83,7 @@ func getServerWebsocket(c *gin.Context) {
|
||||||
|
|
||||||
go func(msg websocket.Message) {
|
go func(msg websocket.Message) {
|
||||||
if err := handler.HandleInbound(ctx, msg); err != nil {
|
if err := handler.HandleInbound(ctx, msg); err != nil {
|
||||||
handler.SendErrorJson(msg, err)
|
_ = handler.SendErrorJson(msg, err)
|
||||||
}
|
}
|
||||||
}(j)
|
}(j)
|
||||||
}
|
}
|
||||||
|
|
|
@ -113,9 +113,21 @@ func postCreateServer(c *gin.Context) {
|
||||||
c.Status(http.StatusAccepted)
|
c.Status(http.StatusAccepted)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type postUpdateConfigurationResponse struct {
|
||||||
|
Applied bool `json:"applied"`
|
||||||
|
}
|
||||||
|
|
||||||
// Updates the running configuration for this Wings instance.
|
// Updates the running configuration for this Wings instance.
|
||||||
func postUpdateConfiguration(c *gin.Context) {
|
func postUpdateConfiguration(c *gin.Context) {
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
|
|
||||||
|
if cfg.IgnorePanelConfigUpdates {
|
||||||
|
c.JSON(http.StatusOK, postUpdateConfigurationResponse{
|
||||||
|
Applied: false,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if err := c.BindJSON(&cfg); err != nil {
|
if err := c.BindJSON(&cfg); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -139,5 +151,7 @@ func postUpdateConfiguration(c *gin.Context) {
|
||||||
// Since we wrote it to the disk successfully now update the global configuration
|
// Since we wrote it to the disk successfully now update the global configuration
|
||||||
// state to use this new configuration struct.
|
// state to use this new configuration struct.
|
||||||
config.Set(cfg)
|
config.Set(cfg)
|
||||||
c.Status(http.StatusNoContent)
|
c.JSON(http.StatusOK, postUpdateConfigurationResponse{
|
||||||
|
Applied: true,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -106,6 +106,7 @@ func postTransfers(c *gin.Context) {
|
||||||
if !successful && err != nil {
|
if !successful && err != nil {
|
||||||
// Delete all extracted files.
|
// Delete all extracted files.
|
||||||
go func(trnsfr *transfer.Transfer) {
|
go func(trnsfr *transfer.Transfer) {
|
||||||
|
_ = trnsfr.Server.Filesystem().UnixFS().Close()
|
||||||
if err := os.RemoveAll(trnsfr.Server.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
|
if err := os.RemoveAll(trnsfr.Server.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
|
||||||
trnsfr.Log().WithError(err).Warn("failed to delete local server files")
|
trnsfr.Log().WithError(err).Warn("failed to delete local server files")
|
||||||
}
|
}
|
||||||
|
|
|
@ -67,7 +67,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ad, err := b.Generate(s.Context(), s.Filesystem().Path(), ignored)
|
ad, err := b.Generate(s.Context(), s.Filesystem(), ignored)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); err != nil {
|
if err := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); err != nil {
|
||||||
s.Log().WithFields(log.Fields{
|
s.Log().WithFields(log.Fields{
|
||||||
|
@ -154,17 +154,14 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
|
||||||
err = b.Restore(s.Context(), reader, func(file string, info fs.FileInfo, r io.ReadCloser) error {
|
err = b.Restore(s.Context(), reader, func(file string, info fs.FileInfo, r io.ReadCloser) error {
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
|
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
|
||||||
|
// TODO: since this will be called a lot, it may be worth adding an optimized
|
||||||
if err := s.Filesystem().Writefile(file, r); err != nil {
|
// Write with Chtimes method to the UnixFS that is able to re-use the
|
||||||
|
// same dirfd and file name.
|
||||||
|
if err := s.Filesystem().Write(file, r, info.Size(), info.Mode()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := s.Filesystem().Chmod(file, info.Mode()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
atime := info.ModTime()
|
atime := info.ModTime()
|
||||||
mtime := atime
|
return s.Filesystem().Chtimes(file, atime, atime)
|
||||||
return s.Filesystem().Chtimes(file, atime, mtime)
|
|
||||||
})
|
})
|
||||||
|
|
||||||
return errors.WithStackIf(err)
|
return errors.WithStackIf(err)
|
||||||
|
|
|
@ -16,6 +16,7 @@ import (
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/remote"
|
"github.com/pterodactyl/wings/remote"
|
||||||
|
"github.com/pterodactyl/wings/server/filesystem"
|
||||||
)
|
)
|
||||||
|
|
||||||
var format = archiver.CompressedArchive{
|
var format = archiver.CompressedArchive{
|
||||||
|
@ -46,7 +47,7 @@ type BackupInterface interface {
|
||||||
WithLogContext(map[string]interface{})
|
WithLogContext(map[string]interface{})
|
||||||
// Generate creates a backup in whatever the configured source for the
|
// Generate creates a backup in whatever the configured source for the
|
||||||
// specific implementation is.
|
// specific implementation is.
|
||||||
Generate(context.Context, string, string) (*ArchiveDetails, error)
|
Generate(context.Context, *filesystem.Filesystem, string) (*ArchiveDetails, error)
|
||||||
// Ignored returns the ignored files for this backup instance.
|
// Ignored returns the ignored files for this backup instance.
|
||||||
Ignored() string
|
Ignored() string
|
||||||
// Checksum returns a SHA1 checksum for the generated backup.
|
// Checksum returns a SHA1 checksum for the generated backup.
|
||||||
|
|
|
@ -59,10 +59,10 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
|
||||||
|
|
||||||
// Generate generates a backup of the selected files and pushes it to the
|
// Generate generates a backup of the selected files and pushes it to the
|
||||||
// defined location for this instance.
|
// defined location for this instance.
|
||||||
func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
|
func (b *LocalBackup) Generate(ctx context.Context, fsys *filesystem.Filesystem, ignore string) (*ArchiveDetails, error) {
|
||||||
a := &filesystem.Archive{
|
a := &filesystem.Archive{
|
||||||
BasePath: basePath,
|
Filesystem: fsys,
|
||||||
Ignore: ignore,
|
Ignore: ignore,
|
||||||
}
|
}
|
||||||
|
|
||||||
b.log().WithField("path", b.Path()).Info("creating backup for server")
|
b.log().WithField("path", b.Path()).Info("creating backup for server")
|
||||||
|
@ -85,6 +85,7 @@ func (b *LocalBackup) Restore(ctx context.Context, _ io.Reader, callback Restore
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
var reader io.Reader = f
|
var reader io.Reader = f
|
||||||
// Steal the logic we use for making backups which will be applied when restoring
|
// Steal the logic we use for making backups which will be applied when restoring
|
||||||
|
@ -99,7 +100,7 @@ func (b *LocalBackup) Restore(ctx context.Context, _ io.Reader, callback Restore
|
||||||
}
|
}
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
|
return callback(f.NameInArchive, f.FileInfo, r)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -48,12 +48,12 @@ func (s *S3Backup) WithLogContext(c map[string]interface{}) {
|
||||||
|
|
||||||
// Generate creates a new backup on the disk, moves it into the S3 bucket via
|
// Generate creates a new backup on the disk, moves it into the S3 bucket via
|
||||||
// the provided presigned URL, and then deletes the backup from the disk.
|
// the provided presigned URL, and then deletes the backup from the disk.
|
||||||
func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
|
func (s *S3Backup) Generate(ctx context.Context, fsys *filesystem.Filesystem, ignore string) (*ArchiveDetails, error) {
|
||||||
defer s.Remove()
|
defer s.Remove()
|
||||||
|
|
||||||
a := &filesystem.Archive{
|
a := &filesystem.Archive{
|
||||||
BasePath: basePath,
|
Filesystem: fsys,
|
||||||
Ignore: ignore,
|
Ignore: ignore,
|
||||||
}
|
}
|
||||||
|
|
||||||
s.log().WithField("path", s.Path()).Info("creating backup for server")
|
s.log().WithField("path", s.Path()).Info("creating backup for server")
|
||||||
|
@ -100,7 +100,7 @@ func (s *S3Backup) Restore(ctx context.Context, r io.Reader, callback RestoreCal
|
||||||
}
|
}
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
return callback(filesystem.ExtractNameFromArchive(f), f.FileInfo, r)
|
return callback(f.NameInArchive, f.FileInfo, r)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -4,9 +4,11 @@ import (
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
"github.com/gammazero/workerpool"
|
"github.com/gammazero/workerpool"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
// UpdateConfigurationFiles updates all of the defined configuration files for
|
// UpdateConfigurationFiles updates all the defined configuration files for
|
||||||
// a server automatically to ensure that they always use the specified values.
|
// a server automatically to ensure that they always use the specified values.
|
||||||
func (s *Server) UpdateConfigurationFiles() {
|
func (s *Server) UpdateConfigurationFiles() {
|
||||||
pool := workerpool.New(runtime.NumCPU())
|
pool := workerpool.New(runtime.NumCPU())
|
||||||
|
@ -18,18 +20,18 @@ func (s *Server) UpdateConfigurationFiles() {
|
||||||
f := cf
|
f := cf
|
||||||
|
|
||||||
pool.Submit(func() {
|
pool.Submit(func() {
|
||||||
p, err := s.Filesystem().SafePath(f.FileName)
|
file, err := s.Filesystem().UnixFS().Touch(f.FileName, ufs.O_RDWR|ufs.O_CREATE, 0o644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
|
s.Log().WithField("file_name", f.FileName).WithField("error", err).Error("failed to open file for configuration")
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
if err := f.Parse(p, false); err != nil {
|
if err := f.Parse(file); err != nil {
|
||||||
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
|
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
|
||||||
}
|
}
|
||||||
|
|
||||||
s.Log().WithField("path", f.FileName).Debug("finished processing server configuration file")
|
s.Log().WithField("file_name", f.FileName).Debug("finished processing server configuration file")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -37,7 +37,7 @@ func (s *Server) Throttler() *ConsoleThrottle {
|
||||||
|
|
||||||
s.throttler = newConsoleThrottle(throttles.Lines, period)
|
s.throttler = newConsoleThrottle(throttles.Lines, period)
|
||||||
s.throttler.strike = func() {
|
s.throttler.strike = func() {
|
||||||
s.PublishConsoleOutputFromDaemon(fmt.Sprintf("Server is outputting console data too quickly -- throttling..."))
|
s.PublishConsoleOutputFromDaemon("Server is outputting console data too quickly -- throttling...")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
return s.throttler
|
return s.throttler
|
||||||
|
|
|
@ -6,6 +6,8 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"emperror.dev/errors"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/environment"
|
"github.com/pterodactyl/wings/environment"
|
||||||
)
|
)
|
||||||
|
@ -57,7 +59,7 @@ func (s *Server) handleServerCrash() error {
|
||||||
|
|
||||||
exitCode, oomKilled, err := s.Environment.ExitState()
|
exitCode, oomKilled, err := s.Environment.ExitState()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return errors.Wrap(err, "failed to get exit state for server process")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the system is not configured to detect a clean exit code as a crash, and the
|
// If the system is not configured to detect a clean exit code as a crash, and the
|
||||||
|
@ -85,5 +87,5 @@ func (s *Server) handleServerCrash() error {
|
||||||
|
|
||||||
s.crasher.SetLastCrash(time.Now())
|
s.crasher.SetLastCrash(time.Now())
|
||||||
|
|
||||||
return s.HandlePowerAction(PowerActionStart)
|
return errors.Wrap(s.HandlePowerAction(PowerActionStart), "failed to start server after crash detection")
|
||||||
}
|
}
|
||||||
|
|
|
@ -13,12 +13,12 @@ import (
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/juju/ratelimit"
|
"github.com/juju/ratelimit"
|
||||||
"github.com/karrick/godirwalk"
|
|
||||||
"github.com/klauspost/pgzip"
|
"github.com/klauspost/pgzip"
|
||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/internal/progress"
|
"github.com/pterodactyl/wings/internal/progress"
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
const memory = 4 * 1024
|
const memory = 4 * 1024
|
||||||
|
@ -56,25 +56,35 @@ func (p *TarProgress) Write(v []byte) (int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
type Archive struct {
|
type Archive struct {
|
||||||
// BasePath is the absolute path to create the archive from where Files and Ignore are
|
// Filesystem to create the archive with.
|
||||||
// relative to.
|
Filesystem *Filesystem
|
||||||
BasePath string
|
|
||||||
|
|
||||||
// Ignore is a gitignore string (most likely read from a file) of files to ignore
|
// Ignore is a gitignore string (most likely read from a file) of files to ignore
|
||||||
// from the archive.
|
// from the archive.
|
||||||
Ignore string
|
Ignore string
|
||||||
|
|
||||||
// Files specifies the files to archive, this takes priority over the Ignore option, if
|
// BaseDirectory .
|
||||||
// unspecified, all files in the BasePath will be archived unless Ignore is set.
|
BaseDirectory string
|
||||||
|
|
||||||
|
// Files specifies the files to archive, this takes priority over the Ignore
|
||||||
|
// option, if unspecified, all files in the BaseDirectory will be archived
|
||||||
|
// unless Ignore is set.
|
||||||
Files []string
|
Files []string
|
||||||
|
|
||||||
// Progress wraps the writer of the archive to pass through the progress tracker.
|
// Progress wraps the writer of the archive to pass through the progress tracker.
|
||||||
Progress *progress.Progress
|
Progress *progress.Progress
|
||||||
|
|
||||||
|
w *TarProgress
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create creates an archive at dst with all the files defined in the
|
// Create creates an archive at dst with all the files defined in the
|
||||||
// included Files array.
|
// included Files array.
|
||||||
|
//
|
||||||
|
// THIS IS UNSAFE TO USE IF `dst` IS PROVIDED BY A USER! ONLY USE THIS WITH
|
||||||
|
// CONTROLLED PATHS!
|
||||||
func (a *Archive) Create(ctx context.Context, dst string) error {
|
func (a *Archive) Create(ctx context.Context, dst string) error {
|
||||||
|
// Using os.OpenFile here is expected, as long as `dst` is not a user
|
||||||
|
// provided path.
|
||||||
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -95,8 +105,30 @@ func (a *Archive) Create(ctx context.Context, dst string) error {
|
||||||
return a.Stream(ctx, writer)
|
return a.Stream(ctx, writer)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stream .
|
type walkFunc func(dirfd int, name, relative string, d ufs.DirEntry) error
|
||||||
|
|
||||||
|
// Stream streams the creation of the archive to the given writer.
|
||||||
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
||||||
|
if a.Filesystem == nil {
|
||||||
|
return errors.New("filesystem: archive.Filesystem is unset")
|
||||||
|
}
|
||||||
|
|
||||||
|
// The base directory may come with a prefixed `/`, strip it to prevent
|
||||||
|
// problems.
|
||||||
|
a.BaseDirectory = strings.TrimPrefix(a.BaseDirectory, "/")
|
||||||
|
|
||||||
|
if filesLen := len(a.Files); filesLen > 0 {
|
||||||
|
files := make([]string, filesLen)
|
||||||
|
for i, f := range a.Files {
|
||||||
|
if !strings.HasPrefix(f, a.Filesystem.Path()) {
|
||||||
|
files[i] = f
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
files[i] = strings.TrimPrefix(strings.TrimPrefix(f, a.Filesystem.Path()), "/")
|
||||||
|
}
|
||||||
|
a.Files = files
|
||||||
|
}
|
||||||
|
|
||||||
// Choose which compression level to use based on the compression_level configuration option
|
// Choose which compression level to use based on the compression_level configuration option
|
||||||
var compressionLevel int
|
var compressionLevel int
|
||||||
switch config.Get().System.Backups.CompressionLevel {
|
switch config.Get().System.Backups.CompressionLevel {
|
||||||
|
@ -104,8 +136,6 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
||||||
compressionLevel = pgzip.NoCompression
|
compressionLevel = pgzip.NoCompression
|
||||||
case "best_compression":
|
case "best_compression":
|
||||||
compressionLevel = pgzip.BestCompression
|
compressionLevel = pgzip.BestCompression
|
||||||
case "best_speed":
|
|
||||||
fallthrough
|
|
||||||
default:
|
default:
|
||||||
compressionLevel = pgzip.BestSpeed
|
compressionLevel = pgzip.BestSpeed
|
||||||
}
|
}
|
||||||
|
@ -119,105 +149,124 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
|
||||||
tw := tar.NewWriter(gw)
|
tw := tar.NewWriter(gw)
|
||||||
defer tw.Close()
|
defer tw.Close()
|
||||||
|
|
||||||
pw := NewTarProgress(tw, a.Progress)
|
a.w = NewTarProgress(tw, a.Progress)
|
||||||
|
|
||||||
// Configure godirwalk.
|
fs := a.Filesystem.unixFS
|
||||||
options := &godirwalk.Options{
|
|
||||||
FollowSymbolicLinks: false,
|
|
||||||
Unsorted: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're specifically looking for only certain files, or have requested
|
// If we're specifically looking for only certain files, or have requested
|
||||||
// that certain files be ignored we'll update the callback function to reflect
|
// that certain files be ignored we'll update the callback function to reflect
|
||||||
// that request.
|
// that request.
|
||||||
var callback godirwalk.WalkFunc
|
var callback walkFunc
|
||||||
if len(a.Files) == 0 && len(a.Ignore) > 0 {
|
if len(a.Files) == 0 && len(a.Ignore) > 0 {
|
||||||
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
|
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
|
||||||
|
callback = a.callback(func(_ int, _, relative string, _ ufs.DirEntry) error {
|
||||||
callback = a.callback(pw, func(_ string, rp string) error {
|
if i.MatchesPath(relative) {
|
||||||
if i.MatchesPath(rp) {
|
return SkipThis
|
||||||
return godirwalk.SkipThis
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
} else if len(a.Files) > 0 {
|
} else if len(a.Files) > 0 {
|
||||||
callback = a.withFilesCallback(pw)
|
callback = a.withFilesCallback()
|
||||||
} else {
|
} else {
|
||||||
callback = a.callback(pw)
|
callback = a.callback()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the callback function, wrapped with support for context cancellation.
|
// Open the base directory we were provided.
|
||||||
options.Callback = func(path string, de *godirwalk.Dirent) error {
|
dirfd, name, closeFd, err := fs.SafePath(a.BaseDirectory)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Recursively walk the base directory.
|
||||||
|
return fs.WalkDirat(dirfd, name, func(dirfd int, name, relative string, d ufs.DirEntry, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
default:
|
default:
|
||||||
return callback(path, de)
|
return callback(dirfd, name, relative, d)
|
||||||
}
|
}
|
||||||
}
|
})
|
||||||
|
|
||||||
// Recursively walk the path we are archiving.
|
|
||||||
return godirwalk.Walk(a.BasePath, options)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Callback function used to determine if a given file should be included in the archive
|
// Callback function used to determine if a given file should be included in the archive
|
||||||
// being generated.
|
// being generated.
|
||||||
func (a *Archive) callback(tw *TarProgress, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
|
func (a *Archive) callback(opts ...walkFunc) walkFunc {
|
||||||
return func(path string, de *godirwalk.Dirent) error {
|
// Get the base directory we need to strip when walking.
|
||||||
|
//
|
||||||
|
// This is important as when we are walking, the last part of the base directory
|
||||||
|
// is present on all the paths we walk.
|
||||||
|
var base string
|
||||||
|
if a.BaseDirectory != "" {
|
||||||
|
base = filepath.Base(a.BaseDirectory) + "/"
|
||||||
|
}
|
||||||
|
return func(dirfd int, name, relative string, d ufs.DirEntry) error {
|
||||||
// Skip directories because we are walking them recursively.
|
// Skip directories because we are walking them recursively.
|
||||||
if de.IsDir() {
|
if d.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))
|
// If base isn't empty, strip it from the relative path. This fixes an
|
||||||
|
// issue when creating an archive starting from a nested directory.
|
||||||
|
//
|
||||||
|
// See https://github.com/pterodactyl/panel/issues/5030 for more details.
|
||||||
|
if base != "" {
|
||||||
|
relative = strings.TrimPrefix(relative, base)
|
||||||
|
}
|
||||||
|
|
||||||
// Call the additional options passed to this callback function. If any of them return
|
// Call the additional options passed to this callback function. If any of them return
|
||||||
// a non-nil error we will exit immediately.
|
// a non-nil error we will exit immediately.
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
if err := opt(path, relative); err != nil {
|
if err := opt(dirfd, name, relative, d); err != nil {
|
||||||
|
if err == SkipThis {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add the file to the archive, if it is nested in a directory,
|
// Add the file to the archive, if it is nested in a directory,
|
||||||
// the directory will be automatically "created" in the archive.
|
// the directory will be automatically "created" in the archive.
|
||||||
return a.addToArchive(path, relative, tw)
|
return a.addToArchive(dirfd, name, relative, d)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var SkipThis = errors.New("skip this")
|
||||||
|
|
||||||
// Pushes only files defined in the Files key to the final archive.
|
// Pushes only files defined in the Files key to the final archive.
|
||||||
func (a *Archive) withFilesCallback(tw *TarProgress) func(path string, de *godirwalk.Dirent) error {
|
func (a *Archive) withFilesCallback() walkFunc {
|
||||||
return a.callback(tw, func(p string, rp string) error {
|
return a.callback(func(_ int, _, relative string, _ ufs.DirEntry) error {
|
||||||
for _, f := range a.Files {
|
for _, f := range a.Files {
|
||||||
// If the given doesn't match, or doesn't have the same prefix continue
|
// Allow exact file matches, otherwise check if file is within a parent directory.
|
||||||
// to the next item in the loop.
|
//
|
||||||
if p != f && !strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", f) {
|
// The slashes are added in the prefix checks to prevent partial name matches from being
|
||||||
|
// included in the archive.
|
||||||
|
if f != relative && !strings.HasPrefix(strings.TrimSuffix(relative, "/")+"/", strings.TrimSuffix(f, "/")+"/") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Once we have a match return a nil value here so that the loop stops and the
|
// Once we have a match return a nil value here so that the loop stops and the
|
||||||
// call to this function will correctly include the file in the archive. If there
|
// call to this function will correctly include the file in the archive. If there
|
||||||
// are no matches we'll never make it to this line, and the final error returned
|
// are no matches we'll never make it to this line, and the final error returned
|
||||||
// will be the godirwalk.SkipThis error.
|
// will be the ufs.SkipDir error.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return godirwalk.SkipThis
|
return SkipThis
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adds a given file path to the final archive being created.
|
// Adds a given file path to the final archive being created.
|
||||||
func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
func (a *Archive) addToArchive(dirfd int, name, relative string, entry ufs.DirEntry) error {
|
||||||
// Lstat the file, this will give us the same information as Stat except that it will not
|
s, err := entry.Info()
|
||||||
// follow a symlink to its target automatically. This is important to avoid including
|
|
||||||
// files that exist outside the server root unintentionally in the backup.
|
|
||||||
s, err := os.Lstat(p)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(err) {
|
if errors.Is(err, ufs.ErrNotExist) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return errors.WrapIff(err, "failed executing os.Lstat on '%s'", rp)
|
return errors.WrapIff(err, "failed executing os.Lstat on '%s'", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Skip socket files as they are unsupported by archive/tar.
|
// Skip socket files as they are unsupported by archive/tar.
|
||||||
|
@ -237,7 +286,7 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Ignore the not exist errors specifically, since there is nothing important about that.
|
// Ignore the not exist errors specifically, since there is nothing important about that.
|
||||||
if !os.IsNotExist(err) {
|
if !os.IsNotExist(err) {
|
||||||
log.WithField("path", rp).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
|
log.WithField("name", name).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -246,17 +295,17 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
||||||
// Get the tar FileInfoHeader in order to add the file to the archive.
|
// Get the tar FileInfoHeader in order to add the file to the archive.
|
||||||
header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
|
header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", rp)
|
return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fix the header name if the file is not a symlink.
|
// Fix the header name if the file is not a symlink.
|
||||||
if s.Mode()&fs.ModeSymlink == 0 {
|
if s.Mode()&fs.ModeSymlink == 0 {
|
||||||
header.Name = rp
|
header.Name = relative
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write the tar FileInfoHeader to the archive.
|
// Write the tar FileInfoHeader to the archive.
|
||||||
if err := w.WriteHeader(header); err != nil {
|
if err := a.w.WriteHeader(header); err != nil {
|
||||||
return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", rp)
|
return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
|
// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
|
||||||
|
@ -278,7 +327,7 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Open the file.
|
// Open the file.
|
||||||
f, err := os.Open(p)
|
f, err := a.Filesystem.unixFS.OpenFileat(dirfd, name, ufs.O_RDONLY, 0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
return nil
|
return nil
|
||||||
|
@ -288,9 +337,8 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
// Copy the file's contents to the archive using our buffer.
|
// Copy the file's contents to the archive using our buffer.
|
||||||
if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
|
if _, err := io.CopyBuffer(a.w, io.LimitReader(f, header.Size), buf); err != nil {
|
||||||
return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
|
return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
122
server/filesystem/archive_test.go
Normal file
122
server/filesystem/archive_test.go
Normal file
|
@ -0,0 +1,122 @@
|
||||||
|
package filesystem
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
iofs "io/fs"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
. "github.com/franela/goblin"
|
||||||
|
"github.com/mholt/archiver/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestArchive_Stream(t *testing.T) {
|
||||||
|
g := Goblin(t)
|
||||||
|
fs, rfs := NewFs()
|
||||||
|
|
||||||
|
g.Describe("Archive", func() {
|
||||||
|
g.AfterEach(func() {
|
||||||
|
// Reset the filesystem after each run.
|
||||||
|
_ = fs.TruncateRootDirectory()
|
||||||
|
})
|
||||||
|
|
||||||
|
g.It("creates archive with intended files", func() {
|
||||||
|
g.Assert(fs.CreateDirectory("test", "/")).IsNil()
|
||||||
|
g.Assert(fs.CreateDirectory("test2", "/")).IsNil()
|
||||||
|
|
||||||
|
r := strings.NewReader("hello, world!\n")
|
||||||
|
err := fs.Write("test/file.txt", r, r.Size(), 0o644)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
r = strings.NewReader("hello, world!\n")
|
||||||
|
err = fs.Write("test2/file.txt", r, r.Size(), 0o644)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
r = strings.NewReader("hello, world!\n")
|
||||||
|
err = fs.Write("test_file.txt", r, r.Size(), 0o644)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
r = strings.NewReader("hello, world!\n")
|
||||||
|
err = fs.Write("test_file.txt.old", r, r.Size(), 0o644)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
a := &Archive{
|
||||||
|
Filesystem: fs,
|
||||||
|
Files: []string{
|
||||||
|
"test",
|
||||||
|
"test_file.txt",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the archive.
|
||||||
|
archivePath := filepath.Join(rfs.root, "archive.tar.gz")
|
||||||
|
g.Assert(a.Create(context.Background(), archivePath)).IsNil()
|
||||||
|
|
||||||
|
// Ensure the archive exists.
|
||||||
|
_, err = os.Stat(archivePath)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Open the archive.
|
||||||
|
genericFs, err := archiver.FileSystem(context.Background(), archivePath)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Assert that we are opening an archive.
|
||||||
|
afs, ok := genericFs.(archiver.ArchiveFS)
|
||||||
|
g.Assert(ok).IsTrue()
|
||||||
|
|
||||||
|
// Get the names of the files recursively from the archive.
|
||||||
|
files, err := getFiles(afs, ".")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Ensure the files in the archive match what we are expecting.
|
||||||
|
expected := []string{
|
||||||
|
"test_file.txt",
|
||||||
|
"test/file.txt",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort the slices to ensure the comparison never fails if the
|
||||||
|
// contents are sorted differently.
|
||||||
|
sort.Strings(expected)
|
||||||
|
sort.Strings(files)
|
||||||
|
|
||||||
|
g.Assert(files).Equal(expected)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFiles(f iofs.ReadDirFS, name string) ([]string, error) {
|
||||||
|
var v []string
|
||||||
|
|
||||||
|
entries, err := f.ReadDir(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, e := range entries {
|
||||||
|
entryName := e.Name()
|
||||||
|
if name != "." {
|
||||||
|
entryName = filepath.Join(name, entryName)
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.IsDir() {
|
||||||
|
files, err := getFiles(f, entryName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if files == nil {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
v = append(v, files...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
v = append(v, entryName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return v, nil
|
||||||
|
}
|
100
server/filesystem/archiverext/compressed.go
Normal file
100
server/filesystem/archiverext/compressed.go
Normal file
|
@ -0,0 +1,100 @@
|
||||||
|
// SPDX-License-Identifier: MIT
|
||||||
|
// SPDX-FileCopyrightText: Copyright (c) 2016 Matthew Holt
|
||||||
|
|
||||||
|
// Code in this file was derived from
|
||||||
|
// https://github.com/mholt/archiver/blob/v4.0.0-alpha.8/fs.go
|
||||||
|
//
|
||||||
|
// These modifications were necessary to allow us to use an already open file
|
||||||
|
// with archiver.FileFS.
|
||||||
|
|
||||||
|
package archiverext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/fs"
|
||||||
|
|
||||||
|
"github.com/mholt/archiver/v4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileFS allows accessing a file on disk using a consistent file system interface.
|
||||||
|
// The value should be the path to a regular file, not a directory. This file will
|
||||||
|
// be the only entry in the file system and will be at its root. It can be accessed
|
||||||
|
// within the file system by the name of "." or the filename.
|
||||||
|
//
|
||||||
|
// If the file is compressed, set the Compression field so that reads from the
|
||||||
|
// file will be transparently decompressed.
|
||||||
|
type FileFS struct {
|
||||||
|
// File is the compressed file backing the FileFS.
|
||||||
|
File fs.File
|
||||||
|
|
||||||
|
// If file is compressed, setting this field will
|
||||||
|
// transparently decompress reads.
|
||||||
|
Compression archiver.Decompressor
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens the named file, which must be the file used to create the file system.
|
||||||
|
func (f FileFS) Open(name string) (fs.File, error) {
|
||||||
|
if err := f.checkName(name, "open"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if f.Compression == nil {
|
||||||
|
return f.File, nil
|
||||||
|
}
|
||||||
|
r, err := f.Compression.OpenReader(f.File)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return compressedFile{f.File, r}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDir returns a directory listing with the file as the singular entry.
|
||||||
|
func (f FileFS) ReadDir(name string) ([]fs.DirEntry, error) {
|
||||||
|
if err := f.checkName(name, "stat"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
info, err := f.Stat(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return []fs.DirEntry{fs.FileInfoToDirEntry(info)}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat stats the named file, which must be the file used to create the file system.
|
||||||
|
func (f FileFS) Stat(name string) (fs.FileInfo, error) {
|
||||||
|
if err := f.checkName(name, "stat"); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.File.Stat()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f FileFS) checkName(name, op string) error {
|
||||||
|
if !fs.ValidPath(name) {
|
||||||
|
return &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
|
||||||
|
}
|
||||||
|
// TODO: we may need better name validation.
|
||||||
|
if name != "." {
|
||||||
|
return &fs.PathError{Op: op, Path: name, Err: fs.ErrNotExist}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// compressedFile is an fs.File that specially reads
|
||||||
|
// from a decompression reader, and which closes both
|
||||||
|
// that reader and the underlying file.
|
||||||
|
type compressedFile struct {
|
||||||
|
fs.File
|
||||||
|
decomp io.ReadCloser
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cf compressedFile) Read(p []byte) (int, error) {
|
||||||
|
return cf.decomp.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cf compressedFile) Close() error {
|
||||||
|
err := cf.File.Close()
|
||||||
|
err2 := cf.decomp.Close()
|
||||||
|
if err2 != nil && err == nil {
|
||||||
|
err = err2
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
|
@ -1,25 +1,22 @@
|
||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"archive/tar"
|
|
||||||
"archive/zip"
|
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
iofs "io/fs"
|
iofs "io/fs"
|
||||||
"os"
|
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
gzip2 "github.com/klauspost/compress/gzip"
|
"github.com/klauspost/compress/zip"
|
||||||
zip2 "github.com/klauspost/compress/zip"
|
|
||||||
"github.com/mholt/archiver/v4"
|
"github.com/mholt/archiver/v4"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
|
"github.com/pterodactyl/wings/server/filesystem/archiverext"
|
||||||
)
|
)
|
||||||
|
|
||||||
// CompressFiles compresses all the files matching the given paths in the
|
// CompressFiles compresses all the files matching the given paths in the
|
||||||
|
@ -31,46 +28,70 @@ import (
|
||||||
// All paths are relative to the dir that is passed in as the first argument,
|
// All paths are relative to the dir that is passed in as the first argument,
|
||||||
// and the compressed file will be placed at that location named
|
// and the compressed file will be placed at that location named
|
||||||
// `archive-{date}.tar.gz`.
|
// `archive-{date}.tar.gz`.
|
||||||
func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
|
func (fs *Filesystem) CompressFiles(dir string, paths []string) (ufs.FileInfo, error) {
|
||||||
cleanedRootDir, err := fs.SafePath(dir)
|
a := &Archive{Filesystem: fs, BaseDirectory: dir, Files: paths}
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Take all the paths passed in and merge them together with the root directory we've gotten.
|
|
||||||
for i, p := range paths {
|
|
||||||
paths[i] = filepath.Join(cleanedRootDir, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
cleaned, err := fs.ParallelSafePath(paths)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
|
|
||||||
d := path.Join(
|
d := path.Join(
|
||||||
cleanedRootDir,
|
dir,
|
||||||
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
|
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
|
||||||
)
|
)
|
||||||
|
f, err := fs.unixFS.OpenFile(d, ufs.O_WRONLY|ufs.O_CREATE, 0o644)
|
||||||
if err := a.Create(context.Background(), d); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Stat(d)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = os.Remove(d)
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
cw := ufs.NewCountedWriter(f)
|
||||||
|
if err := a.Stream(context.Background(), cw); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !fs.unixFS.CanFit(cw.BytesWritten()) {
|
||||||
|
_ = fs.unixFS.Remove(d)
|
||||||
|
return nil, newFilesystemError(ErrCodeDiskSpace, nil)
|
||||||
|
}
|
||||||
|
fs.unixFS.Add(cw.BytesWritten())
|
||||||
|
return f.Stat()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) archiverFileSystem(ctx context.Context, p string) (iofs.FS, error) {
|
||||||
|
f, err := fs.unixFS.Open(p)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Do not use defer to close `f`, it will likely be used later.
|
||||||
|
|
||||||
|
format, _, err := archiver.Identify(filepath.Base(p), f)
|
||||||
|
if err != nil && !errors.Is(err, archiver.ErrNoMatch) {
|
||||||
|
_ = f.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := fs.HasSpaceFor(f.Size()); err != nil {
|
// Reset the file reader.
|
||||||
_ = os.Remove(d)
|
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||||
|
_ = f.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fs.addDisk(f.Size())
|
info, err := f.Stat()
|
||||||
|
if err != nil {
|
||||||
|
_ = f.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
return f, nil
|
if format != nil {
|
||||||
|
switch ff := format.(type) {
|
||||||
|
case archiver.Zip:
|
||||||
|
// zip.Reader is more performant than ArchiveFS, because zip.Reader caches content information
|
||||||
|
// and zip.Reader can open several content files concurrently because of io.ReaderAt requirement
|
||||||
|
// while ArchiveFS can't.
|
||||||
|
// zip.Reader doesn't suffer from issue #330 and #310 according to local test (but they should be fixed anyway)
|
||||||
|
return zip.NewReader(f, info.Size())
|
||||||
|
case archiver.Archival:
|
||||||
|
return archiver.ArchiveFS{Stream: io.NewSectionReader(f, 0, info.Size()), Format: ff, Context: ctx}, nil
|
||||||
|
case archiver.Compression:
|
||||||
|
return archiverext.FileFS{File: f, Compression: ff}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ = f.Close()
|
||||||
|
return nil, archiver.ErrNoMatch
|
||||||
}
|
}
|
||||||
|
|
||||||
// SpaceAvailableForDecompression looks through a given archive and determines
|
// SpaceAvailableForDecompression looks through a given archive and determines
|
||||||
|
@ -82,16 +103,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
fsys, err := fs.archiverFileSystem(ctx, filepath.Join(dir, file))
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the cached size in a parallel process so that if it is not cached we are not
|
|
||||||
// waiting an unnecessary amount of time on this call.
|
|
||||||
dirSize, err := fs.DiskUsage(false)
|
|
||||||
|
|
||||||
fsys, err := archiver.FileSystem(source)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, archiver.ErrNoMatch) {
|
if errors.Is(err, archiver.ErrNoMatch) {
|
||||||
return newFilesystemError(ErrCodeUnknownArchive, err)
|
return newFilesystemError(ErrCodeUnknownArchive, err)
|
||||||
|
@ -99,7 +111,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var size int64
|
var size atomic.Int64
|
||||||
return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
|
return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -114,7 +126,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if atomic.AddInt64(&size, info.Size())+dirSize > fs.MaxDisk() {
|
if !fs.unixFS.CanFit(size.Add(info.Size())) {
|
||||||
return newFilesystemError(ErrCodeDiskSpace, nil)
|
return newFilesystemError(ErrCodeDiskSpace, nil)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -128,27 +140,11 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
|
||||||
// zip-slip attack being attempted by validating that the final path is within
|
// zip-slip attack being attempted by validating that the final path is within
|
||||||
// the server data directory.
|
// the server data directory.
|
||||||
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
|
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
|
||||||
source, err := fs.SafePath(filepath.Join(dir, file))
|
f, err := fs.unixFS.Open(filepath.Join(dir, file))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return fs.DecompressFileUnsafe(ctx, dir, source)
|
defer f.Close()
|
||||||
}
|
|
||||||
|
|
||||||
// DecompressFileUnsafe will decompress any file on the local disk without checking
|
|
||||||
// if it is owned by the server. The file will be SAFELY decompressed and extracted
|
|
||||||
// into the server's directory.
|
|
||||||
func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file string) error {
|
|
||||||
// Ensure that the archive actually exists on the system.
|
|
||||||
if _, err := os.Stat(file); err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.Open(file)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// TODO: defer file close?
|
|
||||||
|
|
||||||
// Identify the type of archive we are dealing with.
|
// Identify the type of archive we are dealing with.
|
||||||
format, input, err := archiver.Identify(filepath.Base(file), f)
|
format, input, err := archiver.Identify(filepath.Base(file), f)
|
||||||
|
@ -160,6 +156,7 @@ func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.extractStream(ctx, extractStreamOptions{
|
return fs.extractStream(ctx, extractStreamOptions{
|
||||||
|
FileName: file,
|
||||||
Directory: dir,
|
Directory: dir,
|
||||||
Format: format,
|
Format: format,
|
||||||
Reader: input,
|
Reader: input,
|
||||||
|
@ -175,7 +172,6 @@ func (fs *Filesystem) ExtractStreamUnsafe(ctx context.Context, dir string, r io.
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.extractStream(ctx, extractStreamOptions{
|
return fs.extractStream(ctx, extractStreamOptions{
|
||||||
Directory: dir,
|
Directory: dir,
|
||||||
Format: format,
|
Format: format,
|
||||||
|
@ -195,80 +191,95 @@ type extractStreamOptions struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
|
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
|
||||||
// Decompress and extract archive
|
|
||||||
if ex, ok := opts.Format.(archiver.Extractor); ok {
|
// See if it's a compressed archive, such as TAR or a ZIP
|
||||||
return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
|
ex, ok := opts.Format.(archiver.Extractor)
|
||||||
if f.IsDir() {
|
if !ok {
|
||||||
return nil
|
|
||||||
|
// If not, check if it's a single-file compression, such as
|
||||||
|
// .log.gz, .sql.gz, and so on
|
||||||
|
de, ok := opts.Format.(archiver.Decompressor)
|
||||||
|
if !ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strip the compression suffix
|
||||||
|
p := filepath.Join(opts.Directory, strings.TrimSuffix(opts.FileName, opts.Format.Name()))
|
||||||
|
|
||||||
|
// Make sure it's not ignored
|
||||||
|
if err := fs.IsIgnored(p); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
reader, err := de.OpenReader(opts.Reader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer reader.Close()
|
||||||
|
|
||||||
|
// Open the file for creation/writing
|
||||||
|
f, err := fs.unixFS.OpenFile(p, ufs.O_WRONLY|ufs.O_CREATE, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
// Read in 4 KB chunks
|
||||||
|
buf := make([]byte, 4096)
|
||||||
|
for {
|
||||||
|
n, err := reader.Read(buf)
|
||||||
|
if n > 0 {
|
||||||
|
|
||||||
|
// Check quota before writing the chunk
|
||||||
|
if quotaErr := fs.HasSpaceFor(int64(n)); quotaErr != nil {
|
||||||
|
return quotaErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the chunk
|
||||||
|
if _, writeErr := f.Write(buf[:n]); writeErr != nil {
|
||||||
|
return writeErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add to quota
|
||||||
|
fs.addDisk(int64(n))
|
||||||
}
|
}
|
||||||
p := filepath.Join(opts.Directory, ExtractNameFromArchive(f))
|
|
||||||
// If it is ignored, just don't do anything with the file and skip over it.
|
|
||||||
if err := fs.IsIgnored(p); err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
r, err := f.Open()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// EOF are expected
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return any other
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer r.Close()
|
|
||||||
if err := fs.Writefile(p, r); err != nil {
|
|
||||||
return wrapError(err, opts.FileName)
|
|
||||||
}
|
|
||||||
// Update the file permissions to the one set in the archive.
|
|
||||||
if err := fs.Chmod(p, f.Mode()); err != nil {
|
|
||||||
return wrapError(err, opts.FileName)
|
|
||||||
}
|
|
||||||
// Update the file modification time to the one set in the archive.
|
|
||||||
if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
|
|
||||||
return wrapError(err, opts.FileName)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExtractNameFromArchive looks at an archive file to try and determine the name
|
|
||||||
// for a given element in an archive. Because of... who knows why, each file type
|
|
||||||
// uses different methods to determine the file name.
|
|
||||||
//
|
|
||||||
// If there is a archiver.File#Sys() value present we will try to use the name
|
|
||||||
// present in there, otherwise falling back to archiver.File#Name() if all else
|
|
||||||
// fails. Without this logic present, some archive types such as zip/tars/etc.
|
|
||||||
// will write all of the files to the base directory, rather than the nested
|
|
||||||
// directory that is expected.
|
|
||||||
//
|
|
||||||
// For files like ".rar" types, there is no f.Sys() value present, and the value
|
|
||||||
// of archiver.File#Name() will be what you need.
|
|
||||||
func ExtractNameFromArchive(f archiver.File) string {
|
|
||||||
sys := f.Sys()
|
|
||||||
// Some archive types won't have a value returned when you call f.Sys() on them,
|
|
||||||
// such as ".rar" archives for example. In those cases the only thing you can do
|
|
||||||
// is hope that "f.Name()" is actually correct for them.
|
|
||||||
if sys == nil {
|
|
||||||
return f.Name()
|
|
||||||
}
|
|
||||||
switch s := sys.(type) {
|
|
||||||
case *zip.FileHeader:
|
|
||||||
return s.Name
|
|
||||||
case *zip2.FileHeader:
|
|
||||||
return s.Name
|
|
||||||
case *tar.Header:
|
|
||||||
return s.Name
|
|
||||||
case *gzip.Header:
|
|
||||||
return s.Name
|
|
||||||
case *gzip2.Header:
|
|
||||||
return s.Name
|
|
||||||
default:
|
|
||||||
// At this point we cannot figure out what type of archive this might be so
|
|
||||||
// just try to find the name field in the struct. If it is found return it.
|
|
||||||
field := reflect.Indirect(reflect.ValueOf(sys)).FieldByName("Name")
|
|
||||||
if field.IsValid() {
|
|
||||||
return field.String()
|
|
||||||
}
|
}
|
||||||
// Fallback to the basename of the file at this point. There is nothing we can really
|
|
||||||
// do to try and figure out what the underlying directory of the file is supposed to
|
return nil
|
||||||
// be since it didn't implement a name field.
|
|
||||||
return f.Name()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Decompress and extract archive
|
||||||
|
return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
|
||||||
|
if f.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
p := filepath.Join(opts.Directory, f.NameInArchive)
|
||||||
|
// If it is ignored, just don't do anything with the file and skip over it.
|
||||||
|
if err := fs.IsIgnored(p); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
r, err := f.Open()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
if err := fs.Write(p, r, f.Size(), f.Mode()); err != nil {
|
||||||
|
return wrapError(err, opts.FileName)
|
||||||
|
}
|
||||||
|
// Update the file modification time to the one set in the archive.
|
||||||
|
if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
|
||||||
|
return wrapError(err, opts.FileName)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,17 +3,18 @@ package filesystem
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
"os"
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Given an archive named test.{ext}, with the following file structure:
|
// Given an archive named test.{ext}, with the following file structure:
|
||||||
|
//
|
||||||
// test/
|
// test/
|
||||||
// |──inside/
|
// |──inside/
|
||||||
// |────finside.txt
|
// |────finside.txt
|
||||||
// |──outside.txt
|
// |──outside.txt
|
||||||
|
//
|
||||||
// this test will ensure that it's being decompressed as expected
|
// this test will ensure that it's being decompressed as expected
|
||||||
func TestFilesystem_DecompressFile(t *testing.T) {
|
func TestFilesystem_DecompressFile(t *testing.T) {
|
||||||
g := Goblin(t)
|
g := Goblin(t)
|
||||||
|
@ -47,9 +48,7 @@ func TestFilesystem_DecompressFile(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,26 +1,29 @@
|
||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
"slices"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
"github.com/karrick/godirwalk"
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SpaceCheckingOpts struct {
|
type SpaceCheckingOpts struct {
|
||||||
AllowStaleResponse bool
|
AllowStaleResponse bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: can this be replaced with some sort of atomic? Like atomic.Pointer?
|
||||||
type usageLookupTime struct {
|
type usageLookupTime struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
value time.Time
|
value time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the last time that a disk space lookup was performed.
|
// Set sets the last time that a disk space lookup was performed.
|
||||||
func (ult *usageLookupTime) Set(t time.Time) {
|
func (ult *usageLookupTime) Set(t time.Time) {
|
||||||
ult.Lock()
|
ult.Lock()
|
||||||
ult.value = t
|
ult.value = t
|
||||||
|
@ -35,14 +38,15 @@ func (ult *usageLookupTime) Get() time.Time {
|
||||||
return ult.value
|
return ult.value
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
|
// MaxDisk returns the maximum amount of disk space that this Filesystem
|
||||||
|
// instance is allowed to use.
|
||||||
func (fs *Filesystem) MaxDisk() int64 {
|
func (fs *Filesystem) MaxDisk() int64 {
|
||||||
return atomic.LoadInt64(&fs.diskLimit)
|
return fs.unixFS.Limit()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets the disk space limit for this Filesystem instance.
|
// SetDiskLimit sets the disk space limit for this Filesystem instance.
|
||||||
func (fs *Filesystem) SetDiskLimit(i int64) {
|
func (fs *Filesystem) SetDiskLimit(i int64) {
|
||||||
atomic.SwapInt64(&fs.diskLimit, i)
|
fs.unixFS.SetLimit(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The same concept as HasSpaceAvailable however this will return an error if there is
|
// The same concept as HasSpaceAvailable however this will return an error if there is
|
||||||
|
@ -65,7 +69,7 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
|
||||||
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
||||||
size, err := fs.DiskUsage(allowStaleValue)
|
size, err := fs.DiskUsage(allowStaleValue)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
|
log.WithField("root", fs.Path()).WithField("error", err).Warn("failed to determine root fs directory size")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If space is -1 or 0 just return true, means they're allowed unlimited.
|
// If space is -1 or 0 just return true, means they're allowed unlimited.
|
||||||
|
@ -84,7 +88,7 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
|
||||||
// function for critical logical checks. It should only be used in areas where the actual disk usage
|
// function for critical logical checks. It should only be used in areas where the actual disk usage
|
||||||
// does not need to be perfect, e.g. API responses for server resource usage.
|
// does not need to be perfect, e.g. API responses for server resource usage.
|
||||||
func (fs *Filesystem) CachedUsage() int64 {
|
func (fs *Filesystem) CachedUsage() int64 {
|
||||||
return atomic.LoadInt64(&fs.diskUsed)
|
return fs.unixFS.Usage()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Internal helper function to allow other parts of the codebase to check the total used disk space
|
// Internal helper function to allow other parts of the codebase to check the total used disk space
|
||||||
|
@ -114,14 +118,14 @@ func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
|
||||||
// currently performing a lookup, just do the disk usage calculation in the background.
|
// currently performing a lookup, just do the disk usage calculation in the background.
|
||||||
go func(fs *Filesystem) {
|
go func(fs *Filesystem) {
|
||||||
if _, err := fs.updateCachedDiskUsage(); err != nil {
|
if _, err := fs.updateCachedDiskUsage(); err != nil {
|
||||||
log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
|
log.WithField("root", fs.Path()).WithField("error", err).Warn("failed to update fs disk usage from within routine")
|
||||||
}
|
}
|
||||||
}(fs)
|
}(fs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return the currently cached value back to the calling function.
|
// Return the currently cached value back to the calling function.
|
||||||
return atomic.LoadInt64(&fs.diskUsed), nil
|
return fs.unixFS.Usage(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Updates the currently used disk space for a server.
|
// Updates the currently used disk space for a server.
|
||||||
|
@ -149,63 +153,56 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
|
||||||
// error encountered.
|
// error encountered.
|
||||||
fs.lastLookupTime.Set(time.Now())
|
fs.lastLookupTime.Set(time.Now())
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, size)
|
fs.unixFS.SetUsage(size)
|
||||||
|
|
||||||
return size, err
|
return size, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Determines the directory size of a given location by running parallel tasks to iterate
|
// DirectorySize calculates the size of a directory and its descendants.
|
||||||
// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
|
func (fs *Filesystem) DirectorySize(root string) (int64, error) {
|
||||||
// on locations with tons of files, so it is recommended that you cache the output.
|
dirfd, name, closeFd, err := fs.unixFS.SafePath(root)
|
||||||
func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
|
defer closeFd()
|
||||||
d, err := fs.SafePath(dir)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var size int64
|
var hardLinks []uint64
|
||||||
var st syscall.Stat_t
|
|
||||||
|
|
||||||
err = godirwalk.Walk(d, &godirwalk.Options{
|
var size atomic.Int64
|
||||||
Unsorted: true,
|
err = fs.unixFS.WalkDirat(dirfd, name, func(dirfd int, name, _ string, d ufs.DirEntry, err error) error {
|
||||||
Callback: func(p string, e *godirwalk.Dirent) error {
|
if err != nil {
|
||||||
// If this is a symlink then resolve the final destination of it before trying to continue walking
|
return errors.Wrap(err, "walkdirat err")
|
||||||
// over its contents. If it resolves outside the server data directory just skip everything else for
|
}
|
||||||
// it. Otherwise, allow it to continue.
|
|
||||||
if e.IsSymlink() {
|
|
||||||
if _, err := fs.SafePath(p); err != nil {
|
|
||||||
if IsErrorCode(err, ErrCodePathResolution) {
|
|
||||||
return godirwalk.SkipThis
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !e.IsDir() {
|
|
||||||
syscall.Lstat(p, &st)
|
|
||||||
atomic.AddInt64(&size, st.Size)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
// Only calculate the size of regular files.
|
||||||
|
if !d.Type().IsRegular() {
|
||||||
return nil
|
return nil
|
||||||
},
|
}
|
||||||
})
|
|
||||||
|
|
||||||
return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
|
info, err := fs.unixFS.Lstatat(dirfd, name)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrap(err, "lstatat err")
|
||||||
|
}
|
||||||
|
|
||||||
|
var sysFileInfo = info.Sys().(*unix.Stat_t)
|
||||||
|
if sysFileInfo.Nlink > 1 {
|
||||||
|
// Hard links have the same inode number
|
||||||
|
if slices.Contains(hardLinks, sysFileInfo.Ino) {
|
||||||
|
// Don't add hard links size twice
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
hardLinks = append(hardLinks, sysFileInfo.Ino)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
size.Add(info.Size())
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return size.Load(), errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to determine if a server has space available for a file of a given size.
|
|
||||||
// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
|
|
||||||
// will be raised.
|
|
||||||
func (fs *Filesystem) HasSpaceFor(size int64) error {
|
func (fs *Filesystem) HasSpaceFor(size int64) error {
|
||||||
if fs.MaxDisk() == 0 {
|
if !fs.unixFS.CanFit(size) {
|
||||||
return nil
|
|
||||||
}
|
|
||||||
s, err := fs.DiskUsage(true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if (s + size) > fs.MaxDisk() {
|
|
||||||
return newFilesystemError(ErrCodeDiskSpace, nil)
|
return newFilesystemError(ErrCodeDiskSpace, nil)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -213,24 +210,5 @@ func (fs *Filesystem) HasSpaceFor(size int64) error {
|
||||||
|
|
||||||
// Updates the disk usage for the Filesystem instance.
|
// Updates the disk usage for the Filesystem instance.
|
||||||
func (fs *Filesystem) addDisk(i int64) int64 {
|
func (fs *Filesystem) addDisk(i int64) int64 {
|
||||||
size := atomic.LoadInt64(&fs.diskUsed)
|
return fs.unixFS.Add(i)
|
||||||
|
|
||||||
// Sorry go gods. This is ugly but the best approach I can come up with for right
|
|
||||||
// now without completely re-evaluating the logic we use for determining disk space.
|
|
||||||
//
|
|
||||||
// Normally I would just be using the atomic load right below, but I'm not sure about
|
|
||||||
// the scenarios where it is 0 because nothing has run that would trigger a disk size
|
|
||||||
// calculation?
|
|
||||||
//
|
|
||||||
// Perhaps that isn't even a concern for the sake of this?
|
|
||||||
if !fs.isTest {
|
|
||||||
size, _ = fs.DiskUsage(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we're dropping below 0 somehow just cap it to 0.
|
|
||||||
if (size + i) < 0 {
|
|
||||||
return atomic.SwapInt64(&fs.diskUsed, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
return atomic.AddInt64(&fs.diskUsed, i)
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,11 +2,12 @@ package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"github.com/apex/log"
|
"github.com/apex/log"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ErrorCode string
|
type ErrorCode string
|
||||||
|
@ -86,15 +87,15 @@ func (e *Error) Unwrap() error {
|
||||||
|
|
||||||
// Generates an error logger instance with some basic information.
|
// Generates an error logger instance with some basic information.
|
||||||
func (fs *Filesystem) error(err error) *log.Entry {
|
func (fs *Filesystem) error(err error) *log.Entry {
|
||||||
return log.WithField("subsystem", "filesystem").WithField("root", fs.root).WithField("error", err)
|
return log.WithField("subsystem", "filesystem").WithField("root", fs.Path()).WithField("error", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Handle errors encountered when walking through directories.
|
// Handle errors encountered when walking through directories.
|
||||||
//
|
//
|
||||||
// If there is a path resolution error just skip the item entirely. Only return this for a
|
// If there is a path resolution error just skip the item entirely. Only return this for a
|
||||||
// directory, otherwise return nil. Returning this error for a file will stop the walking
|
// directory, otherwise return nil. Returning this error for a file will stop the walking
|
||||||
// for the remainder of the directory. This is assuming an os.FileInfo struct was even returned.
|
// for the remainder of the directory. This is assuming an FileInfo struct was even returned.
|
||||||
func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
|
func (fs *Filesystem) handleWalkerError(err error, f ufs.FileInfo) error {
|
||||||
if !IsErrorCode(err, ErrCodePathResolution) {
|
if !IsErrorCode(err, ErrCodePathResolution) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,13 +1,11 @@
|
||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sort"
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
@ -15,212 +13,208 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
|
"github.com/apex/log"
|
||||||
"github.com/gabriel-vasile/mimetype"
|
"github.com/gabriel-vasile/mimetype"
|
||||||
"github.com/karrick/godirwalk"
|
|
||||||
ignore "github.com/sabhiram/go-gitignore"
|
ignore "github.com/sabhiram/go-gitignore"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
"github.com/pterodactyl/wings/system"
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Filesystem struct {
|
type Filesystem struct {
|
||||||
|
unixFS *ufs.Quota
|
||||||
|
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
lastLookupTime *usageLookupTime
|
lastLookupTime *usageLookupTime
|
||||||
lookupInProgress *system.AtomicBool
|
lookupInProgress atomic.Bool
|
||||||
diskUsed int64
|
|
||||||
diskCheckInterval time.Duration
|
diskCheckInterval time.Duration
|
||||||
denylist *ignore.GitIgnore
|
denylist *ignore.GitIgnore
|
||||||
|
|
||||||
// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
|
|
||||||
diskLimit int64
|
|
||||||
|
|
||||||
// The root data directory path for this Filesystem instance.
|
|
||||||
root string
|
|
||||||
|
|
||||||
isTest bool
|
isTest bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates a new Filesystem instance for a given server.
|
// New creates a new Filesystem instance for a given server.
|
||||||
func New(root string, size int64, denylist []string) *Filesystem {
|
func New(root string, size int64, denylist []string) (*Filesystem, error) {
|
||||||
|
if err := os.MkdirAll(root, 0o755); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
unixFS, err := ufs.NewUnixFS(root, config.UseOpenat2())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
quota := ufs.NewQuota(unixFS, size)
|
||||||
|
|
||||||
return &Filesystem{
|
return &Filesystem{
|
||||||
root: root,
|
unixFS: quota,
|
||||||
diskLimit: size,
|
|
||||||
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
|
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
|
||||||
lastLookupTime: &usageLookupTime{},
|
lastLookupTime: &usageLookupTime{},
|
||||||
lookupInProgress: system.NewAtomicBool(false),
|
|
||||||
denylist: ignore.CompileIgnoreLines(denylist...),
|
denylist: ignore.CompileIgnoreLines(denylist...),
|
||||||
}
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Path returns the root path for the Filesystem instance.
|
// Path returns the root path for the Filesystem instance.
|
||||||
func (fs *Filesystem) Path() string {
|
func (fs *Filesystem) Path() string {
|
||||||
return fs.root
|
return fs.unixFS.BasePath()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDir reads directory entries.
|
||||||
|
func (fs *Filesystem) ReadDir(path string) ([]ufs.DirEntry, error) {
|
||||||
|
return fs.unixFS.ReadDir(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadDirStat is like ReadDir except that it returns FileInfo for each entry
|
||||||
|
// instead of just a DirEntry.
|
||||||
|
func (fs *Filesystem) ReadDirStat(path string) ([]ufs.FileInfo, error) {
|
||||||
|
return ufs.ReadDirMap(fs.unixFS.UnixFS, path, func(e ufs.DirEntry) (ufs.FileInfo, error) {
|
||||||
|
return e.Info()
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// File returns a reader for a file instance as well as the stat information.
|
// File returns a reader for a file instance as well as the stat information.
|
||||||
func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
|
func (fs *Filesystem) File(p string) (ufs.File, Stat, error) {
|
||||||
cleaned, err := fs.SafePath(p)
|
f, err := fs.unixFS.Open(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, Stat{}, errors.WithStackIf(err)
|
return nil, Stat{}, err
|
||||||
}
|
}
|
||||||
st, err := fs.Stat(cleaned)
|
st, err := statFromFile(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, os.ErrNotExist) {
|
_ = f.Close()
|
||||||
return nil, Stat{}, newFilesystemError(ErrNotExist, err)
|
return nil, Stat{}, err
|
||||||
}
|
|
||||||
return nil, Stat{}, errors.WithStackIf(err)
|
|
||||||
}
|
|
||||||
if st.IsDir() {
|
|
||||||
return nil, Stat{}, newFilesystemError(ErrCodeIsDirectory, nil)
|
|
||||||
}
|
|
||||||
f, err := os.Open(cleaned)
|
|
||||||
if err != nil {
|
|
||||||
return nil, Stat{}, errors.WithStackIf(err)
|
|
||||||
}
|
}
|
||||||
return f, st, nil
|
return f, st, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) UnixFS() *ufs.UnixFS {
|
||||||
|
return fs.unixFS.UnixFS
|
||||||
|
}
|
||||||
|
|
||||||
// Touch acts by creating the given file and path on the disk if it is not present
|
// Touch acts by creating the given file and path on the disk if it is not present
|
||||||
// already. If it is present, the file is opened using the defaults which will truncate
|
// already. If it is present, the file is opened using the defaults which will truncate
|
||||||
// the contents. The opened file is then returned to the caller.
|
// the contents. The opened file is then returned to the caller.
|
||||||
func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
|
func (fs *Filesystem) Touch(p string, flag int) (ufs.File, error) {
|
||||||
cleaned, err := fs.SafePath(p)
|
return fs.unixFS.Touch(p, flag, 0o644)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(cleaned, flag, 0o644)
|
|
||||||
if err == nil {
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
// If the error is not because it doesn't exist then we just need to bail at this point.
|
|
||||||
if !errors.Is(err, os.ErrNotExist) {
|
|
||||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file handle")
|
|
||||||
}
|
|
||||||
// Only create and chown the directory if it doesn't exist.
|
|
||||||
if _, err := os.Stat(filepath.Dir(cleaned)); errors.Is(err, os.ErrNotExist) {
|
|
||||||
// Create the path leading up to the file we're trying to create, setting the final perms
|
|
||||||
// on it as we go.
|
|
||||||
if err := os.MkdirAll(filepath.Dir(cleaned), 0o755); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to create directory tree")
|
|
||||||
}
|
|
||||||
if err := fs.Chown(filepath.Dir(cleaned)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
o := &fileOpener{}
|
|
||||||
// Try to open the file now that we have created the pathing necessary for it, and then
|
|
||||||
// Chown that file so that the permissions don't mess with things.
|
|
||||||
f, err = o.open(cleaned, flag, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file with wait")
|
|
||||||
}
|
|
||||||
_ = fs.Chown(cleaned)
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Writefile writes a file to the system. If the file does not already exist one
|
// Writefile writes a file to the system. If the file does not already exist one
|
||||||
// will be created. This will also properly recalculate the disk space used by
|
// will be created. This will also properly recalculate the disk space used by
|
||||||
// the server when writing new files or modifying existing ones.
|
// the server when writing new files or modifying existing ones.
|
||||||
|
//
|
||||||
|
// DEPRECATED: use `Write` instead.
|
||||||
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
|
||||||
cleaned, err := fs.SafePath(p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var currentSize int64
|
var currentSize int64
|
||||||
// If the file does not exist on the system already go ahead and create the pathway
|
st, err := fs.unixFS.Stat(p)
|
||||||
// to it and an empty file. We'll then write to it later on after this completes.
|
if err != nil && !errors.Is(err, ufs.ErrNotExist) {
|
||||||
stat, err := os.Stat(cleaned)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
|
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
|
||||||
} else if err == nil {
|
} else if err == nil {
|
||||||
if stat.IsDir() {
|
if st.IsDir() {
|
||||||
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: cleaned})
|
// TODO: resolved
|
||||||
|
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: ""})
|
||||||
}
|
}
|
||||||
currentSize = stat.Size()
|
currentSize = st.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Touch the file and return the handle to it at this point. This will
|
||||||
|
// create or truncate the file, and create any necessary parent directories
|
||||||
|
// if they are missing.
|
||||||
|
file, err := fs.unixFS.Touch(p, ufs.O_RDWR|ufs.O_TRUNC, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error touching file: %w", err)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
// Do not use CopyBuffer here, it is wasteful as the file implements
|
||||||
|
// io.ReaderFrom, which causes it to not use the buffer anyways.
|
||||||
|
n, err := io.Copy(file, r)
|
||||||
|
|
||||||
|
// Adjust the disk usage to account for the old size and the new size of the file.
|
||||||
|
fs.unixFS.Add(n - currentSize)
|
||||||
|
|
||||||
|
if err := fs.chownFile(p); err != nil {
|
||||||
|
return fmt.Errorf("error chowning file: %w", err)
|
||||||
|
}
|
||||||
|
// Return the error from io.Copy.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) Write(p string, r io.Reader, newSize int64, mode ufs.FileMode) error {
|
||||||
|
var currentSize int64
|
||||||
|
st, err := fs.unixFS.Stat(p)
|
||||||
|
if err != nil && !errors.Is(err, ufs.ErrNotExist) {
|
||||||
|
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
|
||||||
|
} else if err == nil {
|
||||||
|
if st.IsDir() {
|
||||||
|
// TODO: resolved
|
||||||
|
return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: ""})
|
||||||
|
}
|
||||||
|
currentSize = st.Size()
|
||||||
}
|
}
|
||||||
|
|
||||||
br := bufio.NewReader(r)
|
|
||||||
// Check that the new size we're writing to the disk can fit. If there is currently
|
// Check that the new size we're writing to the disk can fit. If there is currently
|
||||||
// a file we'll subtract that current file size from the size of the buffer to determine
|
// a file we'll subtract that current file size from the size of the buffer to determine
|
||||||
// the amount of new data we're writing (or amount we're removing if smaller).
|
// the amount of new data we're writing (or amount we're removing if smaller).
|
||||||
if err := fs.HasSpaceFor(int64(br.Size()) - currentSize); err != nil {
|
if err := fs.HasSpaceFor(newSize - currentSize); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Touch the file and return the handle to it at this point. This will create the file,
|
// Touch the file and return the handle to it at this point. This will
|
||||||
// any necessary directories, and set the proper owner of the file.
|
// create or truncate the file, and create any necessary parent directories
|
||||||
file, err := fs.Touch(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
// if they are missing.
|
||||||
|
file, err := fs.unixFS.Touch(p, ufs.O_RDWR|ufs.O_TRUNC, mode)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
buf := make([]byte, 1024*4)
|
if newSize == 0 {
|
||||||
sz, err := io.CopyBuffer(file, r, buf)
|
// Subtract the previous size of the file if the new size is 0.
|
||||||
|
fs.unixFS.Add(-currentSize)
|
||||||
|
} else {
|
||||||
|
// Do not use CopyBuffer here, it is wasteful as the file implements
|
||||||
|
// io.ReaderFrom, which causes it to not use the buffer anyways.
|
||||||
|
var n int64
|
||||||
|
n, err = io.Copy(file, io.LimitReader(r, newSize))
|
||||||
|
|
||||||
// Adjust the disk usage to account for the old size and the new size of the file.
|
// Adjust the disk usage to account for the old size and the new size of the file.
|
||||||
fs.addDisk(sz - currentSize)
|
fs.unixFS.Add(n - currentSize)
|
||||||
|
}
|
||||||
|
|
||||||
return fs.Chown(cleaned)
|
if err := fs.chownFile(p); err != nil {
|
||||||
}
|
|
||||||
|
|
||||||
// Creates a new directory (name) at a specified path (p) for the server.
|
|
||||||
func (fs *Filesystem) CreateDirectory(name string, p string) error {
|
|
||||||
cleaned, err := fs.SafePath(path.Join(p, name))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return os.MkdirAll(cleaned, 0o755)
|
// Return any remaining error.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Rename moves (or renames) a file or directory.
|
// CreateDirectory creates a new directory (name) at a specified path (p) for
|
||||||
func (fs *Filesystem) Rename(from string, to string) error {
|
// the server.
|
||||||
cleanedFrom, err := fs.SafePath(from)
|
func (fs *Filesystem) CreateDirectory(name string, p string) error {
|
||||||
if err != nil {
|
return fs.unixFS.MkdirAll(filepath.Join(p, name), 0o755)
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cleanedTo, err := fs.SafePath(to)
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the target file or directory already exists the rename function will fail, so just
|
|
||||||
// bail out now.
|
|
||||||
if _, err := os.Stat(cleanedTo); err == nil {
|
|
||||||
return os.ErrExist
|
|
||||||
}
|
|
||||||
|
|
||||||
if cleanedTo == fs.Path() {
|
|
||||||
return errors.New("attempting to rename into an invalid directory space")
|
|
||||||
}
|
|
||||||
|
|
||||||
d := strings.TrimSuffix(cleanedTo, path.Base(cleanedTo))
|
|
||||||
// Ensure that the directory we're moving into exists correctly on the system. Only do this if
|
|
||||||
// we're not at the root directory level.
|
|
||||||
if d != fs.Path() {
|
|
||||||
if mkerr := os.MkdirAll(d, 0o755); mkerr != nil {
|
|
||||||
return errors.WithMessage(mkerr, "failed to create directory structure for file rename")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Rename(cleanedFrom, cleanedTo); err != nil {
|
|
||||||
return errors.WithStack(err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Recursively iterates over a file or directory and sets the permissions on all of the
|
func (fs *Filesystem) Rename(oldpath, newpath string) error {
|
||||||
|
return fs.unixFS.Rename(oldpath, newpath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) Symlink(oldpath, newpath string) error {
|
||||||
|
return fs.unixFS.Symlink(oldpath, newpath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs *Filesystem) chownFile(name string) error {
|
||||||
|
if fs.isTest {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
uid := config.Get().System.User.Uid
|
||||||
|
gid := config.Get().System.User.Gid
|
||||||
|
return fs.unixFS.Lchown(name, uid, gid)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chown recursively iterates over a file or directory and sets the permissions on all of the
|
||||||
// underlying files. Iterate over all of the files and directories. If it is a file just
|
// underlying files. Iterate over all of the files and directories. If it is a file just
|
||||||
// go ahead and perform the chown operation. Otherwise dig deeper into the directory until
|
// go ahead and perform the chown operation. Otherwise dig deeper into the directory until
|
||||||
// we've run out of directories to dig into.
|
// we've run out of directories to dig into.
|
||||||
func (fs *Filesystem) Chown(path string) error {
|
func (fs *Filesystem) Chown(p string) error {
|
||||||
cleaned, err := fs.SafePath(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if fs.isTest {
|
if fs.isTest {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -228,55 +222,44 @@ func (fs *Filesystem) Chown(path string) error {
|
||||||
uid := config.Get().System.User.Uid
|
uid := config.Get().System.User.Uid
|
||||||
gid := config.Get().System.User.Gid
|
gid := config.Get().System.User.Gid
|
||||||
|
|
||||||
|
dirfd, name, closeFd, err := fs.unixFS.SafePath(p)
|
||||||
|
defer closeFd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// Start by just chowning the initial path that we received.
|
// Start by just chowning the initial path that we received.
|
||||||
if err := os.Chown(cleaned, uid, gid); err != nil {
|
if err := fs.unixFS.Lchownat(dirfd, name, uid, gid); err != nil {
|
||||||
return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
|
return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this is not a directory we can now return from the function, there is nothing
|
// If this is not a directory we can now return from the function, there is nothing
|
||||||
// left that we need to do.
|
// left that we need to do.
|
||||||
if st, err := os.Stat(cleaned); err != nil || !st.IsDir() {
|
if st, err := fs.unixFS.Lstatat(dirfd, name); err != nil || !st.IsDir() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this was a directory, begin walking over its contents recursively and ensure that all
|
// This walker is probably some of the most efficient code in Wings. It has
|
||||||
// of the subfiles and directories get their permissions updated as well.
|
// an internally re-used buffer for listing directory entries and doesn't
|
||||||
err = godirwalk.Walk(cleaned, &godirwalk.Options{
|
// need to check if every individual path it touches is safe as the code
|
||||||
Unsorted: true,
|
// doesn't traverse symlinks, is immune to symlink timing attacks, and
|
||||||
Callback: func(p string, e *godirwalk.Dirent) error {
|
// gives us a dirfd and file name to make a direct syscall with.
|
||||||
// Do not attempt to chown a symlink. Go's os.Chown function will affect the symlink
|
if err := fs.unixFS.WalkDirat(dirfd, name, func(dirfd int, name, _ string, info ufs.DirEntry, err error) error {
|
||||||
// so if it points to a location outside the data directory the user would be able to
|
if err != nil {
|
||||||
// (un)intentionally modify that files permissions.
|
return err
|
||||||
if e.IsSymlink() {
|
}
|
||||||
if e.IsDir() {
|
if err := fs.unixFS.Lchownat(dirfd, name, uid, gid); err != nil {
|
||||||
return godirwalk.SkipThis
|
return err
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
return nil
|
}); err != nil {
|
||||||
}
|
return fmt.Errorf("server/filesystem: chown: failed to chown during walk function: %w", err)
|
||||||
|
}
|
||||||
return os.Chown(p, uid, gid)
|
return nil
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
return errors.Wrap(err, "server/filesystem: chown: failed to chown during walk function")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
|
func (fs *Filesystem) Chmod(path string, mode ufs.FileMode) error {
|
||||||
cleaned, err := fs.SafePath(path)
|
return fs.unixFS.Chmod(path, mode)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if fs.isTest {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Chmod(cleaned, mode); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Begin looping up to 50 times to try and create a unique copy file name. This will take
|
// Begin looping up to 50 times to try and create a unique copy file name. This will take
|
||||||
|
@ -287,7 +270,7 @@ func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
|
||||||
// Could probably make this more efficient by checking if there are any files matching the copy
|
// Could probably make this more efficient by checking if there are any files matching the copy
|
||||||
// pattern, and trying to find the highest number and then incrementing it by one rather than
|
// pattern, and trying to find the highest number and then incrementing it by one rather than
|
||||||
// looping endlessly.
|
// looping endlessly.
|
||||||
func (fs *Filesystem) findCopySuffix(dir string, name string, extension string) (string, error) {
|
func (fs *Filesystem) findCopySuffix(dirfd int, name, extension string) (string, error) {
|
||||||
var i int
|
var i int
|
||||||
suffix := " copy"
|
suffix := " copy"
|
||||||
|
|
||||||
|
@ -299,11 +282,10 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
|
||||||
n := name + suffix + extension
|
n := name + suffix + extension
|
||||||
// If we stat the file and it does not exist that means we're good to create the copy. If it
|
// If we stat the file and it does not exist that means we're good to create the copy. If it
|
||||||
// does exist, we'll just continue to the next loop and try again.
|
// does exist, we'll just continue to the next loop and try again.
|
||||||
if _, err := fs.Stat(path.Join(dir, n)); err != nil {
|
if _, err := fs.unixFS.Lstatat(dirfd, n); err != nil {
|
||||||
if !errors.Is(err, os.ErrNotExist) {
|
if !errors.Is(err, ufs.ErrNotExist) {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -315,53 +297,68 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
|
||||||
return name + suffix + extension, nil
|
return name + suffix + extension, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Copies a given file to the same location and appends a suffix to the file to indicate that
|
// Copy copies a given file to the same location and appends a suffix to the
|
||||||
// it has been copied.
|
// file to indicate that it has been copied.
|
||||||
func (fs *Filesystem) Copy(p string) error {
|
func (fs *Filesystem) Copy(p string) error {
|
||||||
cleaned, err := fs.SafePath(p)
|
dirfd, name, closeFd, err := fs.unixFS.SafePath(p)
|
||||||
|
defer closeFd()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
source, err := fs.unixFS.OpenFileat(dirfd, name, ufs.O_RDONLY, 0)
|
||||||
s, err := os.Stat(cleaned)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
} else if s.IsDir() || !s.Mode().IsRegular() {
|
|
||||||
// If this is a directory or not a regular file, just throw a not-exist error
|
|
||||||
// since anything calling this function should understand what that means.
|
|
||||||
return os.ErrNotExist
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check that copying this file wouldn't put the server over its limit.
|
|
||||||
if err := fs.HasSpaceFor(s.Size()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
base := filepath.Base(cleaned)
|
|
||||||
relative := strings.TrimSuffix(strings.TrimPrefix(cleaned, fs.Path()), base)
|
|
||||||
extension := filepath.Ext(base)
|
|
||||||
name := strings.TrimSuffix(base, extension)
|
|
||||||
|
|
||||||
// Ensure that ".tar" is also counted as apart of the file extension.
|
|
||||||
// There might be a better way to handle this for other double file extensions,
|
|
||||||
// but this is a good workaround for now.
|
|
||||||
if strings.HasSuffix(name, ".tar") {
|
|
||||||
extension = ".tar" + extension
|
|
||||||
name = strings.TrimSuffix(name, ".tar")
|
|
||||||
}
|
|
||||||
|
|
||||||
source, err := os.Open(cleaned)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer source.Close()
|
defer source.Close()
|
||||||
|
info, err := source.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if info.IsDir() || !info.Mode().IsRegular() {
|
||||||
|
// If this is a directory or not a regular file, just throw a not-exist error
|
||||||
|
// since anything calling this function should understand what that means.
|
||||||
|
return ufs.ErrNotExist
|
||||||
|
}
|
||||||
|
currentSize := info.Size()
|
||||||
|
|
||||||
n, err := fs.findCopySuffix(relative, name, extension)
|
// Check that copying this file wouldn't put the server over its limit.
|
||||||
|
if err := fs.HasSpaceFor(currentSize); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
base := info.Name()
|
||||||
|
extension := filepath.Ext(base)
|
||||||
|
baseName := strings.TrimSuffix(base, extension)
|
||||||
|
|
||||||
|
// Ensure that ".tar" is also counted as apart of the file extension.
|
||||||
|
// There might be a better way to handle this for other double file extensions,
|
||||||
|
// but this is a good workaround for now.
|
||||||
|
if strings.HasSuffix(baseName, ".tar") {
|
||||||
|
extension = ".tar" + extension
|
||||||
|
baseName = strings.TrimSuffix(baseName, ".tar")
|
||||||
|
}
|
||||||
|
|
||||||
|
newName, err := fs.findCopySuffix(dirfd, baseName, extension)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
dst, err := fs.unixFS.OpenFileat(dirfd, newName, ufs.O_WRONLY|ufs.O_CREATE, info.Mode())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return fs.Writefile(path.Join(relative, n), source)
|
// Do not use CopyBuffer here, it is wasteful as the file implements
|
||||||
|
// io.ReaderFrom, which causes it to not use the buffer anyways.
|
||||||
|
n, err := io.Copy(dst, io.LimitReader(source, currentSize))
|
||||||
|
fs.unixFS.Add(n)
|
||||||
|
|
||||||
|
if !fs.isTest {
|
||||||
|
if err := fs.unixFS.Lchownat(dirfd, newName, config.Get().System.User.Uid, config.Get().System.User.Gid); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Return the error from io.Copy.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// TruncateRootDirectory removes _all_ files and directories from a server's
|
// TruncateRootDirectory removes _all_ files and directories from a server's
|
||||||
|
@ -373,172 +370,128 @@ func (fs *Filesystem) TruncateRootDirectory() error {
|
||||||
if err := os.Mkdir(fs.Path(), 0o755); err != nil {
|
if err := os.Mkdir(fs.Path(), 0o755); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
_ = fs.unixFS.Close()
|
||||||
|
unixFS, err := ufs.NewUnixFS(fs.Path(), config.UseOpenat2())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var limit int64
|
||||||
|
if fs.isTest {
|
||||||
|
limit = 0
|
||||||
|
} else {
|
||||||
|
limit = fs.unixFS.Limit()
|
||||||
|
}
|
||||||
|
fs.unixFS = ufs.NewQuota(unixFS, limit)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete removes a file or folder from the system. Prevents the user from
|
// Delete removes a file or folder from the system. Prevents the user from
|
||||||
// accidentally (or maliciously) removing their root server data directory.
|
// accidentally (or maliciously) removing their root server data directory.
|
||||||
func (fs *Filesystem) Delete(p string) error {
|
func (fs *Filesystem) Delete(p string) error {
|
||||||
wg := sync.WaitGroup{}
|
return fs.unixFS.RemoveAll(p)
|
||||||
// This is one of the few (only?) places in the codebase where we're explicitly not using
|
|
||||||
// the SafePath functionality when working with user provided input. If we did, you would
|
|
||||||
// not be able to delete a file that is a symlink pointing to a location outside of the data
|
|
||||||
// directory.
|
|
||||||
//
|
|
||||||
// We also want to avoid resolving a symlink that points _within_ the data directory and thus
|
|
||||||
// deleting the actual source file for the symlink rather than the symlink itself. For these
|
|
||||||
// purposes just resolve the actual file path using filepath.Join() and confirm that the path
|
|
||||||
// exists within the data directory.
|
|
||||||
resolved := fs.unsafeFilePath(p)
|
|
||||||
if !fs.unsafeIsInDataDirectory(resolved) {
|
|
||||||
return NewBadPathResolution(p, resolved)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block any whoopsies.
|
|
||||||
if resolved == fs.Path() {
|
|
||||||
return errors.New("cannot delete root server directory")
|
|
||||||
}
|
|
||||||
|
|
||||||
if st, err := os.Lstat(resolved); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
fs.error(err).Warn("error while attempting to stat file before deletion")
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if !st.IsDir() {
|
|
||||||
fs.addDisk(-st.Size())
|
|
||||||
} else {
|
|
||||||
wg.Add(1)
|
|
||||||
go func(wg *sync.WaitGroup, st os.FileInfo, resolved string) {
|
|
||||||
defer wg.Done()
|
|
||||||
if s, err := fs.DirectorySize(resolved); err == nil {
|
|
||||||
fs.addDisk(-s)
|
|
||||||
}
|
|
||||||
}(&wg, st, resolved)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
return os.RemoveAll(resolved)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type fileOpener struct {
|
//type fileOpener struct {
|
||||||
busy uint
|
// fs *Filesystem
|
||||||
}
|
// busy uint
|
||||||
|
//}
|
||||||
// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
|
//
|
||||||
// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
|
//// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
|
||||||
// has been exhaused, at which point we will abort with an error.
|
//// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
|
||||||
func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File, error) {
|
//// has been exhaused, at which point we will abort with an error.
|
||||||
for {
|
//func (fo *fileOpener) open(path string, flags int, perm ufs.FileMode) (ufs.File, error) {
|
||||||
f, err := os.OpenFile(path, flags, perm)
|
// for {
|
||||||
|
// f, err := fo.fs.unixFS.OpenFile(path, flags, perm)
|
||||||
// If there is an error because the text file is busy, go ahead and sleep for a few
|
//
|
||||||
// hundred milliseconds and then try again up to three times before just returning the
|
// // If there is an error because the text file is busy, go ahead and sleep for a few
|
||||||
// error back to the caller.
|
// // hundred milliseconds and then try again up to three times before just returning the
|
||||||
//
|
// // error back to the caller.
|
||||||
// Based on code from: https://github.com/golang/go/issues/22220#issuecomment-336458122
|
// //
|
||||||
if err != nil && fo.busy < 3 && strings.Contains(err.Error(), "text file busy") {
|
// // Based on code from: https://github.com/golang/go/issues/22220#issuecomment-336458122
|
||||||
time.Sleep(100 * time.Millisecond << fo.busy)
|
// if err != nil && fo.busy < 3 && strings.Contains(err.Error(), "text file busy") {
|
||||||
fo.busy++
|
// time.Sleep(100 * time.Millisecond << fo.busy)
|
||||||
continue
|
// fo.busy++
|
||||||
}
|
// continue
|
||||||
|
// }
|
||||||
return f, err
|
//
|
||||||
}
|
// return f, err
|
||||||
}
|
// }
|
||||||
|
//}
|
||||||
|
|
||||||
// ListDirectory lists the contents of a given directory and returns stat
|
// ListDirectory lists the contents of a given directory and returns stat
|
||||||
// information about each file and folder within it.
|
// information about each file and folder within it.
|
||||||
func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
|
func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
|
||||||
cleaned, err := fs.SafePath(p)
|
// Read entries from the path on the filesystem, using the mapped reader, so
|
||||||
if err != nil {
|
// we can map the DirEntry slice into a Stat slice with mimetype information.
|
||||||
return nil, err
|
out, err := ufs.ReadDirMap(fs.unixFS.UnixFS, p, func(e ufs.DirEntry) (Stat, error) {
|
||||||
}
|
info, err := e.Info()
|
||||||
|
if err != nil {
|
||||||
files, err := ioutil.ReadDir(cleaned)
|
return Stat{}, err
|
||||||
if err != nil {
|
}
|
||||||
return nil, err
|
|
||||||
}
|
var d string
|
||||||
|
if e.Type().IsDir() {
|
||||||
var wg sync.WaitGroup
|
d = "inode/directory"
|
||||||
|
} else {
|
||||||
// You must initialize the output of this directory as a non-nil value otherwise
|
d = "application/octet-stream"
|
||||||
// when it is marshaled into a JSON object you'll just get 'null' back, which will
|
}
|
||||||
// break the panel badly.
|
var m *mimetype.MIME
|
||||||
out := make([]Stat, len(files))
|
if e.Type().IsRegular() {
|
||||||
|
// TODO: I should probably find a better way to do this.
|
||||||
// Iterate over all of the files and directories returned and perform an async process
|
eO := e.(interface {
|
||||||
// to get the mime-type for them all.
|
Open() (ufs.File, error)
|
||||||
for i, file := range files {
|
})
|
||||||
wg.Add(1)
|
f, err := eO.Open()
|
||||||
|
if err != nil {
|
||||||
go func(idx int, f os.FileInfo) {
|
return Stat{}, err
|
||||||
defer wg.Done()
|
}
|
||||||
|
m, err = mimetype.DetectReader(f)
|
||||||
var m *mimetype.MIME
|
if err != nil {
|
||||||
d := "inode/directory"
|
log.Error(err.Error())
|
||||||
if !f.IsDir() {
|
}
|
||||||
cleanedp := filepath.Join(cleaned, f.Name())
|
_ = f.Close()
|
||||||
if f.Mode()&os.ModeSymlink != 0 {
|
}
|
||||||
cleanedp, _ = fs.SafePath(filepath.Join(cleaned, f.Name()))
|
|
||||||
}
|
st := Stat{FileInfo: info, Mimetype: d}
|
||||||
|
if m != nil {
|
||||||
// Don't try to detect the type on a pipe — this will just hang the application and
|
st.Mimetype = m.String()
|
||||||
// you'll never get a response back.
|
}
|
||||||
//
|
return st, nil
|
||||||
// @see https://github.com/pterodactyl/panel/issues/4059
|
})
|
||||||
if cleanedp != "" && f.Mode()&os.ModeNamedPipe == 0 {
|
if err != nil {
|
||||||
m, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
|
return nil, err
|
||||||
} else {
|
}
|
||||||
// Just pass this for an unknown type because the file could not safely be resolved within
|
|
||||||
// the server data path.
|
// Sort entries alphabetically.
|
||||||
d = "application/octet-stream"
|
slices.SortStableFunc(out, func(a, b Stat) int {
|
||||||
}
|
switch {
|
||||||
}
|
case a.Name() == b.Name():
|
||||||
|
return 0
|
||||||
st := Stat{FileInfo: f, Mimetype: d}
|
case a.Name() > b.Name():
|
||||||
if m != nil {
|
return 1
|
||||||
st.Mimetype = m.String()
|
default:
|
||||||
}
|
return -1
|
||||||
out[idx] = st
|
|
||||||
}(i, file)
|
|
||||||
}
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Sort the output alphabetically to begin with since we've run the output
|
|
||||||
// through an asynchronous process and the order is gonna be very random.
|
|
||||||
sort.SliceStable(out, func(i, j int) bool {
|
|
||||||
if out[i].Name() == out[j].Name() || out[i].Name() > out[j].Name() {
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
return false
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Then, sort it so that directories are listed first in the output. Everything
|
// Sort folders before other file types.
|
||||||
// will continue to be alphabetized at this point.
|
slices.SortStableFunc(out, func(a, b Stat) int {
|
||||||
sort.SliceStable(out, func(i, j int) bool {
|
switch {
|
||||||
return out[i].IsDir()
|
case a.IsDir() && b.IsDir():
|
||||||
|
return 0
|
||||||
|
case a.IsDir():
|
||||||
|
return -1
|
||||||
|
default:
|
||||||
|
return 1
|
||||||
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fs *Filesystem) Chtimes(path string, atime, mtime time.Time) error {
|
func (fs *Filesystem) Chtimes(path string, atime, mtime time.Time) error {
|
||||||
cleaned, err := fs.SafePath(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if fs.isTest {
|
if fs.isTest {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
return fs.unixFS.Chtimes(path, atime, mtime)
|
||||||
if err := os.Chtimes(cleaned, atime, mtime); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,12 +7,13 @@ import (
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
"testing"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
|
|
||||||
"github.com/pterodactyl/wings/config"
|
"github.com/pterodactyl/wings/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -28,15 +29,23 @@ func NewFs() (*Filesystem, *rootFs) {
|
||||||
tmpDir, err := os.MkdirTemp(os.TempDir(), "pterodactyl")
|
tmpDir, err := os.MkdirTemp(os.TempDir(), "pterodactyl")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
return nil, nil
|
||||||
}
|
}
|
||||||
// defer os.RemoveAll(tmpDir)
|
|
||||||
|
|
||||||
rfs := rootFs{root: tmpDir}
|
rfs := rootFs{root: tmpDir}
|
||||||
|
|
||||||
rfs.reset()
|
p := filepath.Join(tmpDir, "server")
|
||||||
|
if err := os.Mkdir(p, 0o755); err != nil {
|
||||||
|
panic(err)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
fs := New(filepath.Join(tmpDir, "/server"), 0, []string{})
|
fs, _ := New(p, 0, []string{})
|
||||||
fs.isTest = true
|
fs.isTest = true
|
||||||
|
if err := fs.TruncateRootDirectory(); err != nil {
|
||||||
|
panic(err)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
return fs, &rfs
|
return fs, &rfs
|
||||||
}
|
}
|
||||||
|
@ -45,7 +54,7 @@ type rootFs struct {
|
||||||
root string
|
root string
|
||||||
}
|
}
|
||||||
|
|
||||||
func getFileContent(file *os.File) string {
|
func getFileContent(file ufs.File) string {
|
||||||
var w bytes.Buffer
|
var w bytes.Buffer
|
||||||
if _, err := bufio.NewReader(file).WriteTo(&w); err != nil {
|
if _, err := bufio.NewReader(file).WriteTo(&w); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
@ -54,11 +63,11 @@ func getFileContent(file *os.File) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
|
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
|
||||||
f, err := os.Create(filepath.Join(rfs.root, "/server", p))
|
f, err := os.Create(filepath.Join(rfs.root, "server", p))
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
f.Write(c)
|
_, _ = f.Write(c)
|
||||||
f.Close()
|
_ = f.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
|
@ -69,19 +78,7 @@ func (rfs *rootFs) CreateServerFileFromString(p string, c string) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rfs *rootFs) StatServerFile(p string) (os.FileInfo, error) {
|
func (rfs *rootFs) StatServerFile(p string) (os.FileInfo, error) {
|
||||||
return os.Stat(filepath.Join(rfs.root, "/server", p))
|
return os.Stat(filepath.Join(rfs.root, "server", p))
|
||||||
}
|
|
||||||
|
|
||||||
func (rfs *rootFs) reset() {
|
|
||||||
if err := os.RemoveAll(filepath.Join(rfs.root, "/server")); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Mkdir(filepath.Join(rfs.root, "/server"), 0o755); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilesystem_Openfile(t *testing.T) {
|
func TestFilesystem_Openfile(t *testing.T) {
|
||||||
|
@ -93,7 +90,8 @@ func TestFilesystem_Openfile(t *testing.T) {
|
||||||
_, _, err := fs.File("foo/bar.txt")
|
_, _, err := fs.File("foo/bar.txt")
|
||||||
|
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrNotExist)).IsTrue()
|
// TODO
|
||||||
|
//g.Assert(IsErrorCode(err, ErrNotExist)).IsTrue()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("returns file stat information", func() {
|
g.It("returns file stat information", func() {
|
||||||
|
@ -108,14 +106,14 @@ func TestFilesystem_Openfile(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilesystem_Writefile(t *testing.T) {
|
func TestFilesystem_Writefile(t *testing.T) {
|
||||||
g := Goblin(t)
|
g := Goblin(t)
|
||||||
fs, rfs := NewFs()
|
fs, _ := NewFs()
|
||||||
|
|
||||||
g.Describe("Open and WriteFile", func() {
|
g.Describe("Open and WriteFile", func() {
|
||||||
buf := &bytes.Buffer{}
|
buf := &bytes.Buffer{}
|
||||||
|
@ -125,22 +123,22 @@ func TestFilesystem_Writefile(t *testing.T) {
|
||||||
g.It("can create a new file", func() {
|
g.It("can create a new file", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.CachedUsage()).Equal(int64(0))
|
||||||
|
|
||||||
err := fs.Writefile("test.txt", r)
|
err := fs.Write("test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("test.txt")
|
f, _, err := fs.File("test.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
g.Assert(getFileContent(f)).Equal("test file content")
|
g.Assert(getFileContent(f)).Equal("test file content")
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(r.Size())
|
g.Assert(fs.CachedUsage()).Equal(r.Size())
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("can create a new file inside a nested directory with leading slash", func() {
|
g.It("can create a new file inside a nested directory with leading slash", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
err := fs.Writefile("/some/nested/test.txt", r)
|
err := fs.Write("/some/nested/test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("/some/nested/test.txt")
|
f, _, err := fs.File("/some/nested/test.txt")
|
||||||
|
@ -152,7 +150,7 @@ func TestFilesystem_Writefile(t *testing.T) {
|
||||||
g.It("can create a new file inside a nested directory without a trailing slash", func() {
|
g.It("can create a new file inside a nested directory without a trailing slash", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
err := fs.Writefile("some/../foo/bar/test.txt", r)
|
err := fs.Write("some/../foo/bar/test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("foo/bar/test.txt")
|
f, _, err := fs.File("foo/bar/test.txt")
|
||||||
|
@ -164,13 +162,13 @@ func TestFilesystem_Writefile(t *testing.T) {
|
||||||
g.It("cannot create a file outside the root directory", func() {
|
g.It("cannot create a file outside the root directory", func() {
|
||||||
r := bytes.NewReader([]byte("test file content"))
|
r := bytes.NewReader([]byte("test file content"))
|
||||||
|
|
||||||
err := fs.Writefile("/some/../foo/../../test.txt", r)
|
err := fs.Write("/some/../foo/../../test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot write a file that exceeds the disk limits", func() {
|
g.It("cannot write a file that exceeds the disk limits", func() {
|
||||||
atomic.StoreInt64(&fs.diskLimit, 1024)
|
fs.SetDiskLimit(1024)
|
||||||
|
|
||||||
b := make([]byte, 1025)
|
b := make([]byte, 1025)
|
||||||
_, err := rand.Read(b)
|
_, err := rand.Read(b)
|
||||||
|
@ -178,18 +176,18 @@ func TestFilesystem_Writefile(t *testing.T) {
|
||||||
g.Assert(len(b)).Equal(1025)
|
g.Assert(len(b)).Equal(1025)
|
||||||
|
|
||||||
r := bytes.NewReader(b)
|
r := bytes.NewReader(b)
|
||||||
err = fs.Writefile("test.txt", r)
|
err = fs.Write("test.txt", r, int64(len(b)), 0o644)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("truncates the file when writing new contents", func() {
|
g.It("truncates the file when writing new contents", func() {
|
||||||
r := bytes.NewReader([]byte("original data"))
|
r := bytes.NewReader([]byte("original data"))
|
||||||
err := fs.Writefile("test.txt", r)
|
err := fs.Write("test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
r = bytes.NewReader([]byte("new data"))
|
r = bytes.NewReader([]byte("new data"))
|
||||||
err = fs.Writefile("test.txt", r)
|
err = fs.Write("test.txt", r, r.Size(), 0o644)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
f, _, err := fs.File("test.txt")
|
f, _, err := fs.File("test.txt")
|
||||||
|
@ -200,10 +198,7 @@ func TestFilesystem_Writefile(t *testing.T) {
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
buf.Truncate(0)
|
buf.Truncate(0)
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -236,17 +231,17 @@ func TestFilesystem_CreateDirectory(t *testing.T) {
|
||||||
g.It("should not allow the creation of directories outside the root", func() {
|
g.It("should not allow the creation of directories outside the root", func() {
|
||||||
err := fs.CreateDirectory("test", "e/../../something")
|
err := fs.CreateDirectory("test", "e/../../something")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should not increment the disk usage", func() {
|
g.It("should not increment the disk usage", func() {
|
||||||
err := fs.CreateDirectory("test", "/")
|
err := fs.CreateDirectory("test", "/")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.CachedUsage()).Equal(int64(0))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -268,25 +263,25 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||||
|
|
||||||
err = fs.Rename("source.txt", "target.txt")
|
err = fs.Rename("source.txt", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrExist)).IsTrue("err is not ErrExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("returns an error if the final destination is the root directory", func() {
|
g.It("returns an error if the final destination is the root directory", func() {
|
||||||
err := fs.Rename("source.txt", "/")
|
err := fs.Rename("source.txt", "/")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("returns an error if the source destination is the root directory", func() {
|
g.It("returns an error if the source destination is the root directory", func() {
|
||||||
err := fs.Rename("source.txt", "/")
|
err := fs.Rename("/", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not allow renaming to a location outside the root", func() {
|
g.It("does not allow renaming to a location outside the root", func() {
|
||||||
err := fs.Rename("source.txt", "../target.txt")
|
err := fs.Rename("source.txt", "../target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not allow renaming from a location outside the root", func() {
|
g.It("does not allow renaming from a location outside the root", func() {
|
||||||
|
@ -294,7 +289,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||||
|
|
||||||
err = fs.Rename("/../ext-source.txt", "target.txt")
|
err = fs.Rename("/../ext-source.txt", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("allows a file to be renamed", func() {
|
g.It("allows a file to be renamed", func() {
|
||||||
|
@ -303,7 +298,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||||
|
|
||||||
_, err = rfs.StatServerFile("source.txt")
|
_, err = rfs.StatServerFile("source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
|
|
||||||
st, err := rfs.StatServerFile("target.txt")
|
st, err := rfs.StatServerFile("target.txt")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
@ -320,7 +315,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||||
|
|
||||||
_, err = rfs.StatServerFile("source_dir")
|
_, err = rfs.StatServerFile("source_dir")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
|
|
||||||
st, err := rfs.StatServerFile("target_dir")
|
st, err := rfs.StatServerFile("target_dir")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
@ -330,7 +325,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||||
g.It("returns an error if the source does not exist", func() {
|
g.It("returns an error if the source does not exist", func() {
|
||||||
err := fs.Rename("missing.txt", "target.txt")
|
err := fs.Rename("missing.txt", "target.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("creates directories if they are missing", func() {
|
g.It("creates directories if they are missing", func() {
|
||||||
|
@ -343,7 +338,7 @@ func TestFilesystem_Rename(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -358,13 +353,13 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
|
fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content")))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source does not exist", func() {
|
g.It("should return an error if the source does not exist", func() {
|
||||||
err := fs.Copy("foo.txt")
|
err := fs.Copy("foo.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source is outside the root", func() {
|
g.It("should return an error if the source is outside the root", func() {
|
||||||
|
@ -372,11 +367,11 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||||
|
|
||||||
err = fs.Copy("../ext-source.txt")
|
err = fs.Copy("../ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source directory is outside the root", func() {
|
g.It("should return an error if the source directory is outside the root", func() {
|
||||||
err := os.MkdirAll(filepath.Join(rfs.root, "/nested/in/dir"), 0o755)
|
err := os.MkdirAll(filepath.Join(rfs.root, "nested/in/dir"), 0o755)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = rfs.CreateServerFileFromString("/../nested/in/dir/ext-source.txt", "external content")
|
err = rfs.CreateServerFileFromString("/../nested/in/dir/ext-source.txt", "external content")
|
||||||
|
@ -384,28 +379,28 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||||
|
|
||||||
err = fs.Copy("../nested/in/dir/ext-source.txt")
|
err = fs.Copy("../nested/in/dir/ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
|
|
||||||
err = fs.Copy("nested/in/../../../nested/in/dir/ext-source.txt")
|
err = fs.Copy("nested/in/../../../nested/in/dir/ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if the source is a directory", func() {
|
g.It("should return an error if the source is a directory", func() {
|
||||||
err := os.Mkdir(filepath.Join(rfs.root, "/server/dir"), 0o755)
|
err := os.Mkdir(filepath.Join(rfs.root, "server/dir"), 0o755)
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
err = fs.Copy("dir")
|
err = fs.Copy("dir")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should return an error if there is not space to copy the file", func() {
|
g.It("should return an error if there is not space to copy the file", func() {
|
||||||
atomic.StoreInt64(&fs.diskLimit, 2)
|
fs.SetDiskLimit(2)
|
||||||
|
|
||||||
err := fs.Copy("source.txt")
|
err := fs.Copy("source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
|
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue("err is not ErrCodeDiskSpace")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should create a copy of the file and increment the disk used", func() {
|
g.It("should create a copy of the file and increment the disk used", func() {
|
||||||
|
@ -433,7 +428,7 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
}
|
}
|
||||||
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(utf8.RuneCountInString("test content")) * 3)
|
g.Assert(fs.CachedUsage()).Equal(int64(utf8.RuneCountInString("test content")) * 3)
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("should create a copy inside of a directory", func() {
|
g.It("should create a copy inside of a directory", func() {
|
||||||
|
@ -454,10 +449,7 @@ func TestFilesystem_Copy(t *testing.T) {
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.AfterEach(func() {
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -472,7 +464,7 @@ func TestFilesystem_Delete(t *testing.T) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
|
fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content")))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not delete files outside the root directory", func() {
|
g.It("does not delete files outside the root directory", func() {
|
||||||
|
@ -480,13 +472,13 @@ func TestFilesystem_Delete(t *testing.T) {
|
||||||
|
|
||||||
err = fs.Delete("../ext-source.txt")
|
err = fs.Delete("../ext-source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not allow the deletion of the root directory", func() {
|
g.It("does not allow the deletion of the root directory", func() {
|
||||||
err := fs.Delete("/")
|
err := fs.Delete("/")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(err.Error()).Equal("cannot delete root server directory")
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("does not return an error if the target does not exist", func() {
|
g.It("does not return an error if the target does not exist", func() {
|
||||||
|
@ -504,9 +496,9 @@ func TestFilesystem_Delete(t *testing.T) {
|
||||||
|
|
||||||
_, err = rfs.StatServerFile("source.txt")
|
_, err = rfs.StatServerFile("source.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
|
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.CachedUsage()).Equal(int64(0))
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("deletes all items inside a directory if the directory is deleted", func() {
|
g.It("deletes all items inside a directory if the directory is deleted", func() {
|
||||||
|
@ -524,24 +516,95 @@ func TestFilesystem_Delete(t *testing.T) {
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
}
|
}
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")*3))
|
fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content") * 3))
|
||||||
|
|
||||||
err = fs.Delete("foo")
|
err = fs.Delete("foo")
|
||||||
g.Assert(err).IsNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
|
g.Assert(fs.unixFS.Usage()).Equal(int64(0))
|
||||||
|
|
||||||
for _, s := range sources {
|
for _, s := range sources {
|
||||||
_, err = rfs.StatServerFile(s)
|
_, err = rfs.StatServerFile(s)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
g.AfterEach(func() {
|
g.It("deletes a symlink but not it's target within the root directory", func() {
|
||||||
rfs.reset()
|
// Symlink to a file inside the root directory.
|
||||||
|
err := os.Symlink(filepath.Join(rfs.root, "server/source.txt"), filepath.Join(rfs.root, "server/symlink.txt"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
atomic.StoreInt64(&fs.diskUsed, 0)
|
// Delete the symlink itself.
|
||||||
atomic.StoreInt64(&fs.diskLimit, 0)
|
err = fs.Delete("symlink.txt")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Ensure the symlink was deleted.
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "server/symlink.txt"))
|
||||||
|
g.Assert(err).IsNotNil()
|
||||||
|
|
||||||
|
// Ensure the symlink target still exists.
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "server/source.txt"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
})
|
||||||
|
|
||||||
|
g.It("does not delete files symlinked outside of the root directory", func() {
|
||||||
|
// Create a file outside the root directory.
|
||||||
|
err := rfs.CreateServerFileFromString("/../source.txt", "test content")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Create a symlink to the file outside the root directory.
|
||||||
|
err = os.Symlink(filepath.Join(rfs.root, "source.txt"), filepath.Join(rfs.root, "/server/symlink.txt"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Delete the symlink. (This should pass as we will delete the symlink itself, not it's target)
|
||||||
|
err = fs.Delete("symlink.txt")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Ensure the file outside the root directory still exists.
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "source.txt"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
})
|
||||||
|
|
||||||
|
g.It("does not delete files symlinked through a directory outside of the root directory", func() {
|
||||||
|
// Create a directory outside the root directory.
|
||||||
|
err := os.Mkdir(filepath.Join(rfs.root, "foo"), 0o755)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Create a file inside the directory that is outside the root.
|
||||||
|
err = rfs.CreateServerFileFromString("/../foo/source.txt", "test content")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Symlink the directory that is outside the root to a file inside the root.
|
||||||
|
err = os.Symlink(filepath.Join(rfs.root, "foo"), filepath.Join(rfs.root, "server/symlink"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Delete a file inside the symlinked directory.
|
||||||
|
err = fs.Delete("symlink/source.txt")
|
||||||
|
g.Assert(err).IsNotNil()
|
||||||
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
|
|
||||||
|
// Ensure the file outside the root directory still exists.
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "foo/source.txt"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
})
|
||||||
|
|
||||||
|
g.It("returns an error when trying to delete a non-existent file symlinked through a directory outside of the root directory", func() {
|
||||||
|
// Create a directory outside the root directory.
|
||||||
|
err := os.Mkdir(filepath.Join(rfs.root, "foo2"), 0o755)
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Symlink the directory that is outside the root to a file inside the root.
|
||||||
|
err = os.Symlink(filepath.Join(rfs.root, "foo2"), filepath.Join(rfs.root, "server/symlink"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
|
||||||
|
// Delete a file inside the symlinked directory.
|
||||||
|
err = fs.Delete("symlink/source.txt")
|
||||||
|
g.Assert(err).IsNotNil()
|
||||||
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
|
})
|
||||||
|
|
||||||
|
g.AfterEach(func() {
|
||||||
|
_ = fs.TruncateRootDirectory()
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,95 +1,28 @@
|
||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Checks if the given file or path is in the server's file denylist. If so, an Error
|
// Checks if the given file or path is in the server's file denylist. If so, an Error
|
||||||
// is returned, otherwise nil is returned.
|
// is returned, otherwise nil is returned.
|
||||||
func (fs *Filesystem) IsIgnored(paths ...string) error {
|
func (fs *Filesystem) IsIgnored(paths ...string) error {
|
||||||
for _, p := range paths {
|
for _, p := range paths {
|
||||||
sp, err := fs.SafePath(p)
|
//sp, err := fs.SafePath(p)
|
||||||
if err != nil {
|
//if err != nil {
|
||||||
return err
|
// return err
|
||||||
}
|
//}
|
||||||
if fs.denylist.MatchesPath(sp) {
|
// TODO: update logic to use unixFS
|
||||||
return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: sp})
|
if fs.denylist.MatchesPath(p) {
|
||||||
|
return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: p})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Normalizes a directory being passed in to ensure the user is not able to escape
|
|
||||||
// from their data directory. After normalization if the directory is still within their home
|
|
||||||
// path it is returned. If they managed to "escape" an error will be returned.
|
|
||||||
//
|
|
||||||
// This logic is actually copied over from the SFTP server code. Ideally that eventually
|
|
||||||
// either gets ported into this application, or is able to make use of this package.
|
|
||||||
func (fs *Filesystem) SafePath(p string) (string, error) {
|
|
||||||
var nonExistentPathResolution string
|
|
||||||
|
|
||||||
// Start with a cleaned up path before checking the more complex bits.
|
|
||||||
r := fs.unsafeFilePath(p)
|
|
||||||
|
|
||||||
// At the same time, evaluate the symlink status and determine where this file or folder
|
|
||||||
// is truly pointing to.
|
|
||||||
ep, err := filepath.EvalSymlinks(r)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
|
|
||||||
} else if os.IsNotExist(err) {
|
|
||||||
// The requested directory doesn't exist, so at this point we need to iterate up the
|
|
||||||
// path chain until we hit a directory that _does_ exist and can be validated.
|
|
||||||
parts := strings.Split(filepath.Dir(r), "/")
|
|
||||||
|
|
||||||
var try string
|
|
||||||
// Range over all of the path parts and form directory pathings from the end
|
|
||||||
// moving up until we have a valid resolution or we run out of paths to try.
|
|
||||||
for k := range parts {
|
|
||||||
try = strings.Join(parts[:(len(parts)-k)], "/")
|
|
||||||
|
|
||||||
if !fs.unsafeIsInDataDirectory(try) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
t, err := filepath.EvalSymlinks(try)
|
|
||||||
if err == nil {
|
|
||||||
nonExistentPathResolution = t
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the new path doesn't start with their root directory there is clearly an escape
|
|
||||||
// attempt going on, and we should NOT resolve this path for them.
|
|
||||||
if nonExistentPathResolution != "" {
|
|
||||||
if !fs.unsafeIsInDataDirectory(nonExistentPathResolution) {
|
|
||||||
return "", NewBadPathResolution(p, nonExistentPathResolution)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the nonExistentPathResolution variable is not empty then the initial path requested
|
|
||||||
// did not exist and we looped through the pathway until we found a match. At this point
|
|
||||||
// we've confirmed the first matched pathway exists in the root server directory, so we
|
|
||||||
// can go ahead and just return the path that was requested initially.
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the requested directory from EvalSymlinks begins with the server root directory go
|
|
||||||
// ahead and return it. If not we'll return an error which will block any further action
|
|
||||||
// on the file.
|
|
||||||
if fs.unsafeIsInDataDirectory(ep) {
|
|
||||||
return ep, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", NewBadPathResolution(p, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate a path to the file by cleaning it up and appending the root server path to it. This
|
// Generate a path to the file by cleaning it up and appending the root server path to it. This
|
||||||
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
|
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
|
||||||
// the fs.unsafeIsInDataDirectory(p) function to confirm.
|
// the fs.unsafeIsInDataDirectory(p) function to confirm.
|
||||||
|
@ -108,51 +41,3 @@ func (fs *Filesystem) unsafeFilePath(p string) string {
|
||||||
func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
|
func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
|
||||||
return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
|
return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Executes the fs.SafePath function in parallel against an array of paths. If any of the calls
|
|
||||||
// fails an error will be returned.
|
|
||||||
func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
|
|
||||||
var cleaned []string
|
|
||||||
|
|
||||||
// Simple locker function to avoid racy appends to the array of cleaned paths.
|
|
||||||
m := new(sync.Mutex)
|
|
||||||
push := func(c string) {
|
|
||||||
m.Lock()
|
|
||||||
cleaned = append(cleaned, c)
|
|
||||||
m.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create an error group that we can use to run processes in parallel while retaining
|
|
||||||
// the ability to cancel the entire process immediately should any of it fail.
|
|
||||||
g, ctx := errgroup.WithContext(context.Background())
|
|
||||||
|
|
||||||
// Iterate over all of the paths and generate a cleaned path, if there is an error for any
|
|
||||||
// of the files, abort the process.
|
|
||||||
for _, p := range paths {
|
|
||||||
// Create copy so we can use it within the goroutine correctly.
|
|
||||||
pi := p
|
|
||||||
|
|
||||||
// Recursively call this function to continue digging through the directory tree within
|
|
||||||
// a separate goroutine. If the context is canceled abort this process.
|
|
||||||
g.Go(func() error {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
default:
|
|
||||||
// If the callback returns true, go ahead and keep walking deeper. This allows
|
|
||||||
// us to programmatically continue deeper into directories, or stop digging
|
|
||||||
// if that pathway knows it needs nothing else.
|
|
||||||
if c, err := fs.SafePath(pi); err != nil {
|
|
||||||
return err
|
|
||||||
} else {
|
|
||||||
push(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block until all of the routines finish and have returned a value.
|
|
||||||
return cleaned, g.Wait()
|
|
||||||
}
|
|
||||||
|
|
|
@ -8,6 +8,8 @@ import (
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
. "github.com/franela/goblin"
|
. "github.com/franela/goblin"
|
||||||
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFilesystem_Path(t *testing.T) {
|
func TestFilesystem_Path(t *testing.T) {
|
||||||
|
@ -21,80 +23,6 @@ func TestFilesystem_Path(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFilesystem_SafePath(t *testing.T) {
|
|
||||||
g := Goblin(t)
|
|
||||||
fs, rfs := NewFs()
|
|
||||||
prefix := filepath.Join(rfs.root, "/server")
|
|
||||||
|
|
||||||
g.Describe("SafePath", func() {
|
|
||||||
g.It("returns a cleaned path to a given file", func() {
|
|
||||||
p, err := fs.SafePath("test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("./test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/foo/../test.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/test.txt")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/foo/bar")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("handles root directory access", func() {
|
|
||||||
p, err := fs.SafePath("/")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix)
|
|
||||||
|
|
||||||
p, err = fs.SafePath("")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix)
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("removes trailing slashes from paths", func() {
|
|
||||||
p, err := fs.SafePath("/foo/bar/")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/foo/bar")
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("handles deeply nested directories that do not exist", func() {
|
|
||||||
p, err := fs.SafePath("/foo/bar/baz/quaz/../../ducks/testing.txt")
|
|
||||||
g.Assert(err).IsNil()
|
|
||||||
g.Assert(p).Equal(prefix + "/foo/bar/ducks/testing.txt")
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("blocks access to files outside the root directory", func() {
|
|
||||||
p, err := fs.SafePath("../test.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("/../test.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("./foo/../../test.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
|
|
||||||
p, err = fs.SafePath("..")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
g.Assert(p).Equal("")
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// We test against accessing files outside the root directory in the tests, however it
|
// We test against accessing files outside the root directory in the tests, however it
|
||||||
// is still possible for someone to mess up and not properly use this safe path call. In
|
// is still possible for someone to mess up and not properly use this safe path call. In
|
||||||
// order to truly confirm this, we'll try to pass in a symlinked malicious file to all of
|
// order to truly confirm this, we'll try to pass in a symlinked malicious file to all of
|
||||||
|
@ -115,6 +43,14 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := os.Symlink(filepath.Join(rfs.root, "malicious_does_not_exist.txt"), filepath.Join(rfs.root, "/server/symlinked_does_not_exist.txt")); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Symlink(filepath.Join(rfs.root, "/server/symlinked_does_not_exist.txt"), filepath.Join(rfs.root, "/server/symlinked_does_not_exist2.txt")); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
if err := os.Symlink(filepath.Join(rfs.root, "/malicious_dir"), filepath.Join(rfs.root, "/server/external_dir")); err != nil {
|
if err := os.Symlink(filepath.Join(rfs.root, "/malicious_dir"), filepath.Join(rfs.root, "/server/external_dir")); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
|
@ -125,7 +61,23 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||||
|
|
||||||
err := fs.Writefile("symlinked.txt", r)
|
err := fs.Writefile("symlinked.txt", r)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
|
})
|
||||||
|
|
||||||
|
g.It("cannot write to a non-existent file symlinked outside the root", func() {
|
||||||
|
r := bytes.NewReader([]byte("testing what the fuck"))
|
||||||
|
|
||||||
|
err := fs.Writefile("symlinked_does_not_exist.txt", r)
|
||||||
|
g.Assert(err).IsNotNil()
|
||||||
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
|
})
|
||||||
|
|
||||||
|
g.It("cannot write to chained symlinks with target that does not exist outside the root", func() {
|
||||||
|
r := bytes.NewReader([]byte("testing what the fuck"))
|
||||||
|
|
||||||
|
err := fs.Writefile("symlinked_does_not_exist2.txt", r)
|
||||||
|
g.Assert(err).IsNotNil()
|
||||||
|
g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot write a file to a directory symlinked outside the root", func() {
|
g.It("cannot write a file to a directory symlinked outside the root", func() {
|
||||||
|
@ -133,7 +85,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||||
|
|
||||||
err := fs.Writefile("external_dir/foo.txt", r)
|
err := fs.Writefile("external_dir/foo.txt", r)
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -141,55 +93,54 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||||
g.It("cannot create a directory outside the root", func() {
|
g.It("cannot create a directory outside the root", func() {
|
||||||
err := fs.CreateDirectory("my_dir", "external_dir")
|
err := fs.CreateDirectory("my_dir", "external_dir")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot create a nested directory outside the root", func() {
|
g.It("cannot create a nested directory outside the root", func() {
|
||||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
|
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot create a nested directory outside the root", func() {
|
g.It("cannot create a nested directory outside the root", func() {
|
||||||
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
|
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
g.Describe("Rename", func() {
|
g.Describe("Rename", func() {
|
||||||
g.It("cannot rename a file symlinked outside the directory root", func() {
|
g.It("can rename a file symlinked outside the directory root", func() {
|
||||||
err := fs.Rename("symlinked.txt", "foo.txt")
|
_, err := os.Lstat(filepath.Join(rfs.root, "server", "symlinked.txt"))
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
err = fs.Rename("symlinked.txt", "foo.txt")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "server", "foo.txt"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot rename a symlinked directory outside the root", func() {
|
g.It("can rename a symlinked directory outside the root", func() {
|
||||||
err := fs.Rename("external_dir", "foo")
|
_, err := os.Lstat(filepath.Join(rfs.root, "server", "external_dir"))
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
err = fs.Rename("external_dir", "foo")
|
||||||
|
g.Assert(err).IsNil()
|
||||||
|
_, err = os.Lstat(filepath.Join(rfs.root, "server", "foo"))
|
||||||
|
g.Assert(err).IsNil()
|
||||||
})
|
})
|
||||||
|
|
||||||
g.It("cannot rename a file to a location outside the directory root", func() {
|
g.It("cannot rename a file to a location outside the directory root", func() {
|
||||||
rfs.CreateServerFileFromString("my_file.txt", "internal content")
|
_ = rfs.CreateServerFileFromString("my_file.txt", "internal content")
|
||||||
|
t.Log(rfs.root)
|
||||||
|
|
||||||
err := fs.Rename("my_file.txt", "external_dir/my_file.txt")
|
st, err := os.Lstat(filepath.Join(rfs.root, "server", "foo"))
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(st.Mode()&ufs.ModeSymlink != 0).IsTrue()
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
g.Describe("Chown", func() {
|
err = fs.Rename("my_file.txt", "foo/my_file.txt")
|
||||||
g.It("cannot chown a file symlinked outside the directory root", func() {
|
g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue()
|
||||||
err := fs.Chown("symlinked.txt")
|
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
})
|
|
||||||
|
|
||||||
g.It("cannot chown a directory symlinked outside the directory root", func() {
|
st, err = os.Lstat(filepath.Join(rfs.root, "malicious_dir", "my_file.txt"))
|
||||||
err := fs.Chown("external_dir")
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue()
|
||||||
g.Assert(err).IsNotNil()
|
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -197,7 +148,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||||
g.It("cannot copy a file symlinked outside the directory root", func() {
|
g.It("cannot copy a file symlinked outside the directory root", func() {
|
||||||
err := fs.Copy("symlinked.txt")
|
err := fs.Copy("symlinked.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
|
@ -211,9 +162,9 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
|
||||||
|
|
||||||
_, err = rfs.StatServerFile("symlinked.txt")
|
_, err = rfs.StatServerFile("symlinked.txt")
|
||||||
g.Assert(err).IsNotNil()
|
g.Assert(err).IsNotNil()
|
||||||
g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
|
g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
rfs.reset()
|
_ = fs.TruncateRootDirectory()
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,16 +1,18 @@
|
||||||
package filesystem
|
package filesystem
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"encoding/json"
|
||||||
|
"io"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/gabriel-vasile/mimetype"
|
"github.com/gabriel-vasile/mimetype"
|
||||||
"github.com/goccy/go-json"
|
|
||||||
|
"github.com/pterodactyl/wings/internal/ufs"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Stat struct {
|
type Stat struct {
|
||||||
os.FileInfo
|
ufs.FileInfo
|
||||||
Mimetype string
|
Mimetype string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -31,40 +33,31 @@ func (s *Stat) MarshalJSON() ([]byte, error) {
|
||||||
Created: s.CTime().Format(time.RFC3339),
|
Created: s.CTime().Format(time.RFC3339),
|
||||||
Modified: s.ModTime().Format(time.RFC3339),
|
Modified: s.ModTime().Format(time.RFC3339),
|
||||||
Mode: s.Mode().String(),
|
Mode: s.Mode().String(),
|
||||||
// Using `&os.ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
|
// Using `&ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
|
||||||
ModeBits: strconv.FormatUint(uint64(s.Mode()&os.ModePerm), 8),
|
ModeBits: strconv.FormatUint(uint64(s.Mode()&ufs.ModePerm), 8),
|
||||||
Size: s.Size(),
|
Size: s.Size(),
|
||||||
Directory: s.IsDir(),
|
Directory: s.IsDir(),
|
||||||
File: !s.IsDir(),
|
File: !s.IsDir(),
|
||||||
Symlink: s.Mode().Perm()&os.ModeSymlink != 0,
|
Symlink: s.Mode().Perm()&ufs.ModeSymlink != 0,
|
||||||
Mime: s.Mimetype,
|
Mime: s.Mimetype,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stat stats a file or folder and returns the base stat object from go along
|
func statFromFile(f ufs.File) (Stat, error) {
|
||||||
// with the MIME data that can be used for editing files.
|
s, err := f.Stat()
|
||||||
func (fs *Filesystem) Stat(p string) (Stat, error) {
|
|
||||||
cleaned, err := fs.SafePath(p)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Stat{}, err
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
return fs.unsafeStat(cleaned)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
|
|
||||||
s, err := os.Stat(p)
|
|
||||||
if err != nil {
|
|
||||||
return Stat{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var m *mimetype.MIME
|
var m *mimetype.MIME
|
||||||
if !s.IsDir() {
|
if !s.IsDir() {
|
||||||
m, err = mimetype.DetectFile(p)
|
m, err = mimetype.DetectReader(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return Stat{}, err
|
return Stat{}, err
|
||||||
}
|
}
|
||||||
|
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return Stat{}, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
st := Stat{
|
st := Stat{
|
||||||
FileInfo: s,
|
FileInfo: s,
|
||||||
Mimetype: "inode/directory",
|
Mimetype: "inode/directory",
|
||||||
|
@ -72,6 +65,20 @@ func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
|
||||||
if m != nil {
|
if m != nil {
|
||||||
st.Mimetype = m.String()
|
st.Mimetype = m.String()
|
||||||
}
|
}
|
||||||
|
return st, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stat stats a file or folder and returns the base stat object from go along
|
||||||
|
// with the MIME data that can be used for editing files.
|
||||||
|
func (fs *Filesystem) Stat(p string) (Stat, error) {
|
||||||
|
f, err := fs.unixFS.Open(p)
|
||||||
|
if err != nil {
|
||||||
|
return Stat{}, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
st, err := statFromFile(f)
|
||||||
|
if err != nil {
|
||||||
|
return Stat{}, err
|
||||||
|
}
|
||||||
return st, nil
|
return st, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,13 +0,0 @@
|
||||||
package filesystem
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CTime returns the time that the file/folder was created.
|
|
||||||
func (s *Stat) CTime() time.Time {
|
|
||||||
st := s.Sys().(*syscall.Stat_t)
|
|
||||||
|
|
||||||
return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec)
|
|
||||||
}
|
|
|
@ -3,12 +3,22 @@ package filesystem
|
||||||
import (
|
import (
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Returns the time that the file/folder was created.
|
// CTime returns the time that the file/folder was created.
|
||||||
|
//
|
||||||
|
// TODO: remove. Ctim is not actually ever been correct and doesn't actually
|
||||||
|
// return the creation time.
|
||||||
func (s *Stat) CTime() time.Time {
|
func (s *Stat) CTime() time.Time {
|
||||||
st := s.Sys().(*syscall.Stat_t)
|
if st, ok := s.Sys().(*unix.Stat_t); ok {
|
||||||
|
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
|
||||||
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
|
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
||||||
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
}
|
||||||
|
if st, ok := s.Sys().(*syscall.Stat_t); ok {
|
||||||
|
// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
|
||||||
|
return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
|
||||||
|
}
|
||||||
|
return time.Time{}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,12 +0,0 @@
|
||||||
package filesystem
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// On linux systems this will return the time that the file was created.
|
|
||||||
// However, I have no idea how to do this on windows, so we're skipping it
|
|
||||||
// for right now.
|
|
||||||
func (s *Stat) CTime() time.Time {
|
|
||||||
return s.ModTime()
|
|
||||||
}
|
|
|
@ -2,9 +2,7 @@ package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"html/template"
|
"html/template"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
@ -219,30 +217,18 @@ func (ip *InstallationProcess) tempDir() string {
|
||||||
// can be properly mounted into the installation container and then executed.
|
// can be properly mounted into the installation container and then executed.
|
||||||
func (ip *InstallationProcess) writeScriptToDisk() error {
|
func (ip *InstallationProcess) writeScriptToDisk() error {
|
||||||
// Make sure the temp directory root exists before trying to make a directory within it. The
|
// Make sure the temp directory root exists before trying to make a directory within it. The
|
||||||
// ioutil.TempDir call expects this base to exist, it won't create it for you.
|
// os.TempDir call expects this base to exist, it won't create it for you.
|
||||||
if err := os.MkdirAll(ip.tempDir(), 0o700); err != nil {
|
if err := os.MkdirAll(ip.tempDir(), 0o700); err != nil {
|
||||||
return errors.WithMessage(err, "could not create temporary directory for install process")
|
return errors.WithMessage(err, "could not create temporary directory for install process")
|
||||||
}
|
}
|
||||||
|
|
||||||
f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
|
f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.WithMessage(err, "failed to write server installation script to disk before mount")
|
return errors.WithMessage(err, "failed to write server installation script to disk before mount")
|
||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
if _, err := io.Copy(f, strings.NewReader(strings.ReplaceAll(ip.Script.Script, "\r\n", "\n"))); err != nil {
|
||||||
w := bufio.NewWriter(f)
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(bytes.NewReader([]byte(ip.Script.Script)))
|
|
||||||
for scanner.Scan() {
|
|
||||||
w.WriteString(scanner.Text() + "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Flush()
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -427,10 +413,6 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
cfg := config.Get()
|
cfg := config.Get()
|
||||||
if cfg.System.User.Rootless.Enabled {
|
|
||||||
conf.User = fmt.Sprintf("%d:%d", cfg.System.User.Rootless.ContainerUID, cfg.System.User.Rootless.ContainerGID)
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpfsSize := strconv.Itoa(int(cfg.Docker.TmpfsSize))
|
tmpfsSize := strconv.Itoa(int(cfg.Docker.TmpfsSize))
|
||||||
hostConf := &container.HostConfig{
|
hostConf := &container.HostConfig{
|
||||||
Mounts: []mount.Mount{
|
Mounts: []mount.Mount{
|
||||||
|
@ -453,7 +435,6 @@ func (ip *InstallationProcess) Execute() (string, error) {
|
||||||
},
|
},
|
||||||
DNS: cfg.Docker.Network.Dns,
|
DNS: cfg.Docker.Network.Dns,
|
||||||
LogConfig: cfg.Docker.ContainerLogConfig(),
|
LogConfig: cfg.Docker.ContainerLogConfig(),
|
||||||
Privileged: true,
|
|
||||||
NetworkMode: container.NetworkMode(cfg.Docker.Network.Mode),
|
NetworkMode: container.NetworkMode(cfg.Docker.Network.Mode),
|
||||||
UsernsMode: container.UsernsMode(cfg.Docker.UsernsMode),
|
UsernsMode: container.UsernsMode(cfg.Docker.UsernsMode),
|
||||||
}
|
}
|
||||||
|
|
|
@ -196,7 +196,10 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
|
||||||
return nil, errors.WithStackIf(err)
|
return nil, errors.WithStackIf(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
|
s.fs, err = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.WithStackIf(err)
|
||||||
|
}
|
||||||
|
|
||||||
// Right now we only support a Docker based environment, so I'm going to hard code
|
// Right now we only support a Docker based environment, so I'm going to hard code
|
||||||
// this logic in. When we're ready to support other environment we'll need to make
|
// this logic in. When we're ready to support other environment we'll need to make
|
||||||
|
|
|
@ -29,6 +29,21 @@ func (s *Server) Mounts() []environment.Mount {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Handle mounting a generated `/etc/passwd` if the feature is enabled.
|
||||||
|
if passwd := config.Get().System.Passwd; passwd.Enable {
|
||||||
|
s.Log().WithFields(log.Fields{"source_path": passwd.Directory}).Info("mouting generated /etc/{group,passwd} to workaround UID/GID issues")
|
||||||
|
m = append(m, environment.Mount{
|
||||||
|
Source: filepath.Join(passwd.Directory, "group"),
|
||||||
|
Target: "/etc/group",
|
||||||
|
ReadOnly: true,
|
||||||
|
})
|
||||||
|
m = append(m, environment.Mount{
|
||||||
|
Source: filepath.Join(passwd.Directory, "passwd"),
|
||||||
|
Target: "/etc/passwd",
|
||||||
|
ReadOnly: true,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Also include any of this server's custom mounts when returning them.
|
// Also include any of this server's custom mounts when returning them.
|
||||||
return append(m, s.customMounts()...)
|
return append(m, s.customMounts()...)
|
||||||
}
|
}
|
||||||
|
@ -56,14 +71,12 @@ func (s *Server) customMounts() []environment.Mount {
|
||||||
if !strings.HasPrefix(source, filepath.Clean(allowed)) {
|
if !strings.HasPrefix(source, filepath.Clean(allowed)) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
mounted = true
|
mounted = true
|
||||||
mounts = append(mounts, environment.Mount{
|
mounts = append(mounts, environment.Mount{
|
||||||
Source: source,
|
Source: source,
|
||||||
Target: target,
|
Target: target,
|
||||||
ReadOnly: m.ReadOnly,
|
ReadOnly: m.ReadOnly,
|
||||||
})
|
})
|
||||||
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -3,7 +3,6 @@ package server
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"emperror.dev/errors"
|
"emperror.dev/errors"
|
||||||
|
@ -161,7 +160,7 @@ func (s *Server) HandlePowerAction(action PowerAction, waitSeconds ...int) error
|
||||||
|
|
||||||
return s.Environment.Start(s.Context())
|
return s.Environment.Start(s.Context())
|
||||||
case PowerActionTerminate:
|
case PowerActionTerminate:
|
||||||
return s.Environment.Terminate(s.Context(), os.Kill)
|
return s.Environment.Terminate(s.Context(), "SIGKILL")
|
||||||
}
|
}
|
||||||
|
|
||||||
return errors.New("attempting to handle unknown power action")
|
return errors.New("attempting to handle unknown power action")
|
||||||
|
|
|
@ -147,6 +147,7 @@ func (s *Server) Context() context.Context {
|
||||||
// server instance.
|
// server instance.
|
||||||
func (s *Server) GetEnvironmentVariables() []string {
|
func (s *Server) GetEnvironmentVariables() []string {
|
||||||
out := []string{
|
out := []string{
|
||||||
|
// TODO: allow this to be overridden by the user.
|
||||||
fmt.Sprintf("TZ=%s", config.Get().System.Timezone),
|
fmt.Sprintf("TZ=%s", config.Get().System.Timezone),
|
||||||
fmt.Sprintf("STARTUP=%s", s.Config().Invocation),
|
fmt.Sprintf("STARTUP=%s", s.Config().Invocation),
|
||||||
fmt.Sprintf("SERVER_MEMORY=%d", s.MemoryLimit()),
|
fmt.Sprintf("SERVER_MEMORY=%d", s.MemoryLimit()),
|
||||||
|
|
|
@ -35,8 +35,8 @@ type Archive struct {
|
||||||
func NewArchive(t *Transfer, size uint64) *Archive {
|
func NewArchive(t *Transfer, size uint64) *Archive {
|
||||||
return &Archive{
|
return &Archive{
|
||||||
archive: &filesystem.Archive{
|
archive: &filesystem.Archive{
|
||||||
BasePath: t.Server.Filesystem().Path(),
|
Filesystem: t.Server.Filesystem(),
|
||||||
Progress: progress.NewProgress(size),
|
Progress: progress.NewProgress(size),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -28,6 +28,7 @@ func (s *Server) SyncWithEnvironment() {
|
||||||
Mounts: s.Mounts(),
|
Mounts: s.Mounts(),
|
||||||
Allocations: cfg.Allocations,
|
Allocations: cfg.Allocations,
|
||||||
Limits: cfg.Build,
|
Limits: cfg.Build,
|
||||||
|
Labels: cfg.Labels,
|
||||||
})
|
})
|
||||||
|
|
||||||
// For Docker specific environments we also want to update the configured image
|
// For Docker specific environments we also want to update the configured image
|
||||||
|
|
|
@ -2,7 +2,6 @@ package sftp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -122,7 +121,7 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
|
||||||
if !h.can(permission) {
|
if !h.can(permission) {
|
||||||
return nil, sftp.ErrSSHFxPermissionDenied
|
return nil, sftp.ErrSSHFxPermissionDenied
|
||||||
}
|
}
|
||||||
f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
|
f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_TRUNC)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system")
|
l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system")
|
||||||
return nil, sftp.ErrSSHFxFailure
|
return nil, sftp.ErrSSHFxFailure
|
||||||
|
@ -220,16 +219,8 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
|
||||||
if !h.can(PermissionFileCreate) {
|
if !h.can(PermissionFileCreate) {
|
||||||
return sftp.ErrSSHFxPermissionDenied
|
return sftp.ErrSSHFxPermissionDenied
|
||||||
}
|
}
|
||||||
source, err := h.fs.SafePath(request.Filepath)
|
if err := h.fs.Symlink(request.Filepath, request.Target); err != nil {
|
||||||
if err != nil {
|
l.WithField("target", request.Target).WithField("error", err).Error("failed to create symlink")
|
||||||
return sftp.ErrSSHFxNoSuchFile
|
|
||||||
}
|
|
||||||
target, err := h.fs.SafePath(request.Target)
|
|
||||||
if err != nil {
|
|
||||||
return sftp.ErrSSHFxNoSuchFile
|
|
||||||
}
|
|
||||||
if err := os.Symlink(source, target); err != nil {
|
|
||||||
l.WithField("target", target).WithField("error", err).Error("failed to create symlink")
|
|
||||||
return sftp.ErrSSHFxFailure
|
return sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
|
@ -274,16 +265,12 @@ func (h *Handler) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
|
||||||
|
|
||||||
switch request.Method {
|
switch request.Method {
|
||||||
case "List":
|
case "List":
|
||||||
p, err := h.fs.SafePath(request.Filepath)
|
entries, err := h.fs.ReadDirStat(request.Filepath)
|
||||||
if err != nil {
|
|
||||||
return nil, sftp.ErrSSHFxNoSuchFile
|
|
||||||
}
|
|
||||||
files, err := ioutil.ReadDir(p)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
|
h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
|
||||||
return nil, sftp.ErrSSHFxFailure
|
return nil, sftp.ErrSSHFxFailure
|
||||||
}
|
}
|
||||||
return ListerAt(files), nil
|
return ListerAt(entries), nil
|
||||||
case "Stat":
|
case "Stat":
|
||||||
st, err := h.fs.Stat(request.Filepath)
|
st, err := h.fs.Stat(request.Filepath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@ -68,6 +68,21 @@ func (c *SFTPServer) Run() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
conf := &ssh.ServerConfig{
|
conf := &ssh.ServerConfig{
|
||||||
|
Config: ssh.Config{
|
||||||
|
KeyExchanges: []string{
|
||||||
|
"curve25519-sha256", "curve25519-sha256@libssh.org",
|
||||||
|
"ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
|
||||||
|
"diffie-hellman-group14-sha256",
|
||||||
|
},
|
||||||
|
Ciphers: []string{
|
||||||
|
"aes128-gcm@openssh.com",
|
||||||
|
"chacha20-poly1305@openssh.com",
|
||||||
|
"aes128-ctr", "aes192-ctr", "aes256-ctr",
|
||||||
|
},
|
||||||
|
MACs: []string{
|
||||||
|
"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256",
|
||||||
|
},
|
||||||
|
},
|
||||||
NoClientAuth: false,
|
NoClientAuth: false,
|
||||||
MaxAuthTries: 6,
|
MaxAuthTries: 6,
|
||||||
PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
|
PasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
|
||||||
|
@ -92,7 +107,7 @@ func (c *SFTPServer) Run() error {
|
||||||
go func(conn net.Conn) {
|
go func(conn net.Conn) {
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
if err := c.AcceptInbound(conn, conf); err != nil {
|
if err := c.AcceptInbound(conn, conf); err != nil {
|
||||||
log.WithField("error", err).Error("sftp: failed to accept inbound connection")
|
log.WithField("error", err).WithField("ip", conn.RemoteAddr().String()).Error("sftp: failed to accept inbound connection")
|
||||||
}
|
}
|
||||||
}(conn)
|
}(conn)
|
||||||
}
|
}
|
||||||
|
|
|
@ -15,7 +15,16 @@ func MutexLocked(m *sync.RWMutex) bool {
|
||||||
|
|
||||||
state := v.FieldByName("w").FieldByName("state")
|
state := v.FieldByName("w").FieldByName("state")
|
||||||
|
|
||||||
return state.Int()&1 == 1 || v.FieldByName("readerCount").Int() > 0
|
readerCountField := v.FieldByName("readerCount")
|
||||||
|
// go1.20 changed readerCount to an atomic
|
||||||
|
// ref; https://github.com/golang/go/commit/e509452727b469d89a3fc4a7d1cbf9d3f110efee
|
||||||
|
var readerCount int64
|
||||||
|
if readerCountField.Kind() == reflect.Struct {
|
||||||
|
readerCount = readerCountField.FieldByName("v").Int()
|
||||||
|
} else {
|
||||||
|
readerCount = readerCountField.Int()
|
||||||
|
}
|
||||||
|
return state.Int()&1 == 1 || readerCount > 0
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSink(t *testing.T) {
|
func TestSink(t *testing.T) {
|
||||||
|
|
|
@ -1,29 +0,0 @@
|
||||||
package system
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)
|
|
||||||
|
|
||||||
const characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
|
|
||||||
|
|
||||||
// RandomString generates a random string of alpha-numeric characters using a
|
|
||||||
// pseudo-random number generator. The output of this function IS NOT cryptographically
|
|
||||||
// secure, it is used solely for generating random strings outside a security context.
|
|
||||||
func RandomString(n int) string {
|
|
||||||
var b strings.Builder
|
|
||||||
b.Grow(n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
b.WriteByte(characters[rand.Intn(len(characters))])
|
|
||||||
}
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TrimIPSuffix removes the internal port value from an IP address to ensure we're only
|
|
||||||
// ever working directly with the IP address.
|
|
||||||
func TrimIPSuffix(s string) string {
|
|
||||||
return ipTrimRegex.ReplaceAllString(s, "")
|
|
||||||
}
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
|
|
||||||
"github.com/acobaugh/osrelease"
|
"github.com/acobaugh/osrelease"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
|
"github.com/docker/docker/api/types/system"
|
||||||
"github.com/docker/docker/client"
|
"github.com/docker/docker/client"
|
||||||
"github.com/docker/docker/pkg/parsers/kernel"
|
"github.com/docker/docker/pkg/parsers/kernel"
|
||||||
)
|
)
|
||||||
|
@ -121,21 +122,22 @@ func GetSystemInformation() (*Information, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetDockerInfo(ctx context.Context) (types.Version, types.Info, error) {
|
func GetDockerInfo(ctx context.Context) (types.Version, system.Info, error) {
|
||||||
// TODO: find a way to re-use the client from the docker environment.
|
// TODO: find a way to re-use the client from the docker environment.
|
||||||
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, system.Info{}, err
|
||||||
}
|
}
|
||||||
|
defer c.Close()
|
||||||
|
|
||||||
dockerVersion, err := c.ServerVersion(ctx)
|
dockerVersion, err := c.ServerVersion(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, system.Info{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
dockerInfo, err := c.Info(ctx)
|
dockerInfo, err := c.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return types.Version{}, types.Info{}, err
|
return types.Version{}, system.Info{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return dockerVersion, dockerInfo, nil
|
return dockerVersion, dockerInfo, nil
|
||||||
|
|
Loading…
Reference in New Issue
Block a user