diff --git a/.envrc b/.envrc
new file mode 100644
index 0000000..3550a30
--- /dev/null
+++ b/.envrc
@@ -0,0 +1 @@
+use flake
diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml
index 291d796..0d06462 100644
--- a/.github/workflows/push.yaml
+++ b/.github/workflows/push.yaml
@@ -16,7 +16,7 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-22.04]
- go: ["1.20.12", "1.21.5"]
+ go: ["1.21.9", "1.22.2"]
goos: [linux]
goarch: [amd64, arm64]
@@ -62,14 +62,14 @@ jobs:
- name: Upload Release Artifact
uses: actions/upload-artifact@v4
- if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.20.12' }}
+ if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.21.9' }}
with:
name: wings_linux_${{ matrix.goarch }}
path: dist/wings
- name: Upload Debug Artifact
uses: actions/upload-artifact@v4
- if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.20.12' }}
+ if: ${{ (github.ref == 'refs/heads/develop' || github.event_name == 'pull_request') && matrix.go == '1.21.9' }}
with:
name: wings_linux_${{ matrix.goarch }}_debug
path: dist/wings_debug
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 31e369a..39d3aa4 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -17,7 +17,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
- go-version: "1.20.12"
+ go-version: "1.21.9"
- name: Build release binaries
env:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7641584..b262cfe 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,23 @@
# Changelog
+## v1.11.11
+### Fixed
+* Backups missing content when a `.pteroignore` file is used
+* Archives originating from a subdirectory not containing any files ([#5030](https://github.com/pterodactyl/panel/issues/5030))
+
+## v1.11.10
+### Fixed
+* Archives randomly ignoring files and directories ([#5027](https://github.com/pterodactyl/panel/issues/5027))
+* Crash when deleting or transferring a server ([#5028](https://github.com/pterodactyl/panel/issues/5028))
+
+## v1.11.9
+### Changed
+* Release binaries are now built with Go 1.21.8
+* Updated Go dependencies
+
+### Fixed
+* [CVE-2024-27102](https://www.cve.org/CVERecord?id=CVE-2024-27102)
+
## v1.11.8
### Changed
* Release binaries are now built with Go 1.20.10 (resolves [CVE-2023-44487](https://www.cve.org/CVERecord?id=CVE-2023-44487))
diff --git a/Dockerfile b/Dockerfile
index 832e9c9..9750188 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
# Stage 1 (Build)
-FROM golang:1.20.12-alpine AS builder
+FROM golang:1.21.9-alpine AS builder
ARG VERSION
RUN apk add --update --no-cache git make
diff --git a/README.md b/README.md
index cc5e283..7c853c9 100644
--- a/README.md
+++ b/README.md
@@ -15,15 +15,17 @@ dependencies, and allowing users to authenticate with the same credentials they
## Sponsors
-I would like to extend my sincere thanks to the following sponsors for helping find Pterodactyl's development.
+I would like to extend my sincere thanks to the following sponsors for helping fund Pterodactyl's development.
[Interested in becoming a sponsor?](https://github.com/sponsors/matthewpi)
-| Company | About |
-|-----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
-| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
-| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
-| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa. |
+| Company | About |
+|-----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [**Aussie Server Hosts**](https://aussieserverhosts.com/) | No frills Australian Owned and operated High Performance Server hosting for some of the most demanding games serving Australia and New Zealand. |
+| [**BisectHosting**](https://www.bisecthosting.com/) | BisectHosting provides Minecraft, Valheim and other server hosting services with the highest reliability and lightning fast support since 2012. |
+| [**MineStrator**](https://minestrator.com/) | Looking for the most highend French hosting company for your minecraft server? More than 24,000 members on our discord trust us. Give us a try! |
+| [**VibeGAMES**](https://vibegames.net/) | VibeGAMES is a game server provider that specializes in DDOS protection for the games we offer. We have multiple locations in the US, Brazil, France, Germany, Singapore, Australia and South Africa. |
+| [**HostEZ**](https://hostez.io) | US & EU Rust & Minecraft Hosting. DDoS Protected bare metal, VPS and colocation with low latency, high uptime and maximum availability. EZ! |
+| [**Blueprint**](https://blueprint.zip/?pterodactyl=true) | Create and install Pterodactyl addons and themes with the growing Blueprint framework - the package-manager for Pterodactyl. Use multiple modifications at once without worrying about conflicts and make use of the large extension ecosystem. |
## Documentation
diff --git a/cmd/diagnostics.go b/cmd/diagnostics.go
index aac9d80..c35bac0 100644
--- a/cmd/diagnostics.go
+++ b/cmd/diagnostics.go
@@ -229,8 +229,8 @@ func uploadToHastebin(hbUrl, content string) (string, error) {
return "", err
}
u.Path = path.Join(u.Path, "documents")
- res, err := http.Post(u.String(), "plain/text", r)
- if err != nil || res.StatusCode != 200 {
+ res, err := http.Post(u.String(), "text/plain", r)
+ if err != nil || res.StatusCode < 200 || res.StatusCode >= 300 {
fmt.Println("Failed to upload report to ", u.String(), err)
return "", err
}
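
The hunk above corrects two things in the diagnostics upload: the MIME type ("text/plain" is the registered type; "plain/text" is not) and the success check, which now accepts any 2xx response rather than exactly 200. Below is a minimal standalone sketch of that pattern, not the wings implementation — the package, helper name, and error message are illustrative only.

```go
// Minimal sketch of the corrected upload pattern: post as "text/plain" and
// treat any 2xx status as success.
package diagnostics

import (
	"fmt"
	"net/http"
	"net/url"
	"path"
	"strings"
)

// uploadDocument is a hypothetical helper mirroring the hunk above.
func uploadDocument(base, content string) (string, error) {
	u, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	u.Path = path.Join(u.Path, "documents")

	res, err := http.Post(u.String(), "text/plain", strings.NewReader(content))
	if err != nil {
		return "", err
	}
	defer res.Body.Close()
	if res.StatusCode < 200 || res.StatusCode >= 300 {
		// Unlike the hunk, return an explicit error here so a bad status with
		// a nil transport error does not look like success to the caller.
		return "", fmt.Errorf("unexpected status %d from %s", res.StatusCode, u.String())
	}
	return u.String(), nil
}
```
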
diff --git a/config/config.go b/config/config.go
index 5e2f494..e0d9d99 100644
--- a/config/config.go
+++ b/config/config.go
@@ -12,6 +12,7 @@ import (
"regexp"
"strings"
"sync"
+ "sync/atomic"
"text/template"
"time"
@@ -20,6 +21,7 @@ import (
"github.com/apex/log"
"github.com/creasty/defaults"
"github.com/gbrlsnchs/jwt/v3"
+ "golang.org/x/sys/unix"
"gopkg.in/yaml.v2"
"github.com/Tech-Gamer/nwy-wings/system"
@@ -87,7 +89,7 @@ type ApiConfiguration struct {
// Determines if functionality for allowing remote download of files into server directories
// is enabled on this instance. If set to "true" remote downloads will not be possible for
// servers.
- DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
+ DisableRemoteDownload bool `json:"-" yaml:"disable_remote_download"`
// The maximum size for files uploaded through the Panel in MB.
UploadLimit int64 `default:"100" json:"upload_limit" yaml:"upload_limit"`
@@ -121,23 +123,23 @@ type RemoteQueryConfiguration struct {
// SystemConfiguration defines basic system configuration settings.
type SystemConfiguration struct {
// The root directory where all of the pterodactyl data is stored at.
- RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
+ RootDirectory string `default:"/var/lib/pterodactyl" json:"-" yaml:"root_directory"`
// Directory where logs for server installations and other wings events are logged.
- LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
+ LogDirectory string `default:"/var/log/pterodactyl" json:"-" yaml:"log_directory"`
// Directory where the server data is stored at.
- Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
+ Data string `default:"/var/lib/pterodactyl/volumes" json:"-" yaml:"data"`
// Directory where server archives for transferring will be stored.
- ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
+ ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" json:"-" yaml:"archive_directory"`
// Directory where local backups will be stored on the machine.
- BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
+ BackupDirectory string `default:"/var/lib/pterodactyl/backups" json:"-" yaml:"backup_directory"`
// TmpDirectory specifies where temporary files for Pterodactyl installation processes
// should be created. This supports environments running docker-in-docker.
- TmpDirectory string `default:"/tmp/pterodactyl" yaml:"tmp_directory"`
+ TmpDirectory string `default:"/tmp/pterodactyl" json:"-" yaml:"tmp_directory"`
// The user that should own all of the server files, and be used for containers.
Username string `default:"pterodactyl" yaml:"username"`
@@ -209,6 +211,8 @@ type SystemConfiguration struct {
Backups Backups `yaml:"backups"`
Transfers Transfers `yaml:"transfers"`
+
+ OpenatMode string `default:"auto" yaml:"openat_mode"`
}
type CrashDetection struct {
@@ -302,7 +306,7 @@ type Configuration struct {
// The location where the panel is running that this daemon should connect to
// to collect data and send events.
- PanelLocation string `json:"remote" yaml:"remote"`
+ PanelLocation string `json:"-" yaml:"remote"`
RemoteQuery RemoteQueryConfiguration `json:"remote_query" yaml:"remote_query"`
// AllowedMounts is a list of allowed host-system mount points.
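
Several tag changes in this file (DisableRemoteDownload, the SystemConfiguration directories, PanelLocation) replace a JSON tag with `json:"-"`, which makes encoding/json skip the field entirely, so host paths and the panel URL are dropped from any JSON-serialized view of the configuration while YAML loading is unaffected. A tiny self-contained demonstration with a stand-in struct (not the real configuration types):

```go
// Demonstrates the effect of the `json:"-"` tags above: tagged fields never
// appear in marshaled JSON output.
package main

import (
	"encoding/json"
	"fmt"
)

type exampleConfig struct {
	RootDirectory string `json:"-" yaml:"root_directory"` // hidden from JSON output
	UploadLimit   int64  `json:"upload_limit" yaml:"upload_limit"`
}

func main() {
	b, _ := json.Marshal(exampleConfig{RootDirectory: "/var/lib/pterodactyl", UploadLimit: 100})
	fmt.Println(string(b)) // prints {"upload_limit":100} — RootDirectory is omitted
}
```
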
@@ -671,3 +675,36 @@ func getSystemName() (string, error) {
}
return release["ID"], nil
}
+
+var (
+ openat2 atomic.Bool
+ openat2Set atomic.Bool
+)
+
+func UseOpenat2() bool {
+ if openat2Set.Load() {
+ return openat2.Load()
+ }
+ defer openat2Set.Store(true)
+
+ c := Get()
+ openatMode := c.System.OpenatMode
+ switch openatMode {
+ case "openat2":
+ openat2.Store(true)
+ return true
+ case "openat":
+ openat2.Store(false)
+ return false
+ default:
+ fd, err := unix.Openat2(unix.AT_FDCWD, "/", &unix.OpenHow{})
+ if err != nil {
+ log.WithError(err).Warn("error occurred while checking for openat2 support, falling back to openat")
+ openat2.Store(false)
+ return false
+ }
+ _ = unix.Close(fd)
+ openat2.Store(true)
+ return true
+ }
+}
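
UseOpenat2 probes the kernel once (openat2 landed in Linux 5.6) and caches the answer, honoring an explicit `openat_mode` of "openat2" or "openat" and auto-detecting otherwise. The diff does not show the call sites, so the following is only a hedged sketch of how a caller might branch on it; the package, helper name, and flag choices are assumptions.

```go
// Hedged sketch of a caller branching on UseOpenat2 when opening a path
// relative to a directory file descriptor.
package fsutil

import (
	"golang.org/x/sys/unix"

	"github.com/Tech-Gamer/nwy-wings/config"
)

// openBeneath opens name relative to dirfd, preferring openat2 with
// RESOLVE_BENEATH (the kernel refuses any path resolving outside dirfd)
// when the running kernel supports it.
func openBeneath(dirfd int, name string) (int, error) {
	if config.UseOpenat2() {
		return unix.Openat2(dirfd, name, &unix.OpenHow{
			Flags:   unix.O_RDONLY,
			Resolve: unix.RESOLVE_BENEATH,
		})
	}
	// Fallback for kernels older than 5.6: plain openat, without the
	// in-kernel path-containment guarantees of RESOLVE_BENEATH.
	return unix.Openat(dirfd, name, unix.O_RDONLY, 0)
}
```

RESOLVE_BENEATH is the usual reason to prefer openat2 here: containment is enforced by the kernel during path resolution rather than by userspace path cleaning.
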
diff --git a/config/config_docker.go b/config/config_docker.go
index dafe6bf..74bfdaf 100644
--- a/config/config_docker.go
+++ b/config/config_docker.go
@@ -4,8 +4,8 @@ import (
"encoding/base64"
"sort"
- "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/registry"
"github.com/goccy/go-json"
)
@@ -115,7 +115,7 @@ type RegistryConfiguration struct {
// Base64 returns the authentication for a given registry as a base64 encoded
// string value.
func (c RegistryConfiguration) Base64() (string, error) {
- b, err := json.Marshal(types.AuthConfig{
+ b, err := json.Marshal(registry.AuthConfig{
Username: c.Username,
Password: c.Password,
})
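
The moby API moved AuthConfig into api/types/registry (the old api/types location is deprecated), but the encoded payload is unchanged: a JSON-serialized AuthConfig, base64-encoded, as expected by the Docker API's X-Registry-Auth header or RegistryAuth pull option. A hedged, self-contained sketch of that encoding — URL-safe base64 is assumed here, since the diff only shows the type swap:

```go
// Sketch of producing a Docker registry auth string from the relocated
// registry.AuthConfig type (illustrative credentials only).
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/docker/docker/api/types/registry"
	"github.com/goccy/go-json"
)

func main() {
	b, err := json.Marshal(registry.AuthConfig{
		Username: "example",
		Password: "hunter2",
	})
	if err != nil {
		panic(err)
	}
	// URL-safe base64 of the JSON document, suitable for X-Registry-Auth.
	fmt.Println(base64.URLEncoding.EncodeToString(b))
}
```
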
diff --git a/flake.lock b/flake.lock
new file mode 100644
index 0000000..e25bd2f
--- /dev/null
+++ b/flake.lock
@@ -0,0 +1,85 @@
+{
+ "nodes": {
+ "flake-parts": {
+ "inputs": {
+ "nixpkgs-lib": "nixpkgs-lib"
+ },
+ "locked": {
+ "lastModified": 1706830856,
+ "narHash": "sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=",
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f",
+ "type": "github"
+ },
+ "original": {
+ "owner": "hercules-ci",
+ "repo": "flake-parts",
+ "type": "github"
+ }
+ },
+ "nixpkgs": {
+ "locked": {
+ "lastModified": 1707956935,
+ "narHash": "sha256-ZL2TrjVsiFNKOYwYQozpbvQSwvtV/3Me7Zwhmdsfyu4=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "a4d4fe8c5002202493e87ec8dbc91335ff55552c",
+ "type": "github"
+ },
+ "original": {
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "nixpkgs-lib": {
+ "locked": {
+ "dir": "lib",
+ "lastModified": 1706550542,
+ "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=",
+ "owner": "NixOS",
+ "repo": "nixpkgs",
+ "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652",
+ "type": "github"
+ },
+ "original": {
+ "dir": "lib",
+ "owner": "NixOS",
+ "ref": "nixos-unstable",
+ "repo": "nixpkgs",
+ "type": "github"
+ }
+ },
+ "root": {
+ "inputs": {
+ "flake-parts": "flake-parts",
+ "nixpkgs": "nixpkgs",
+ "treefmt-nix": "treefmt-nix"
+ }
+ },
+ "treefmt-nix": {
+ "inputs": {
+ "nixpkgs": [
+ "nixpkgs"
+ ]
+ },
+ "locked": {
+ "lastModified": 1707300477,
+ "narHash": "sha256-qQF0fEkHlnxHcrKIMRzOETnRBksUK048MXkX0SOmxvA=",
+ "owner": "numtide",
+ "repo": "treefmt-nix",
+ "rev": "ac599dab59a66304eb511af07b3883114f061b9d",
+ "type": "github"
+ },
+ "original": {
+ "owner": "numtide",
+ "repo": "treefmt-nix",
+ "type": "github"
+ }
+ }
+ },
+ "root": "root",
+ "version": 7
+}
diff --git a/flake.nix b/flake.nix
new file mode 100644
index 0000000..a984da8
--- /dev/null
+++ b/flake.nix
@@ -0,0 +1,54 @@
+{
+ description = "Wings";
+
+ inputs = {
+ flake-parts.url = "github:hercules-ci/flake-parts";
+ nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
+
+ treefmt-nix = {
+ url = "github:numtide/treefmt-nix";
+ inputs.nixpkgs.follows = "nixpkgs";
+ };
+ };
+
+ outputs = {...} @ inputs:
+ inputs.flake-parts.lib.mkFlake {inherit inputs;} {
+ systems = ["aarch64-darwin" "aarch64-linux" "x86_64-darwin" "x86_64-linux"];
+
+ imports = [
+ inputs.treefmt-nix.flakeModule
+ ];
+
+ perSystem = {system, ...}: let
+ pkgs = import inputs.nixpkgs {inherit system;};
+ in {
+ devShells.default = pkgs.mkShell {
+ buildInputs = with pkgs; [
+ go_1_22
+ gofumpt
+ golangci-lint
+ gotools
+ ];
+ };
+
+ treefmt = {
+ projectRootFile = "flake.nix";
+
+ programs = {
+ alejandra.enable = true;
+ deadnix.enable = true;
+ gofumpt = {
+ enable = true;
+ extra = true;
+ };
+ shellcheck.enable = true;
+ shfmt = {
+ enable = true;
+ indent_size = 0; # 0 causes shfmt to use tabs
+ };
+ yamlfmt.enable = true;
+ };
+ };
+ };
+ };
+}
diff --git a/go.mod b/go.mod
index 2c5fc65..d8792c3 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/Tech-Gamer/nwy-wings
-go 1.18
+go 1.21
require (
emperror.dev/errors v0.8.1
@@ -10,27 +10,27 @@ require (
github.com/acobaugh/osrelease v0.1.0
github.com/apex/log v1.9.0
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
- github.com/beevik/etree v1.2.0
+ github.com/beevik/etree v1.3.0
github.com/buger/jsonparser v1.1.1
- github.com/cenkalti/backoff/v4 v4.2.1
+ github.com/cenkalti/backoff/v4 v4.3.0
github.com/creasty/defaults v1.7.0
- github.com/docker/docker v24.0.7+incompatible
- github.com/docker/go-connections v0.4.0
+ github.com/docker/docker v25.0.4+incompatible
+ github.com/docker/go-connections v0.5.0
github.com/fatih/color v1.16.0
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf
github.com/gabriel-vasile/mimetype v1.4.3
github.com/gammazero/workerpool v1.1.3
github.com/gbrlsnchs/jwt/v3 v3.0.1
github.com/gin-gonic/gin v1.9.1
- github.com/glebarez/sqlite v1.10.0
+ github.com/glebarez/sqlite v1.11.0
github.com/go-co-op/gocron v1.37.0
github.com/goccy/go-json v0.10.2
- github.com/google/uuid v1.5.0
+ github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.5.1
github.com/iancoleman/strcase v0.3.0
github.com/icza/dyno v0.0.0-20230330125955-09f820a8d9c0
github.com/juju/ratelimit v1.0.2
- github.com/karrick/godirwalk v1.17.0
+ github.com/klauspost/compress v1.17.8
github.com/klauspost/pgzip v1.2.6
github.com/magiconair/properties v1.8.7
github.com/mattn/go-colorable v0.1.13
@@ -40,50 +40,54 @@ require (
github.com/pkg/sftp v1.13.6
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
github.com/spf13/cobra v1.8.0
- github.com/stretchr/testify v1.8.4
- golang.org/x/crypto v0.17.0
- golang.org/x/sync v0.5.0
+ github.com/stretchr/testify v1.9.0
+ golang.org/x/crypto v0.22.0
+ golang.org/x/sync v0.7.0
+ golang.org/x/sys v0.19.0
gopkg.in/ini.v1 v1.67.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
- gorm.io/gorm v1.25.5
+ gorm.io/gorm v1.25.9
)
require (
github.com/Microsoft/go-winio v0.6.1 // indirect
- github.com/Microsoft/hcsshim v0.11.4 // indirect
- github.com/andybalholm/brotli v1.0.6 // indirect
+ github.com/Microsoft/hcsshim v0.12.2 // indirect
+ github.com/andybalholm/brotli v1.1.0 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
- github.com/bodgit/sevenzip v1.4.5 // indirect
+ github.com/bodgit/sevenzip v1.5.1 // indirect
github.com/bodgit/windows v1.0.1 // indirect
- github.com/bytedance/sonic v1.10.2 // indirect
+ github.com/bytedance/sonic v1.11.3 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
github.com/chenzhuoyu/iasm v0.9.1 // indirect
+ github.com/containerd/log v0.1.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/distribution/reference v0.5.0 // indirect
- github.com/docker/distribution v2.8.3+incompatible // indirect
+ github.com/distribution/reference v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gammazero/deque v0.2.1 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
- github.com/glebarez/go-sqlite v1.21.2 // indirect
+ github.com/glebarez/go-sqlite v1.22.0 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
- github.com/go-playground/validator/v10 v10.16.0 // indirect
+ github.com/go-playground/validator/v10 v10.19.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
- github.com/klauspost/compress v1.17.4 // indirect
- github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/kr/fs v0.1.0 // indirect
- github.com/leodido/go-urn v1.2.4 // indirect
+ github.com/leodido/go-urn v1.4.0 // indirect
github.com/magefile/mage v1.15.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
@@ -91,11 +95,12 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
+ github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
- github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
- github.com/pelletier/go-toml/v2 v2.1.1 // indirect
- github.com/pierrec/lz4/v4 v4.1.19 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
+ github.com/pelletier/go-toml/v2 v2.2.0 // indirect
+ github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
@@ -105,23 +110,28 @@ require (
github.com/therootcompany/xz v1.0.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
- github.com/ulikunitz/xz v0.5.11 // indirect
+ github.com/ulikunitz/xz v0.5.12 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect
+ go.opentelemetry.io/otel v1.25.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 // indirect
+ go.opentelemetry.io/otel/metric v1.25.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.24.0 // indirect
+ go.opentelemetry.io/otel/trace v1.25.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
- golang.org/x/arch v0.6.0 // indirect
- golang.org/x/mod v0.14.0 // indirect
- golang.org/x/net v0.19.0 // indirect
- golang.org/x/sys v0.15.0 // indirect
- golang.org/x/term v0.15.0 // indirect
+ golang.org/x/arch v0.7.0 // indirect
+ golang.org/x/mod v0.17.0 // indirect
+ golang.org/x/net v0.24.0 // indirect
+ golang.org/x/term v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect
- golang.org/x/tools v0.16.1 // indirect
+ golang.org/x/tools v0.20.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
- google.golang.org/protobuf v1.31.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gotest.tools/v3 v3.0.2 // indirect
- modernc.org/libc v1.38.0 // indirect
+ modernc.org/libc v1.49.3 // indirect
modernc.org/mathutil v1.6.0 // indirect
- modernc.org/memory v1.7.2 // indirect
- modernc.org/sqlite v1.28.0 // indirect
+ modernc.org/memory v1.8.0 // indirect
+ modernc.org/sqlite v1.29.6 // indirect
)
diff --git a/go.sum b/go.sum
index ab64d26..64829c6 100644
--- a/go.sum
+++ b/go.sum
@@ -27,16 +27,16 @@ github.com/Jeffail/gabs/v2 v2.7.0 h1:Y2edYaTcE8ZpRsR2AtmPu5xQdFDIthFG0jYhu5PY8kg
github.com/Jeffail/gabs/v2 v2.7.0/go.mod h1:dp5ocw1FvBBQYssgHsG7I1WYsiLRtkUaB1FEtSwvNUw=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
-github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
+github.com/Microsoft/hcsshim v0.12.2 h1:AcXy+yfRvrx20g9v7qYaJv5Rh+8GaHOS6b8G6Wx/nKs=
+github.com/Microsoft/hcsshim v0.12.2/go.mod h1:RZV12pcHCXQ42XnlQ3pz6FZfmrC1C+R4gaOHhRNML1g=
github.com/NYTimes/logrotate v1.0.0 h1:6jFGbon6jOtpy3t3kwZZKS4Gdmf1C/Wv5J4ll4Xn5yk=
github.com/NYTimes/logrotate v1.0.0/go.mod h1:GxNz1cSw1c6t99PXoZlw+nm90H6cyQyrH66pjVv7x88=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s=
github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w=
github.com/acobaugh/osrelease v0.1.0 h1:Yb59HQDGGNhCj4suHaFQQfBps5wyoKLSSX/J/+UifRE=
github.com/acobaugh/osrelease v0.1.0/go.mod h1:4bFEs0MtgHNHBrmHCt67gNisnabCRAlzdVasCEGHTWY=
-github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
-github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
+github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0=
github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
@@ -46,24 +46,22 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
-github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw=
-github.com/beevik/etree v1.2.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
+github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU=
+github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc=
github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU=
github.com/bodgit/plumbing v1.3.0/go.mod h1:JOTb4XiRu5xfnmdnDJo6GmSbSbtSyufrsyZFByMtKEs=
-github.com/bodgit/sevenzip v1.4.4 h1:cI/PfeqDXESY6mwBn7Q9LyxlVkHaOPyaDev3mR7qzSA=
-github.com/bodgit/sevenzip v1.4.4/go.mod h1:G/5VOeQBGI2TsdyF88xpszj2SRvvD6srHdq9gRBQ9Is=
-github.com/bodgit/sevenzip v1.4.5 h1:HFJQ+nbjppfyf2xbQEJBbmVo+o2kTg1FXV4i7YOx87s=
-github.com/bodgit/sevenzip v1.4.5/go.mod h1:LAcAg/UQzyjzCQSGBPZFYzoiHMfT6Gk+3tMSjUk3foY=
+github.com/bodgit/sevenzip v1.5.1 h1:rVj0baZsooZFy64DJN0zQogPzhPrT8BQ8TTRd1H4WHw=
+github.com/bodgit/sevenzip v1.5.1/go.mod h1:Q3YMySuVWq6pyGEolyIE98828lOfEoeWg5zeH6x22rc=
github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
-github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZFE=
-github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/bytedance/sonic v1.11.3 h1:jRN+yEjakWh8aK5FzrciUHG8OFXK+4/KrAX/ysEtHAA=
+github.com/bytedance/sonic v1.11.3/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
@@ -76,6 +74,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -86,14 +86,12 @@ github.com/creasty/defaults v1.7.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbD
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
-github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
-github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v25.0.4+incompatible h1:XITZTrq+52tZyZxUOtFIahUf3aH367FLxJzt9vZeAF8=
+github.com/docker/docker v25.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
@@ -106,6 +104,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf h1:NrF81UtW8gG2LBGkXFQFqlfNnvMt9WdB46sfdJY4oqc=
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -121,24 +121,28 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
-github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo=
-github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k=
-github.com/glebarez/sqlite v1.10.0 h1:u4gt8y7OND/cCei/NMHmfbLxF6xP2wgKcT/BJf2pYkc=
-github.com/glebarez/sqlite v1.10.0/go.mod h1:IJ+lfSOmiekhQsFTJRx/lHtGYmCdtAiTaf5wI9u5uHA=
-github.com/go-co-op/gocron v1.36.0 h1:sEmAwg57l4JWQgzaVWYfKZ+w13uHOqeOtwjo72Ll5Wc=
-github.com/go-co-op/gocron v1.36.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY=
+github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
+github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
+github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
+github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0=
github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.16.0 h1:x+plE831WK4vaKHO/jpgUGsvLKIqRRkz6M78GuJAfGE=
-github.com/go-playground/validator/v10 v10.16.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
+github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4=
+github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
@@ -155,7 +159,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -165,23 +170,26 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -189,6 +197,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -211,19 +221,17 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI=
github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
-github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
-github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
+github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
-github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
+github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
@@ -239,8 +247,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
-github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
@@ -271,25 +279,22 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
+github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
-github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
-github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
-github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
-github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI=
-github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
-github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
-github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
-github.com/pierrec/lz4/v4 v4.1.19 h1:tYLzDnjDXh9qIxSTKHwXwOYmm9d887Y7Y1ZkyXYHAN4=
-github.com/pierrec/lz4/v4 v4.1.19/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pelletier/go-toml/v2 v2.2.0 h1:QLgLl2yMN7N+ruc31VynXs1vhMZa7CeHHejIeBAsoHo=
+github.com/pelletier/go-toml/v2 v2.2.0/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -326,6 +331,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -334,8 +340,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
@@ -350,8 +357,8 @@ github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
-github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
-github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
+github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
+github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@@ -359,19 +366,36 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8=
+go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k=
+go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM=
+go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA=
+go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM=
+go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I=
+go.opentelemetry.io/proto/otlp v1.1.0 h1:2Di21piLrCqJ3U3eXGCTPHE9R8Nh+0uglSnOyxikMeI=
+go.opentelemetry.io/proto/otlp v1.1.0/go.mod h1:GpBHCBWiqvVLDqmHZsoMM3C5ySeKTC7ej/RNTae6MdY=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
-golang.org/x/arch v0.6.0 h1:S0JTfE48HbRj80+4tbvZDYsJ3tGv6BUU3XxyZ7CirAc=
-golang.org/x/arch v0.6.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
+golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc=
+golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -381,10 +405,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -412,8 +434,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -434,8 +456,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -449,8 +471,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -476,14 +498,14 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -527,10 +549,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
-golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
-golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
-golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
+golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -563,6 +583,11 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM=
+google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -570,9 +595,10 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
+google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -591,8 +617,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls=
-gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
+gorm.io/gorm v1.25.9 h1:wct0gxZIELDk8+ZqF/MVnHLkA1rvYlBWUMv2EdsK1g8=
+gorm.io/gorm v1.25.9/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -600,18 +626,30 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-modernc.org/libc v1.35.0 h1:EQ4szx6Q/QLZuysmAnI4dfRnKbAbNlENp23ruvTJ2nE=
-modernc.org/libc v1.35.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
-modernc.org/libc v1.38.0 h1:o4Lpk0zNDSdsjfEXnF1FGXWQ9PDi1NOdWcLP5n13FGo=
-modernc.org/libc v1.38.0/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
+modernc.org/cc/v4 v4.20.0 h1:45Or8mQfbUqJOG9WaxvlFYOAQO0lQ5RvqBcFCXngjxk=
+modernc.org/cc/v4 v4.20.0/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
+modernc.org/ccgo/v4 v4.16.0 h1:ofwORa6vx2FMm0916/CkZjpFPSR70VwTjUCe2Eg5BnA=
+modernc.org/ccgo/v4 v4.16.0/go.mod h1:dkNyWIjFrVIZ68DTo36vHK+6/ShBn4ysU61So6PIqCI=
+modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
+modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
+modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
+modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
+modernc.org/libc v1.49.3 h1:j2MRCRdwJI2ls/sGbeSk0t2bypOG/uvPZUsGQFDulqg=
+modernc.org/libc v1.49.3/go.mod h1:yMZuGkn7pXbKfoT/M35gFJOAEdSKdxL0q64sF7KqCDo=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
-modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
-modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
-modernc.org/sqlite v1.27.0 h1:MpKAHoyYB7xqcwnUwkuD+npwEa0fojF0B5QRbN+auJ8=
-modernc.org/sqlite v1.27.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
-modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
-modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
+modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
+modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
+modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
+modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
+modernc.org/sqlite v1.29.6 h1:0lOXGrycJPptfHDuohfYgNqoe4hu+gYuN/pKgY5XjS4=
+modernc.org/sqlite v1.29.6/go.mod h1:S02dvcmm7TnTRvGhv8IGYyLnIt7AS2KPaB1F/71p75U=
+modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
+modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
diff --git a/internal/ufs/LICENSE b/internal/ufs/LICENSE
new file mode 100644
index 0000000..287f516
--- /dev/null
+++ b/internal/ufs/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2024 Matthew Penner
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/internal/ufs/README.md b/internal/ufs/README.md
new file mode 100644
index 0000000..3f67df3
--- /dev/null
+++ b/internal/ufs/README.md
@@ -0,0 +1,17 @@
+# Filesystem
+
+## Licensing
+
+Most code in this package is licensed under `MIT` with some exceptions.
+
+The following files are licensed under `BSD-3-Clause` because they were copied
+verbatim from, or derived from, [Go](https://go.dev)'s source code.
+
+- [`file_posix.go`](./file_posix.go)
+- [`mkdir_unix.go`](./mkdir_unix.go)
+- [`path_unix.go`](./path_unix.go)
+- [`removeall_unix.go`](./removeall_unix.go)
+- [`stat_unix.go`](./stat_unix.go)
+- [`walk.go`](./walk.go)
+
+These changes are not associated with, nor endorsed by, The Go Authors.
diff --git a/internal/ufs/doc.go b/internal/ufs/doc.go
new file mode 100644
index 0000000..85ad991
--- /dev/null
+++ b/internal/ufs/doc.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+// Package ufs provides an abstraction layer for performing I/O on filesystems.
+// This package is designed to be used in place of standard `os` package I/O
+// calls, and is not designed to be used as a generic filesystem abstraction
+// like the `io/fs` package.
+//
+// The primary use-case of this package was to provide a "chroot-like" `os`
+// wrapper, so we can safely sandbox I/O operations within a directory and
+// use untrusted arbitrary paths.
+package ufs
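
The package comment above describes a chroot-like wrapper; a minimal usage sketch follows, assuming the internal import path `github.com/pterodactyl/wings/internal/ufs` (so it only compiles from inside the wings module) and a hypothetical sandbox root of `/var/lib/example`:

```go
package main

import (
	"log"

	"github.com/pterodactyl/wings/internal/ufs"
)

func main() {
	// Sandbox all I/O beneath the base path; the second argument opts in to
	// the openat2 code path on kernels that support it.
	fs, err := ufs.NewUnixFS("/var/lib/example", true)
	if err != nil {
		log.Fatal(err)
	}
	defer fs.Close()

	// Paths are interpreted relative to the sandbox root.
	f, err := fs.Create("config.yml")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := f.Write([]byte("hello\n")); err != nil {
		log.Fatal(err)
	}

	// An attempt to escape the sandbox is rejected rather than resolved.
	if _, err := fs.Open("../../etc/passwd"); err != nil {
		log.Println(err) // expected: bad path resolution
	}
}
```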
diff --git a/internal/ufs/error.go b/internal/ufs/error.go
new file mode 100644
index 0000000..0ba5262
--- /dev/null
+++ b/internal/ufs/error.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+package ufs
+
+import (
+ "errors"
+ iofs "io/fs"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+var (
+ // ErrIsDirectory is an error for when an operation that operates only on
+ // files is given a path to a directory.
+ ErrIsDirectory = errors.New("is a directory")
+ // ErrNotDirectory is an error for when an operation that operates only on
+ // directories is given a path to a file.
+ ErrNotDirectory = errors.New("not a directory")
+ // ErrBadPathResolution is an error for when a sand-boxed filesystem
+ // resolves a given path to a forbidden location.
+ ErrBadPathResolution = errors.New("bad path resolution")
+ // ErrNotRegular is an error for when an operation that operates only on
+ // regular files is passed something other than a regular file.
+ ErrNotRegular = errors.New("not a regular file")
+
+ // ErrClosed is an error for when an entry was accessed after being closed.
+ ErrClosed = iofs.ErrClosed
+ // ErrInvalid is an error for when an invalid argument was used.
+ ErrInvalid = iofs.ErrInvalid
+ // ErrExist is an error for when an entry already exists.
+ ErrExist = iofs.ErrExist
+ // ErrNotExist is an error for when an entry does not exist.
+ ErrNotExist = iofs.ErrNotExist
+ // ErrPermission is an error for when the required permissions to perform an
+ // operation are missing.
+ ErrPermission = iofs.ErrPermission
+)
+
+// LinkError records an error during a link or symlink or rename
+// system call and the paths that caused it.
+type LinkError = os.LinkError
+
+// PathError records an error and the operation and file path that caused it.
+type PathError = iofs.PathError
+
+// SyscallError records an error from a specific system call.
+type SyscallError = os.SyscallError
+
+// NewSyscallError returns, as an error, a new SyscallError
+// with the given system call name and error details.
+// As a convenience, if err is nil, NewSyscallError returns nil.
+func NewSyscallError(syscall string, err error) error {
+ return os.NewSyscallError(syscall, err)
+}
+
+// convertErrorType converts errors into our custom errors to ensure consistent
+// error values.
+func convertErrorType(err error) error {
+ if err == nil {
+ return nil
+ }
+ var pErr *PathError
+ switch {
+ case errors.As(err, &pErr):
+ switch {
+ // File exists
+ case errors.Is(pErr.Err, unix.EEXIST):
+ return &PathError{
+ Op: pErr.Op,
+ Path: pErr.Path,
+ Err: ErrExist,
+ }
+ // Is a directory
+ case errors.Is(pErr.Err, unix.EISDIR):
+ return &PathError{
+ Op: pErr.Op,
+ Path: pErr.Path,
+ Err: ErrIsDirectory,
+ }
+ // Not a directory
+ case errors.Is(pErr.Err, unix.ENOTDIR):
+ return &PathError{
+ Op: pErr.Op,
+ Path: pErr.Path,
+ Err: ErrNotDirectory,
+ }
+ // No such file or directory
+ case errors.Is(pErr.Err, unix.ENOENT):
+ return &PathError{
+ Op: pErr.Op,
+ Path: pErr.Path,
+ Err: ErrNotExist,
+ }
+ // Operation not permitted
+ case errors.Is(pErr.Err, unix.EPERM):
+ return &PathError{
+ Op: pErr.Op,
+ Path: pErr.Path,
+ Err: ErrPermission,
+ }
+ // Invalid cross-device link
+ case errors.Is(pErr.Err, unix.EXDEV):
+ return &PathError{
+ Op: pErr.Op,
+ Path: pErr.Path,
+ Err: ErrBadPathResolution,
+ }
+ // Too many levels of symbolic links
+ case errors.Is(pErr.Err, unix.ELOOP):
+ return &PathError{
+ Op: pErr.Op,
+ Path: pErr.Path,
+ Err: ErrBadPathResolution,
+ }
+ }
+ }
+ return err
+}
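
Because `convertErrorType` rewraps raw errno values into the sentinel errors above, callers can branch with `errors.Is` instead of comparing `unix` error codes directly. A short sketch, using the same hypothetical sandbox root and internal import path as before:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/pterodactyl/wings/internal/ufs"
)

func main() {
	fs, err := ufs.NewUnixFS("/var/lib/example", true)
	if err != nil {
		log.Fatal(err)
	}
	defer fs.Close()

	_, err = fs.Open("does-not-exist.yml")
	switch {
	case errors.Is(err, ufs.ErrNotExist):
		fmt.Println("file is missing") // unix.ENOENT, converted by convertErrorType
	case errors.Is(err, ufs.ErrBadPathResolution):
		fmt.Println("path escaped the sandbox")
	case err != nil:
		log.Fatal(err)
	}
}
```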
diff --git a/internal/ufs/file.go b/internal/ufs/file.go
new file mode 100644
index 0000000..bcdd189
--- /dev/null
+++ b/internal/ufs/file.go
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+package ufs
+
+import (
+ "io"
+ iofs "io/fs"
+
+ "golang.org/x/sys/unix"
+)
+
+// DirEntry is an entry read from a directory.
+type DirEntry = iofs.DirEntry
+
+// File describes a readable and/or writable file from a Filesystem.
+type File interface {
+ // Name returns the base name of the file.
+ Name() string
+
+ // Stat returns the FileInfo structure describing the file.
+ // If there is an error, it will be of type *PathError.
+ Stat() (FileInfo, error)
+
+ // ReadDir reads the contents of the directory associated with the file f
+ // and returns a slice of DirEntry values in directory order.
+ // Subsequent calls on the same file will yield later DirEntry records in the directory.
+ //
+ // If n > 0, ReadDir returns at most n DirEntry records.
+ // In this case, if ReadDir returns an empty slice, it will return an error explaining why.
+ // At the end of a directory, the error is io.EOF.
+ //
+ // If n <= 0, ReadDir returns all the DirEntry records remaining in the directory.
+ // When it succeeds, it returns a nil error (not io.EOF).
+ ReadDir(n int) ([]DirEntry, error)
+
+ // Readdirnames reads the contents of the directory associated with file
+ // and returns a slice of up to n names of files in the directory,
+ // in directory order. Subsequent calls on the same file will yield
+ // further names.
+ //
+ // If n > 0, Readdirnames returns at most n names. In this case, if
+ // Readdirnames returns an empty slice, it will return a non-nil error
+ // explaining why. At the end of a directory, the error is io.EOF.
+ //
+ // If n <= 0, Readdirnames returns all the names from the directory in
+ // a single slice. In this case, if Readdirnames succeeds (reads all
+ // the way to the end of the directory), it returns the slice and a
+ // nil error. If it encounters an error before the end of the
+ // directory, Readdirnames returns the names read until that point and
+ // a non-nil error.
+ Readdirnames(n int) (names []string, err error)
+
+ // Fd returns the integer Unix file descriptor referencing the open file.
+ // If f is closed, the file descriptor becomes invalid.
+ // If f is garbage collected, a finalizer may close the file descriptor,
+ // making it invalid; see runtime.SetFinalizer for more information on when
+ // a finalizer might be run. On Unix systems this will cause the SetDeadline
+ // methods to stop working.
+ // Because file descriptors can be reused, the returned file descriptor may
+ // only be closed through the Close method of f, or by its finalizer during
+ // garbage collection. Otherwise, during garbage collection the finalizer
+ // may close an unrelated file descriptor with the same (reused) number.
+ //
+ // As an alternative, see the f.SyscallConn method.
+ Fd() uintptr
+
+ // Truncate changes the size of the file.
+ // It does not change the I/O offset.
+ // If there is an error, it will be of type *PathError.
+ Truncate(size int64) error
+
+ io.Closer
+
+ io.Reader
+ io.ReaderAt
+ io.ReaderFrom
+
+ io.Writer
+ io.WriterAt
+
+ io.Seeker
+}
+
+// FileInfo describes a file and is returned by Stat and Lstat.
+type FileInfo = iofs.FileInfo
+
+// FileMode represents a file's mode and permission bits.
+// The bits have the same definition on all systems, so that
+// information about files can be moved from one system
+// to another portably. Not all bits apply to all systems.
+// The only required bit is ModeDir for directories.
+type FileMode = iofs.FileMode
+
+// The defined file mode bits are the most significant bits of the FileMode.
+// The nine least-significant bits are the standard Unix rwxrwxrwx permissions.
+// The values of these bits should be considered part of the public API and
+// may be used in wire protocols or disk representations: they must not be
+// changed, although new bits might be added.
+const (
+ // ModeDir represents a directory.
+ // d: is a directory
+ ModeDir = iofs.ModeDir
+ // ModeAppend represents an append-only file.
+ // a: append-only
+ ModeAppend = iofs.ModeAppend
+ // ModeExclusive represents an exclusive file.
+ // l: exclusive use
+ ModeExclusive = iofs.ModeExclusive
+ // ModeTemporary .
+ // T: temporary file; Plan 9 only.
+ ModeTemporary = iofs.ModeTemporary
+ // ModeSymlink .
+ // L: symbolic link.
+ ModeSymlink = iofs.ModeSymlink
+ // ModeDevice .
+ // D: device file.
+ ModeDevice = iofs.ModeDevice
+ // ModeNamedPipe .
+ // p: named pipe (FIFO)
+ ModeNamedPipe = iofs.ModeNamedPipe
+ // ModeSocket .
+ // S: Unix domain socket.
+ ModeSocket = iofs.ModeSocket
+ // ModeSetuid .
+ // u: setuid
+ ModeSetuid = iofs.ModeSetuid
+ // ModeSetgid .
+ // g: setgid
+ ModeSetgid = iofs.ModeSetgid
+ // ModeCharDevice .
+ // c: Unix character device, when ModeDevice is set
+ ModeCharDevice = iofs.ModeCharDevice
+ // ModeSticky .
+ // t: sticky
+ ModeSticky = iofs.ModeSticky
+ // ModeIrregular .
+ // ?: non-regular file; nothing else is known about this file.
+ ModeIrregular = iofs.ModeIrregular
+
+ // ModeType .
+ ModeType = iofs.ModeType
+
+ // ModePerm .
+ // Unix permission bits, 0o777.
+ ModePerm = iofs.ModePerm
+)
+
+const (
+ // O_RDONLY opens the file read-only.
+ O_RDONLY = unix.O_RDONLY
+ // O_WRONLY opens the file write-only.
+ O_WRONLY = unix.O_WRONLY
+ // O_RDWR opens the file read-write.
+ O_RDWR = unix.O_RDWR
+ // O_APPEND appends data to the file when writing.
+ O_APPEND = unix.O_APPEND
+ // O_CREATE creates a new file if it doesn't exist.
+ O_CREATE = unix.O_CREAT
+	// O_EXCL is used with O_CREATE; the file must not exist.
+	O_EXCL = unix.O_EXCL
+	// O_SYNC opens the file for synchronous I/O.
+	O_SYNC = unix.O_SYNC
+	// O_TRUNC truncates a regular writable file when opened.
+	O_TRUNC = unix.O_TRUNC
+ // O_DIRECTORY opens a directory only. If the entry is not a directory an
+ // error will be returned.
+ O_DIRECTORY = unix.O_DIRECTORY
+ // O_NOFOLLOW opens the exact path given without following symlinks.
+ O_NOFOLLOW = unix.O_NOFOLLOW
+ O_CLOEXEC = unix.O_CLOEXEC
+ O_LARGEFILE = unix.O_LARGEFILE
+)
+
+const (
+ AT_SYMLINK_NOFOLLOW = unix.AT_SYMLINK_NOFOLLOW
+ AT_REMOVEDIR = unix.AT_REMOVEDIR
+ AT_EMPTY_PATH = unix.AT_EMPTY_PATH
+)
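
The `ReadDir(n)` contract documented on the `File` interface mirrors `os.File`: with `n > 0` it pages through the directory and signals the end with `io.EOF`. A sketch of that paging loop, assuming a hypothetical `backups` directory inside the same illustrative sandbox:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"log"

	"github.com/pterodactyl/wings/internal/ufs"
)

func main() {
	fs, err := ufs.NewUnixFS("/var/lib/example", true)
	if err != nil {
		log.Fatal(err)
	}
	defer fs.Close()

	d, err := fs.OpenFile("backups", ufs.O_DIRECTORY|ufs.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer d.Close()

	for {
		entries, err := d.ReadDir(3) // at most three entries per call
		if errors.Is(err, io.EOF) {
			break // with n > 0, the end of the directory is io.EOF
		}
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range entries {
			fmt.Println(e.Name(), e.IsDir())
		}
	}
}
```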
diff --git a/internal/ufs/file_posix.go b/internal/ufs/file_posix.go
new file mode 100644
index 0000000..7ce0634
--- /dev/null
+++ b/internal/ufs/file_posix.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code in this file was copied from `go/src/os/file_posix.go`.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the `go.LICENSE` file.
+
+//go:build unix || (js && wasm) || wasip1 || windows
+
+package ufs
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// ignoringEINTR makes a function call and repeats it if it returns an
+// EINTR error. This appears to be required even though we install all
+// signal handlers with SA_RESTART: see https://go.dev/issue/22838,
+// https://go.dev/issue/38033, https://go.dev/issue/38836,
+// https://go.dev/issue/40846. Also, https://go.dev/issue/20400 and
+// https://go.dev/issue/36644 are issues in which a signal handler is
+// installed without setting SA_RESTART. None of these are the common case,
+// but there are enough of them that it seems that we can't avoid
+// an EINTR loop.
+func ignoringEINTR(fn func() error) error {
+ for {
+ err := fn()
+ if err != unix.EINTR {
+ return err
+ }
+ }
+}
+
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
+func syscallMode(i FileMode) (o FileMode) {
+ o |= i.Perm()
+ if i&ModeSetuid != 0 {
+ o |= unix.S_ISUID
+ }
+ if i&ModeSetgid != 0 {
+ o |= unix.S_ISGID
+ }
+ if i&ModeSticky != 0 {
+ o |= unix.S_ISVTX
+ }
+ // No mapping for Go's ModeTemporary (plan9 only).
+ return
+}
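
A standalone worked example of the translation `syscallMode` performs (re-implemented here because the helper is unexported): Go keeps setuid, setgid, and sticky in high `FileMode` bits, while the kernel expects `S_ISUID`, `S_ISGID`, and `S_ISVTX` alongside the permission bits.

```go
package main

import (
	"fmt"
	iofs "io/fs"

	"golang.org/x/sys/unix"
)

// toSyscallMode mirrors the unexported syscallMode helper above.
func toSyscallMode(m iofs.FileMode) uint32 {
	o := uint32(m.Perm())
	if m&iofs.ModeSetuid != 0 {
		o |= unix.S_ISUID
	}
	if m&iofs.ModeSetgid != 0 {
		o |= unix.S_ISGID
	}
	if m&iofs.ModeSticky != 0 {
		o |= unix.S_ISVTX
	}
	return o
}

func main() {
	m := iofs.FileMode(0o755) | iofs.ModeSetgid
	fmt.Printf("%v -> %#o\n", m, toSyscallMode(m)) // prints "grwxr-xr-x -> 02755"
}
```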
diff --git a/internal/ufs/filesystem.go b/internal/ufs/filesystem.go
new file mode 100644
index 0000000..3fa1682
--- /dev/null
+++ b/internal/ufs/filesystem.go
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+package ufs
+
+import (
+ "time"
+)
+
+// Filesystem represents a filesystem capable of performing I/O operations.
+type Filesystem interface {
+ // Chmod changes the mode of the named file to mode.
+ //
+ // If the file is a symbolic link, it changes the mode of the link's target.
+ // If there is an error, it will be of type *PathError.
+ //
+ // A different subset of the mode bits are used, depending on the
+ // operating system.
+ //
+ // On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
+ // ModeSticky are used.
+ //
+ // On Windows, only the 0200 bit (owner writable) of mode is used; it
+ // controls whether the file's read-only attribute is set or cleared.
+ // The other bits are currently unused. For compatibility with Go 1.12
+ // and earlier, use a non-zero mode. Use mode 0400 for a read-only
+ // file and 0600 for a readable+writable file.
+ //
+ // On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
+ // and ModeTemporary are used.
+ Chmod(name string, mode FileMode) error
+
+ // Chown changes the numeric uid and gid of the named file.
+ //
+ // If the file is a symbolic link, it changes the uid and gid of the link's target.
+ // A uid or gid of -1 means to not change that value.
+ // If there is an error, it will be of type *PathError.
+ //
+ // On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
+ // EPLAN9 error, wrapped in *PathError.
+ Chown(name string, uid, gid int) error
+
+ // Lchown changes the numeric uid and gid of the named file.
+ //
+ // If the file is a symbolic link, it changes the uid and gid of the link itself.
+ // If there is an error, it will be of type *PathError.
+ //
+ // On Windows, it always returns the syscall.EWINDOWS error, wrapped
+ // in *PathError.
+ Lchown(name string, uid, gid int) error
+
+ // Chtimes changes the access and modification times of the named
+ // file, similar to the Unix utime() or utimes() functions.
+ //
+ // The underlying filesystem may truncate or round the values to a
+ // less precise time unit.
+ //
+ // If there is an error, it will be of type *PathError.
+ Chtimes(name string, atime, mtime time.Time) error
+
+ // Create creates or truncates the named file. If the file already exists,
+ // it is truncated.
+ //
+ // If the file does not exist, it is created with mode 0666
+ // (before umask). If successful, methods on the returned File can
+ // be used for I/O; the associated file descriptor has mode O_RDWR.
+ // If there is an error, it will be of type *PathError.
+ Create(name string) (File, error)
+
+ // Mkdir creates a new directory with the specified name and permission
+ // bits (before umask).
+ //
+ // If there is an error, it will be of type *PathError.
+ Mkdir(name string, perm FileMode) error
+
+ // MkdirAll creates a directory named path, along with any necessary
+ // parents, and returns nil, or else returns an error.
+ //
+ // The permission bits perm (before umask) are used for all
+ // directories that MkdirAll creates.
+ // If path is already a directory, MkdirAll does nothing
+ // and returns nil.
+ MkdirAll(path string, perm FileMode) error
+
+ // Open opens the named file for reading.
+ //
+ // If successful, methods on the returned file can be used for reading; the
+ // associated file descriptor has mode O_RDONLY.
+ //
+ // If there is an error, it will be of type *PathError.
+ Open(name string) (File, error)
+
+ // OpenFile is the generalized open call; most users will use Open
+ // or Create instead. It opens the named file with specified flag
+ // (O_RDONLY etc.).
+ //
+ // If the file does not exist, and the O_CREATE flag
+ // is passed, it is created with mode perm (before umask). If successful,
+ // methods on the returned File can be used for I/O.
+ //
+ // If there is an error, it will be of type *PathError.
+ OpenFile(name string, flag int, perm FileMode) (File, error)
+
+	// ReadDir reads the named directory, returning all its directory entries
+	// sorted by filename.
+ // If an error occurs reading the directory, ReadDir returns the entries it
+ // was able to read before the error, along with the error.
+ ReadDir(name string) ([]DirEntry, error)
+
+ // Remove removes the named file or (empty) directory.
+ //
+ // If there is an error, it will be of type *PathError.
+ Remove(name string) error
+
+ // RemoveAll removes path and any children it contains.
+ //
+ // It removes everything it can but returns the first error
+ // it encounters. If the path does not exist, RemoveAll
+ // returns nil (no error).
+ //
+ // If there is an error, it will be of type *PathError.
+ RemoveAll(path string) error
+
+ // Rename renames (moves) oldpath to newpath.
+ //
+ // If newpath already exists and is not a directory, Rename replaces it.
+ // OS-specific restrictions may apply when oldpath and newpath are in different directories.
+ // Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
+ //
+ // If there is an error, it will be of type *LinkError.
+ Rename(oldname, newname string) error
+
+ // Stat returns a FileInfo describing the named file.
+ //
+ // If there is an error, it will be of type *PathError.
+ Stat(name string) (FileInfo, error)
+
+ // Lstat returns a FileInfo describing the named file.
+ //
+ // If the file is a symbolic link, the returned FileInfo
+ // describes the symbolic link. Lstat makes no attempt to follow the link.
+ //
+ // If there is an error, it will be of type *PathError.
+ Lstat(name string) (FileInfo, error)
+
+ // Symlink creates newname as a symbolic link to oldname.
+ //
+ // On Windows, a symlink to a non-existent oldname creates a file symlink;
+ // if oldname is later created as a directory the symlink will not work.
+ //
+ // If there is an error, it will be of type *LinkError.
+ Symlink(oldname, newname string) error
+
+ // WalkDir walks the file tree rooted at root, calling fn for each file or
+ // directory in the tree, including root.
+ //
+ // All errors that arise visiting files and directories are filtered by fn:
+ // see the [WalkDirFunc] documentation for details.
+ //
+ // The files are walked in lexical order, which makes the output deterministic
+ // but requires WalkDir to read an entire directory into memory before proceeding
+ // to walk that directory.
+ //
+ // WalkDir does not follow symbolic links found in directories,
+ // but if root itself is a symbolic link, its target will be walked.
+ WalkDir(root string, fn WalkDirFunc) error
+}
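
The point of the `Filesystem` interface is that helpers can be written once and used with either the plain `*UnixFS` or the quota-tracking wrapper introduced later in this diff. A minimal sketch (import path and directory names are illustrative):

```go
package main

import (
	"log"

	"github.com/pterodactyl/wings/internal/ufs"
)

// ensureLayout is written against the Filesystem interface, so it accepts
// either a *UnixFS or the quota-tracking *Quota wrapper.
func ensureLayout(fs ufs.Filesystem) error {
	for _, dir := range []string{"logs", "backups/daily"} {
		if err := fs.MkdirAll(dir, 0o755); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	unixFS, err := ufs.NewUnixFS("/var/lib/example", true)
	if err != nil {
		log.Fatal(err)
	}
	defer unixFS.Close()

	if err := ensureLayout(unixFS); err != nil {
		log.Fatal(err)
	}
	// The same helper works with the quota wrapper (a limit of 0 means unlimited).
	if err := ensureLayout(ufs.NewQuota(unixFS, 0)); err != nil {
		log.Fatal(err)
	}
}
```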
diff --git a/internal/ufs/fs_quota.go b/internal/ufs/fs_quota.go
new file mode 100644
index 0000000..5c6e5ef
--- /dev/null
+++ b/internal/ufs/fs_quota.go
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+package ufs
+
+import (
+ "sync/atomic"
+)
+
+type Quota struct {
+ // fs is the underlying filesystem that runs the actual I/O operations.
+ *UnixFS
+
+ // limit is the size limit of the filesystem.
+ //
+ // limit is atomic to allow the limit to be safely changed after the
+ // filesystem was created.
+ //
+ // A limit of `-1` disables any write operation from being performed.
+ // A limit of `0` disables any limit checking.
+ limit atomic.Int64
+
+ // usage is the current usage of the filesystem.
+ //
+ // If usage is set to `-1`, it hasn't been calculated yet.
+ usage atomic.Int64
+}
+
+func NewQuota(fs *UnixFS, limit int64) *Quota {
+ qfs := Quota{UnixFS: fs}
+ qfs.limit.Store(limit)
+ return &qfs
+}
+
+// Close closes the filesystem.
+func (fs *Quota) Close() (err error) {
+ err = fs.UnixFS.Close()
+ return
+}
+
+// Limit returns the limit of the filesystem.
+func (fs *Quota) Limit() int64 {
+ return fs.limit.Load()
+}
+
+// SetLimit sets the limit of the filesystem, returning the previous limit.
+func (fs *Quota) SetLimit(newLimit int64) int64 {
+ return fs.limit.Swap(newLimit)
+}
+
+// Usage returns the current usage of the filesystem.
+func (fs *Quota) Usage() int64 {
+ return fs.usage.Load()
+}
+
+// SetUsage updates the total usage of the filesystem, returning the previous usage.
+func (fs *Quota) SetUsage(newUsage int64) int64 {
+ return fs.usage.Swap(newUsage)
+}
+
+// Add adds `i` to the tracked usage total.
+func (fs *Quota) Add(i int64) int64 {
+ usage := fs.Usage()
+
+	// If adding `i` to the usage would put us below 0, cap it at 0 (`i` can be negative).
+ if usage+i < 0 {
+ fs.usage.Store(0)
+ return 0
+ }
+ return fs.usage.Add(i)
+}
+
+// CanFit checks if the given size can fit in the filesystem without exceeding
+// the limit of the filesystem.
+func (fs *Quota) CanFit(size int64) bool {
+ // Get the size limit of the filesystem.
+ limit := fs.Limit()
+ switch limit {
+ case -1:
+ // A limit of -1 means no write operations are allowed.
+ return false
+ case 0:
+ // A limit of 0 means unlimited.
+ return true
+ }
+
+ // Any other limit is a value we need to check.
+ usage := fs.Usage()
+ if usage == -1 {
+ // We don't know what the current usage is yet.
+ return true
+ }
+
+ // If the current usage + the requested size are under the limit of the
+ // filesystem, allow it.
+ if usage+size <= limit {
+ return true
+ }
+
+ // Welp, the size would exceed the limit of the filesystem, deny it.
+ return false
+}
+
+func (fs *Quota) Remove(name string) error {
+	// For information on why RemoveStat is used here, check its
+	// documentation.
+ s, err := fs.RemoveStat(name)
+ if err != nil {
+ return err
+ }
+
+ // Don't reduce the quota's usage as `name` is not a regular file.
+ if !s.Mode().IsRegular() {
+ return nil
+ }
+
+ // Remove the size of the deleted file from the quota usage.
+ fs.Add(-s.Size())
+ return nil
+}
+
+// RemoveAll removes path and any children it contains.
+//
+// It removes everything it can but returns the first error
+// it encounters. If the path does not exist, RemoveAll
+// returns nil (no error).
+//
+// If there is an error, it will be of type *PathError.
+func (fs *Quota) RemoveAll(name string) error {
+ name, err := fs.unsafePath(name)
+ if err != nil {
+ return err
+ }
+ // While removeAll internally checks this, I want to make sure we check it
+ // and return the proper error so our tests can ensure that this will never
+ // be a possibility.
+ if name == "." {
+ return &PathError{
+ Op: "removeall",
+ Path: name,
+ Err: ErrBadPathResolution,
+ }
+ }
+ return fs.removeAll(name)
+}
+
+func (fs *Quota) removeAll(path string) error {
+ return removeAll(fs, path)
+}
+
+func (fs *Quota) unlinkat(dirfd int, name string, flags int) error {
+ if flags == 0 {
+ s, err := fs.Lstatat(dirfd, name)
+ if err == nil && s.Mode().IsRegular() {
+ fs.Add(-s.Size())
+ }
+ }
+ return fs.UnixFS.unlinkat(dirfd, name, flags)
+}
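
A short sketch of the accounting rules above: a limit of `0` disables checking, `-1` blocks writes, and any other value is compared against the tracked usage (the limit, usage, and paths are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/pterodactyl/wings/internal/ufs"
)

func main() {
	unixFS, err := ufs.NewUnixFS("/var/lib/example", true)
	if err != nil {
		log.Fatal(err)
	}
	fs := ufs.NewQuota(unixFS, 100) // hypothetical 100-byte limit
	defer fs.Close()

	fs.SetUsage(80)            // pretend 80 bytes are already in use
	fmt.Println(fs.CanFit(10)) // true:  80 + 10 <= 100
	fmt.Println(fs.CanFit(30)) // false: 80 + 30 >  100

	fs.SetLimit(0)                  // 0 disables limit checking entirely
	fmt.Println(fs.CanFit(1 << 40)) // true

	fs.SetLimit(-1)           // -1 blocks every write operation
	fmt.Println(fs.CanFit(1)) // false
}
```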
diff --git a/internal/ufs/fs_unix.go b/internal/ufs/fs_unix.go
new file mode 100644
index 0000000..36ba77c
--- /dev/null
+++ b/internal/ufs/fs_unix.go
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+//go:build unix
+
+package ufs
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// UnixFS is a filesystem that uses the unix package to make I/O calls.
+//
+// This is used for proper sandboxing and full control over the exact syscalls
+// being performed.
+type UnixFS struct {
+ // basePath is the base path for file operations to take place in.
+ basePath string
+
+ // dirfd holds the file descriptor of BasePath and is used to ensure
+	// operations are restricted to descendants of BasePath.
+ dirfd atomic.Int64
+
+ // useOpenat2 controls whether the `openat2` syscall is used instead of the
+ // older `openat` syscall.
+ useOpenat2 bool
+}
+
+// NewUnixFS creates a new sandboxed unix filesystem. BasePath is used as the
+// sandbox path; operations on BasePath itself are not allowed, but any
+// operations on its descendants are. Symlinks pointing outside BasePath are
+// checked and prevented from enabling an escape in a non-raceable manner.
+func NewUnixFS(basePath string, useOpenat2 bool) (*UnixFS, error) {
+ basePath = strings.TrimSuffix(basePath, "/")
+	// We don't need openat2 here; if the basePath we are given is already
+	// unsafe, there is nothing we can do to sandbox it.
+ dirfd, err := unix.Openat(AT_EMPTY_PATH, basePath, O_DIRECTORY|O_RDONLY, 0)
+ if err != nil {
+ return nil, convertErrorType(err)
+ }
+
+ fs := &UnixFS{
+ basePath: basePath,
+ useOpenat2: useOpenat2,
+ }
+ fs.dirfd.Store(int64(dirfd))
+ return fs, nil
+}
+
+// BasePath returns the base path of the UnixFS sandbox, file operations
+// pointing outside this path are prohibited and will be blocked by all
+// operations implemented by UnixFS.
+func (fs *UnixFS) BasePath() string {
+ return fs.basePath
+}
+
+// Close releases the file descriptor used to sandbox operations within the
+// base path of the filesystem.
+func (fs *UnixFS) Close() error {
+ // Once closed, change dirfd to something invalid to detect when it has been
+ // closed.
+ defer func() {
+ fs.dirfd.Store(-1)
+ }()
+ return unix.Close(int(fs.dirfd.Load()))
+}
+
+// Chmod changes the mode of the named file to mode.
+//
+// If the file is a symbolic link, it changes the mode of the link's target.
+// If there is an error, it will be of type *PathError.
+//
+// A different subset of the mode bits are used, depending on the
+// operating system.
+//
+// On Unix, the mode's permission bits, ModeSetuid, ModeSetgid, and
+// ModeSticky are used.
+//
+// On Windows, only the 0200 bit (owner writable) of mode is used; it
+// controls whether the file's read-only attribute is set or cleared.
+// The other bits are currently unused. For compatibility with Go 1.12
+// and earlier, use a non-zero mode. Use mode 0400 for a read-only
+// file and 0600 for a readable+writable file.
+//
+// On Plan 9, the mode's permission bits, ModeAppend, ModeExclusive,
+// and ModeTemporary are used.
+func (fs *UnixFS) Chmod(name string, mode FileMode) error {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+ return convertErrorType(unix.Fchmodat(dirfd, name, uint32(mode), 0))
+}
+
+// Chown changes the numeric uid and gid of the named file.
+//
+// If the file is a symbolic link, it changes the uid and gid of the link's target.
+// A uid or gid of -1 means to not change that value.
+// If there is an error, it will be of type *PathError.
+//
+// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or
+// EPLAN9 error, wrapped in *PathError.
+func (fs *UnixFS) Chown(name string, uid, gid int) error {
+ return fs.fchown(name, uid, gid, 0)
+}
+
+// Lchown changes the numeric uid and gid of the named file.
+//
+// If the file is a symbolic link, it changes the uid and gid of the link itself.
+// If there is an error, it will be of type *PathError.
+//
+// On Windows, it always returns the syscall.EWINDOWS error, wrapped
+// in *PathError.
+func (fs *UnixFS) Lchown(name string, uid, gid int) error {
+ // With AT_SYMLINK_NOFOLLOW, Fchownat acts like Lchown but allows us to
+ // pass a dirfd.
+ return fs.fchown(name, uid, gid, AT_SYMLINK_NOFOLLOW)
+}
+
+// fchown is a reusable wrapper around Fchownat used by Chown and Lchown.
+func (fs *UnixFS) fchown(name string, uid, gid, flags int) error {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+ return convertErrorType(unix.Fchownat(dirfd, name, uid, gid, flags))
+}
+
+// Chownat is like Chown but allows passing an existing directory file
+// descriptor rather than needing to resolve one.
+func (fs *UnixFS) Chownat(dirfd int, name string, uid, gid int) error {
+ return convertErrorType(unix.Fchownat(dirfd, name, uid, gid, 0))
+}
+
+// Lchownat is like Lchown but allows passing an existing directory file
+// descriptor rather than needing to resolve one.
+func (fs *UnixFS) Lchownat(dirfd int, name string, uid, gid int) error {
+ return convertErrorType(unix.Fchownat(dirfd, name, uid, gid, AT_SYMLINK_NOFOLLOW))
+}
+
+// Chtimes changes the access and modification times of the named
+// file, similar to the Unix utime() or utimes() functions.
+//
+// The underlying filesystem may truncate or round the values to a
+// less precise time unit.
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) Chtimes(name string, atime, mtime time.Time) error {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+ return fs.Chtimesat(dirfd, name, atime, mtime)
+}
+
+// Chtimesat is like Chtimes but allows passing an existing directory file
+// descriptor rather than needing to resolve one.
+func (fs *UnixFS) Chtimesat(dirfd int, name string, atime, mtime time.Time) error {
+ var utimes [2]unix.Timespec
+ set := func(i int, t time.Time) {
+ if t.IsZero() {
+ utimes[i] = unix.Timespec{Sec: unix.UTIME_OMIT, Nsec: unix.UTIME_OMIT}
+ } else {
+ utimes[i] = unix.NsecToTimespec(t.UnixNano())
+ }
+ }
+ set(0, atime)
+ set(1, mtime)
+ // This does support `AT_SYMLINK_NOFOLLOW` as well if needed.
+ if err := unix.UtimesNanoAt(dirfd, name, utimes[0:], 0); err != nil {
+ return convertErrorType(&PathError{Op: "chtimes", Path: name, Err: err})
+ }
+ return nil
+}
+
+// Create creates or truncates the named file. If the file already exists,
+// it is truncated.
+//
+// If the file does not exist, it is created with mode 0666
+// (before umask). If successful, methods on the returned File can
+// be used for I/O; the associated file descriptor has mode O_RDWR.
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) Create(name string) (File, error) {
+ return fs.OpenFile(name, O_CREATE|O_WRONLY|O_TRUNC, 0o644)
+}
+
+// Mkdir creates a new directory with the specified name and permission
+// bits (before umask).
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) Mkdir(name string, mode FileMode) error {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+ return fs.Mkdirat(dirfd, name, mode)
+}
+
+func (fs *UnixFS) Mkdirat(dirfd int, name string, mode FileMode) error {
+ return convertErrorType(unix.Mkdirat(dirfd, name, uint32(mode)))
+}
+
+// MkdirAll creates a directory named path, along with any necessary
+// parents, and returns nil, or else returns an error.
+//
+// The permission bits perm (before umask) are used for all
+// directories that MkdirAll creates.
+// If path is already a directory, MkdirAll does nothing
+// and returns nil.
+func (fs *UnixFS) MkdirAll(name string, mode FileMode) error {
+ // Ensure name is somewhat clean before continuing.
+ name, err := fs.unsafePath(name)
+ if err != nil {
+ return err
+ }
+ return fs.mkdirAll(name, mode)
+}
+
+// Open opens the named file for reading.
+//
+// If successful, methods on the returned file can be used for reading; the
+// associated file descriptor has mode O_RDONLY.
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) Open(name string) (File, error) {
+ return fs.OpenFile(name, O_RDONLY, 0)
+}
+
+// OpenFile is the generalized open call; most users will use Open
+// or Create instead. It opens the named file with specified flag
+// (O_RDONLY etc.).
+//
+// If the file does not exist, and the O_CREATE flag
+// is passed, it is created with mode perm (before umask). If successful,
+// methods on the returned File can be used for I/O.
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) OpenFile(name string, flag int, mode FileMode) (File, error) {
+ fd, err := fs.openFile(name, flag, mode)
+ if err != nil {
+ return nil, err
+ }
+	// Do not close `fd` here; it is handed off to the returned File, and the
+	// caller of this function is responsible for calling Close() on the File
+	// to release the file descriptor.
+ return os.NewFile(uintptr(fd), name), nil
+}
+
+func (fs *UnixFS) openFile(name string, flag int, mode FileMode) (int, error) {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return 0, err
+ }
+ return fs.openat(dirfd, name, flag, mode)
+}
+
+func (fs *UnixFS) OpenFileat(dirfd int, name string, flag int, mode FileMode) (File, error) {
+ fd, err := fs.openat(dirfd, name, flag, mode)
+ if err != nil {
+ return nil, err
+ }
+	// Do not close `fd` here; it is handed off to the returned File, and the
+	// caller of this function is responsible for calling Close() on the File
+	// to release the file descriptor.
+ return os.NewFile(uintptr(fd), name), nil
+}
+
+// ReadDir reads the named directory, returning all its directory entries
+// sorted by filename.
+// If an error occurs reading the directory, ReadDir returns the entries it
+// was able to read before the error, along with the error.
+func (fs *UnixFS) ReadDir(path string) ([]DirEntry, error) {
+ dirfd, name, closeFd, err := fs.safePath(path)
+ defer closeFd()
+ if err != nil {
+ return nil, err
+ }
+ fd, err := fs.openat(dirfd, name, O_DIRECTORY|O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer unix.Close(fd)
+ return fs.readDir(fd, name, nil)
+}
+
+// RemoveStat is a combination of Stat and Remove, it is used to more
+// efficiently remove a file when the caller needs to stat it before
+// removing it.
+//
+// This optimized function exists for our Quota abstraction, which needs
+// to track writes to a filesystem. When removing a file, the Quota filesystem
+// needs to know if the entry is a file and, if so, how large it is. Because
+// we need to Stat a file in order to get its mode and size, we will already
+// know whether the entry should be removed with Unlink or with Rmdir. The
+// standard `Remove` method simply tries both Unlink and Rmdir (in that order)
+// as that usually ends up being faster and more efficient than calling Stat
+// plus the proper operation in the first place.
+func (fs *UnixFS) RemoveStat(name string) (FileInfo, error) {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return nil, err
+ }
+
+	// Lstat name; we use Lstat as Unlink doesn't care about symlinks.
+ s, err := fs.Lstatat(dirfd, name)
+ if err != nil {
+ return nil, err
+ }
+
+ if s.IsDir() {
+ err = fs.unlinkat(dirfd, name, AT_REMOVEDIR) // Rmdir
+ } else {
+ err = fs.unlinkat(dirfd, name, 0)
+ }
+ if err != nil {
+ return s, convertErrorType(&PathError{Op: "remove", Path: name, Err: err})
+ }
+ return s, nil
+}
+
+// Remove removes the named file or (empty) directory.
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) Remove(name string) error {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+
+ // Prevent trying to Remove the base directory.
+ if name == "." {
+ return &PathError{
+ Op: "remove",
+ Path: name,
+ Err: ErrBadPathResolution,
+ }
+ }
+
+ // System call interface forces us to know
+ // whether name is a file or directory.
+ // Try both: it is cheaper on average than
+ // doing a Stat plus the right one.
+ err = fs.unlinkat(dirfd, name, 0)
+ if err == nil {
+ return nil
+ }
+ err1 := fs.unlinkat(dirfd, name, AT_REMOVEDIR) // Rmdir
+ if err1 == nil {
+ return nil
+ }
+
+ // Both failed: figure out which error to return.
+ // OS X and Linux differ on whether unlink(dir)
+ // returns EISDIR, so can't use that. However,
+ // both agree that rmdir(file) returns ENOTDIR,
+ // so we can use that to decide which error is real.
+ // Rmdir might also return ENOTDIR if given a bad
+ // file path, like /etc/passwd/foo, but in that case,
+ // both errors will be ENOTDIR, so it's okay to
+ // use the error from unlink.
+ if err1 != unix.ENOTDIR {
+ err = err1
+ }
+ return convertErrorType(&PathError{Op: "remove", Path: name, Err: err})
+}
+
+// RemoveAll removes path and any children it contains.
+//
+// It removes everything it can but returns the first error
+// it encounters. If the path does not exist, RemoveAll
+// returns nil (no error).
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) RemoveAll(name string) error {
+ name, err := fs.unsafePath(name)
+ if err != nil {
+ return err
+ }
+ // While removeAll internally checks this, I want to make sure we check it
+ // and return the proper error so our tests can ensure that this will never
+ // be a possibility.
+ if name == "." {
+ return &PathError{
+ Op: "removeall",
+ Path: name,
+ Err: ErrBadPathResolution,
+ }
+ }
+ return fs.removeAll(name)
+}
+
+func (fs *UnixFS) unlinkat(dirfd int, name string, flags int) error {
+ return ignoringEINTR(func() error {
+ return unix.Unlinkat(dirfd, name, flags)
+ })
+}
+
+// Rename renames (moves) oldpath to newpath.
+//
+// If newpath already exists and is not a directory, Rename replaces it.
+// OS-specific restrictions may apply when oldpath and newpath are in different directories.
+// Even within the same directory, on non-Unix platforms Rename is not an atomic operation.
+//
+// If there is an error, it will be of type *LinkError.
+func (fs *UnixFS) Rename(oldpath, newpath string) error {
+ // Simple case: both paths are the same.
+ if oldpath == newpath {
+ return nil
+ }
+
+ olddirfd, oldname, closeFd, err := fs.safePath(oldpath)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+ // Ensure that we are not trying to rename the base directory itself.
+	// While unix.Renameat would end up returning a "device or resource busy"
+	// error, relying on that would not properly protect the system.
+ if oldname == "." {
+ return convertErrorType(&PathError{
+ Op: "rename",
+ Path: oldname,
+ Err: ErrBadPathResolution,
+ })
+ }
+ // Stat the old target to return proper errors.
+ if _, err := fs.Lstatat(olddirfd, oldname); err != nil {
+ return err
+ }
+
+ newdirfd, newname, closeFd2, err := fs.safePath(newpath)
+ if err != nil {
+ closeFd2()
+ if !errors.Is(err, ErrNotExist) {
+ return convertErrorType(err)
+ }
+ var pathErr *PathError
+ if !errors.As(err, &pathErr) {
+ return convertErrorType(err)
+ }
+ if err := fs.MkdirAll(pathErr.Path, 0o755); err != nil {
+ return err
+ }
+ newdirfd, newname, closeFd2, err = fs.safePath(newpath)
+ defer closeFd2()
+ if err != nil {
+ return err
+ }
+ } else {
+ defer closeFd2()
+ }
+
+ // Ensure that we are not trying to rename the base directory itself.
+	// While unix.Renameat would end up returning a "device or resource busy"
+	// error, relying on that would not properly protect the system.
+ if newname == "." {
+ return convertErrorType(&PathError{
+ Op: "rename",
+ Path: newname,
+ Err: ErrBadPathResolution,
+ })
+ }
+ // Stat the new target to return proper errors.
+ _, err = fs.Lstatat(newdirfd, newname)
+ switch {
+ case err == nil:
+ return convertErrorType(&PathError{
+ Op: "rename",
+ Path: newname,
+ Err: ErrExist,
+ })
+ case !errors.Is(err, ErrNotExist):
+ return err
+ }
+ return unix.Renameat(olddirfd, oldname, newdirfd, newname)
+}
+
+// Stat returns a FileInfo describing the named file.
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) Stat(name string) (FileInfo, error) {
+ return fs.fstat(name, 0)
+}
+
+// Statat is like Stat but allows passing an existing directory file
+// descriptor rather than needing to resolve one.
+func (fs *UnixFS) Statat(dirfd int, name string) (FileInfo, error) {
+ return fs.fstatat(dirfd, name, 0)
+}
+
+// Lstat returns a FileInfo describing the named file.
+//
+// If the file is a symbolic link, the returned FileInfo
+// describes the symbolic link. Lstat makes no attempt to follow the link.
+//
+// If there is an error, it will be of type *PathError.
+func (fs *UnixFS) Lstat(name string) (FileInfo, error) {
+ return fs.fstat(name, AT_SYMLINK_NOFOLLOW)
+}
+
+// Lstatat is like Lstat but allows passing an existing directory file
+// descriptor rather than needing to resolve one.
+func (fs *UnixFS) Lstatat(dirfd int, name string) (FileInfo, error) {
+ return fs.fstatat(dirfd, name, AT_SYMLINK_NOFOLLOW)
+}
+
+func (fs *UnixFS) fstat(name string, flags int) (FileInfo, error) {
+ dirfd, name, closeFd, err := fs.safePath(name)
+ defer closeFd()
+ if err != nil {
+ return nil, err
+ }
+ return fs.fstatat(dirfd, name, flags)
+}
+
+func (fs *UnixFS) fstatat(dirfd int, name string, flags int) (FileInfo, error) {
+ var s fileStat
+ if err := ignoringEINTR(func() error {
+ return unix.Fstatat(dirfd, name, &s.sys, flags)
+ }); err != nil {
+ return nil, &PathError{Op: "stat", Path: name, Err: err}
+ }
+ fillFileStatFromSys(&s, name)
+ return &s, nil
+}
+
+// Symlink creates newname as a symbolic link to oldname.
+//
+// On Windows, a symlink to a non-existent oldname creates a file symlink;
+// if oldname is later created as a directory the symlink will not work.
+//
+// If there is an error, it will be of type *LinkError.
+func (fs *UnixFS) Symlink(oldpath, newpath string) error {
+ dirfd, newpath, closeFd, err := fs.safePath(newpath)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+ if err := ignoringEINTR(func() error {
+ // We aren't concerned with oldpath here as a symlink can point anywhere
+ // it wants.
+ return unix.Symlinkat(oldpath, dirfd, newpath)
+ }); err != nil {
+ return &LinkError{Op: "symlink", Old: oldpath, New: newpath, Err: err}
+ }
+ return nil
+}
+
+// Touch will attempt to open a file for reading and/or writing. If the file
+// does not exist it will be created, and any missing parent directories will
+// also be created. The opened file is truncated only if `flag` has
+// O_TRUNC set.
+func (fs *UnixFS) Touch(path string, flag int, mode FileMode) (File, error) {
+ if flag&O_CREATE == 0 {
+ flag |= O_CREATE
+ }
+ dirfd, name, closeFd, err := fs.safePath(path)
+ defer closeFd()
+ if err == nil {
+ return fs.OpenFileat(dirfd, name, flag, mode)
+ }
+ if !errors.Is(err, ErrNotExist) {
+ return nil, err
+ }
+ var pathErr *PathError
+ if !errors.As(err, &pathErr) {
+ return nil, err
+ }
+ if err := fs.MkdirAll(pathErr.Path, 0o755); err != nil {
+ return nil, err
+ }
+ // Try to open the file one more time after creating its parent directories.
+ return fs.OpenFile(path, flag, mode)
+}
+
+// WalkDir walks the file tree rooted at root, calling fn for each file or
+// directory in the tree, including root.
+//
+// All errors that arise visiting files and directories are filtered by fn:
+// see the [WalkDirFunc] documentation for details.
+//
+// The files are walked in lexical order, which makes the output deterministic
+// but requires WalkDir to read an entire directory into memory before proceeding
+// to walk that directory.
+//
+// WalkDir does not follow symbolic links found in directories,
+// but if root itself is a symbolic link, its target will be walked.
+func (fs *UnixFS) WalkDir(root string, fn WalkDirFunc) error {
+ return WalkDir(fs, root, fn)
+}
+
+// openat is a wrapper around both unix.Openat and unix.Openat2. If the UnixFS
+// was configured to enable openat2 support, unix.Openat2 will be used instead
+// of unix.Openat due to having better security properties for our use-case.
+func (fs *UnixFS) openat(dirfd int, name string, flag int, mode FileMode) (int, error) {
+ if flag&O_NOFOLLOW == 0 {
+ flag |= O_NOFOLLOW
+ }
+
+ var fd int
+ for {
+ var err error
+ if fs.useOpenat2 {
+ fd, err = fs._openat2(dirfd, name, uint64(flag), uint64(syscallMode(mode)))
+ } else {
+ fd, err = fs._openat(dirfd, name, flag, uint32(syscallMode(mode)))
+ }
+ if err == nil {
+ break
+ }
+ // We have to check EINTR here, per issues https://go.dev/issue/11180 and https://go.dev/issue/39237.
+ if err == unix.EINTR {
+ continue
+ }
+ return 0, convertErrorType(err)
+ }
+
+ // If we are not using openat2, do additional path checking. This assumes
+ // that openat2 is using `RESOLVE_BENEATH` to avoid the same security
+ // issue.
+ if !fs.useOpenat2 {
+ var finalPath string
+		finalPath, err := filepath.EvalSymlinks(filepath.Join("/proc/self/fd/", strconv.Itoa(dirfd)))
+		if err != nil {
+ if !errors.Is(err, ErrNotExist) {
+ return fd, fmt.Errorf("failed to evaluate symlink: %w", convertErrorType(err))
+ }
+
+ // The target of one of the symlinks (EvalSymlinks is recursive)
+ // does not exist. So get the path that does not exist and use
+ // that for further validation instead.
+ var pErr *PathError
+ if ok := errors.As(err, &pErr); !ok {
+ return fd, fmt.Errorf("failed to evaluate symlink: %w", convertErrorType(err))
+ }
+ finalPath = pErr.Path
+ }
+
+ // Check if the path is within our root.
+ if !fs.unsafeIsPathInsideOfBase(finalPath) {
+ return fd, convertErrorType(&PathError{
+ Op: "openat",
+ Path: name,
+ Err: ErrBadPathResolution,
+ })
+ }
+ }
+ return fd, nil
+}
+
+// _openat is a wrapper around unix.Openat. This method should never be called
+// directly; use `openat` instead.
+func (fs *UnixFS) _openat(dirfd int, name string, flag int, mode uint32) (int, error) {
+ // Ensure the O_CLOEXEC flag is set.
+ // Go sets this in the os package, but since we are directly using unix
+ // we need to set it ourselves.
+ if flag&O_CLOEXEC == 0 {
+ flag |= O_CLOEXEC
+ }
+ // O_LARGEFILE is set by Openat for us automatically.
+ fd, err := unix.Openat(dirfd, name, flag, mode)
+ switch {
+ case err == nil:
+ return fd, nil
+ case err == unix.EINTR:
+ return 0, err
+ case err == unix.EAGAIN:
+ return 0, err
+ default:
+ return 0, &PathError{Op: "openat", Path: name, Err: err}
+ }
+}
+
+// _openat2 wraps the `openat2` syscall, which supersedes the `openat` syscall.
+// It has improved validation and security characteristics that weren't
+// available or considered when `openat` was originally implemented. As such,
+// it is only present in kernel 5.6 and above.
+//
+// This method should never be called directly; use `openat` instead.
+func (fs *UnixFS) _openat2(dirfd int, name string, flag uint64, mode uint64) (int, error) {
+ // Ensure the O_CLOEXEC flag is set.
+ // Go sets this when using the os package, but since we are directly using
+ // the unix package we need to set it ourselves.
+ if flag&O_CLOEXEC == 0 {
+ flag |= O_CLOEXEC
+ }
+ // Ensure the O_LARGEFILE flag is set.
+ // Go sets this for unix.Open, unix.Openat, but not unix.Openat2.
+ if flag&O_LARGEFILE == 0 {
+ flag |= O_LARGEFILE
+ }
+ fd, err := unix.Openat2(dirfd, name, &unix.OpenHow{
+ Flags: flag,
+ Mode: mode,
+		// This is the bread and butter of preventing a symlink escape; without
+		// this option, we would have to handle path validation fully on our own.
+ //
+ // This is why using Openat2 over Openat is preferred if available.
+ Resolve: unix.RESOLVE_BENEATH,
+ })
+ switch {
+ case err == nil:
+ return fd, nil
+ case err == unix.EINTR:
+ return 0, err
+ case err == unix.EAGAIN:
+ return 0, err
+ default:
+ return 0, &PathError{Op: "openat2", Path: name, Err: err}
+ }
+}
+
+func (fs *UnixFS) SafePath(path string) (int, string, func(), error) {
+ return fs.safePath(path)
+}
+
+func (fs *UnixFS) safePath(path string) (dirfd int, file string, closeFd func(), err error) {
+ // Default closeFd to a NO-OP.
+ closeFd = func() {}
+
+ // Use unsafePath to clean the path and strip BasePath if path is absolute.
+ var name string
+ name, err = fs.unsafePath(path)
+ if err != nil {
+ return
+ }
+
+ // Check if dirfd was closed, this will happen if (*UnixFS).Close()
+ // was called.
+ fsDirfd := int(fs.dirfd.Load())
+ if fsDirfd == -1 {
+ err = ErrClosed
+ return
+ }
+
+ // Split the parent from the last element in the path, this gives us the
+ // "file name" and the full path to its parent.
+ var dir string
+ dir, file = filepath.Split(name)
+ // If dir is empty then name is not nested.
+ if dir == "" {
+ // We don't need to set closeFd here as it will default to a NO-OP and
+		// `fs.dirfd` is re-used until the filesystem is no longer needed.
+ dirfd = fsDirfd
+
+ // Return dirfd, name, an empty closeFd func, and no error
+ return
+ }
+
+ // Dir will usually contain a trailing slash as filepath.Split doesn't
+ // trim slashes.
+ dir = strings.TrimSuffix(dir, "/")
+ dirfd, err = fs.openat(fsDirfd, dir, O_DIRECTORY|O_RDONLY, 0)
+ if dirfd != 0 {
+ // Set closeFd to close the newly opened directory file descriptor.
+ closeFd = func() { _ = unix.Close(dirfd) }
+ }
+
+ // Return dirfd, name, the closeFd func, and err
+ return
+}
+
+// unsafePath joins the given path with the filesystem's base path and cleans
+// the result. The path returned by this function may not be inside the
+// filesystem's base path; additional checks are required to safely use paths
+// returned by this function.
+func (fs *UnixFS) unsafePath(path string) (string, error) {
+ // Calling filepath.Clean on the joined directory will resolve it to the
+ // absolute path, removing any ../ type of resolution arguments, and leaving
+ // us with a direct path link.
+ //
+ // This will also trim the existing root path off the beginning of the path
+ // passed to the function since that can get a bit messy.
+ r := filepath.Clean(filepath.Join(fs.basePath, strings.TrimPrefix(path, fs.basePath)))
+
+ if fs.unsafeIsPathInsideOfBase(r) {
+		// Somewhat ironically, we now strip the base path back off: we operate
+		// with dirfds and `*at` syscalls, which behave differently when given
+		// an absolute path.
+ //
+ // First trim the BasePath, then trim any leading slashes.
+ r = strings.TrimPrefix(strings.TrimPrefix(r, fs.basePath), "/")
+ // If the path is empty then return "." as the path is pointing to the
+ // root.
+ if r == "" {
+ return ".", nil
+ }
+ return r, nil
+ }
+
+ return "", &PathError{
+ Op: "safePath",
+ Path: path,
+ Err: ErrBadPathResolution,
+ }
+}
+
+// unsafeIsPathInsideOfBase checks if the given path is inside the filesystem's
+// base path.
+func (fs *UnixFS) unsafeIsPathInsideOfBase(path string) bool {
+ return strings.HasPrefix(
+ strings.TrimSuffix(path, "/")+"/",
+ fs.basePath+"/",
+ )
+}
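
The openat wrapper above only falls back to the /proc/self/fd symlink check when openat2 is unavailable, because RESOLVE_BENEATH lets the kernel reject escaping paths directly. A minimal standalone sketch of that property, assuming a Linux 5.6+ kernel and an existing /srv/data directory (the path and program are illustrative, not part of this change):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Open the sandbox root; everything below must resolve beneath this fd.
	dirfd, err := unix.Open("/srv/data", unix.O_DIRECTORY|unix.O_RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(dirfd)

	// RESOLVE_BENEATH makes the kernel reject any component ("..", absolute
	// symlinks, and so on) that would escape dirfd; plain Openat would follow it.
	fd, err := unix.Openat2(dirfd, "../etc/passwd", &unix.OpenHow{
		Flags:   unix.O_RDONLY | unix.O_CLOEXEC | unix.O_NOFOLLOW,
		Resolve: unix.RESOLVE_BENEATH,
	})
	if err != nil {
		fmt.Println("escape blocked:", err) // EXDEV on kernels with openat2
		return
	}
	unix.Close(fd)
}

On kernels without openat2 the call fails with ENOSYS, which is why the code keeps the plain Openat path together with the extra /proc/self/fd validation.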
diff --git a/internal/ufs/fs_unix_test.go b/internal/ufs/fs_unix_test.go
new file mode 100644
index 0000000..3cf3b37
--- /dev/null
+++ b/internal/ufs/fs_unix_test.go
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+//go:build unix
+
+package ufs_test
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/pterodactyl/wings/internal/ufs"
+)
+
+type testUnixFS struct {
+ *ufs.UnixFS
+
+ TmpDir string
+ Root string
+}
+
+func (fs *testUnixFS) Cleanup() {
+ _ = fs.Close()
+ _ = os.RemoveAll(fs.TmpDir)
+}
+
+func newTestUnixFS() (*testUnixFS, error) {
+ tmpDir, err := os.MkdirTemp(os.TempDir(), "ufs")
+ if err != nil {
+ return nil, err
+ }
+ root := filepath.Join(tmpDir, "root")
+ if err := os.Mkdir(root, 0o755); err != nil {
+ return nil, err
+ }
+ // TODO: test both disabled and enabled.
+ fs, err := ufs.NewUnixFS(root, false)
+ if err != nil {
+ return nil, err
+ }
+ tfs := &testUnixFS{
+ UnixFS: fs,
+ TmpDir: tmpDir,
+ Root: root,
+ }
+ return tfs, nil
+}
+
+func TestUnixFS_Remove(t *testing.T) {
+ t.Parallel()
+ fs, err := newTestUnixFS()
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ defer fs.Cleanup()
+
+ t.Run("base directory", func(t *testing.T) {
+ // Try to remove the base directory.
+ if err := fs.Remove(""); !errors.Is(err, ufs.ErrBadPathResolution) {
+			t.Errorf("expected a bad path resolution error, but got: %v", err)
+ return
+ }
+ })
+
+ t.Run("path traversal", func(t *testing.T) {
+		// Try to remove a path that resolves outside of the base directory.
+		if err := fs.Remove("../root"); !errors.Is(err, ufs.ErrBadPathResolution) {
+			t.Errorf("expected a bad path resolution error, but got: %v", err)
+ return
+ }
+ })
+}
+
+func TestUnixFS_RemoveAll(t *testing.T) {
+ t.Parallel()
+ fs, err := newTestUnixFS()
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ defer fs.Cleanup()
+
+ t.Run("base directory", func(t *testing.T) {
+ // Try to remove the base directory.
+ if err := fs.RemoveAll(""); !errors.Is(err, ufs.ErrBadPathResolution) {
+			t.Errorf("expected a bad path resolution error, but got: %v", err)
+ return
+ }
+ })
+
+ t.Run("path traversal", func(t *testing.T) {
+		// Try to remove a path that resolves outside of the base directory.
+		if err := fs.RemoveAll("../root"); !errors.Is(err, ufs.ErrBadPathResolution) {
+			t.Errorf("expected a bad path resolution error, but got: %v", err)
+ return
+ }
+ })
+}
+
+func TestUnixFS_Rename(t *testing.T) {
+ t.Parallel()
+ fs, err := newTestUnixFS()
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ defer fs.Cleanup()
+
+ t.Run("rename base directory", func(t *testing.T) {
+ // Try to rename the base directory.
+ if err := fs.Rename("", "yeet"); !errors.Is(err, ufs.ErrBadPathResolution) {
+			t.Errorf("expected a bad path resolution error, but got: %v", err)
+ return
+ }
+ })
+
+ t.Run("rename over base directory", func(t *testing.T) {
+ // Create a directory that we are going to try and move over top of the
+ // existing base directory.
+ if err := fs.Mkdir("overwrite_dir", 0o755); err != nil {
+ t.Error(err)
+ return
+ }
+
+ // Try to rename over the base directory.
+ if err := fs.Rename("overwrite_dir", ""); !errors.Is(err, ufs.ErrBadPathResolution) {
+			t.Errorf("expected a bad path resolution error, but got: %v", err)
+ return
+ }
+ })
+
+ t.Run("directory rename", func(t *testing.T) {
+ // Create a directory to rename to something else.
+ if err := fs.Mkdir("test_directory", 0o755); err != nil {
+ t.Error(err)
+ return
+ }
+
+ // Try to rename "test_directory" to "directory".
+ if err := fs.Rename("test_directory", "directory"); err != nil {
+ t.Errorf("expected no error, but got: %v", err)
+ return
+ }
+
+ // Sanity check
+ if _, err := os.Lstat(filepath.Join(fs.Root, "directory")); err != nil {
+ t.Errorf("Lstat errored when performing sanity check: %v", err)
+ return
+ }
+ })
+
+ t.Run("file rename", func(t *testing.T) {
+		// Create a file to rename to something else.
+ if f, err := fs.Create("test_file"); err != nil {
+ t.Error(err)
+ return
+ } else {
+ _ = f.Close()
+ }
+
+ // Try to rename "test_file" to "file".
+ if err := fs.Rename("test_file", "file"); err != nil {
+ t.Errorf("expected no error, but got: %v", err)
+ return
+ }
+
+ // Sanity check
+ if _, err := os.Lstat(filepath.Join(fs.Root, "file")); err != nil {
+ t.Errorf("Lstat errored when performing sanity check: %v", err)
+ return
+ }
+ })
+}
+
+func TestUnixFS_Touch(t *testing.T) {
+ t.Parallel()
+ fs, err := newTestUnixFS()
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+ defer fs.Cleanup()
+
+ t.Run("base directory", func(t *testing.T) {
+ path := "i_touched_a_file"
+ f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ _ = f.Close()
+
+ // Sanity check
+ if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
+ t.Errorf("Lstat errored when performing sanity check: %v", err)
+ return
+ }
+ })
+
+ t.Run("existing parent directory", func(t *testing.T) {
+ dir := "some_parent_directory"
+ if err := fs.Mkdir(dir, 0o755); err != nil {
+ t.Errorf("error creating parent directory: %v", err)
+ return
+ }
+ path := filepath.Join(dir, "i_touched_a_file")
+ f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
+ if err != nil {
+ t.Errorf("error touching file: %v", err)
+ return
+ }
+ _ = f.Close()
+
+ // Sanity check
+ if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
+ t.Errorf("Lstat errored when performing sanity check: %v", err)
+ return
+ }
+ })
+
+ t.Run("non-existent parent directory", func(t *testing.T) {
+ path := "some_other_directory/i_touched_a_file"
+ f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
+ if err != nil {
+ t.Errorf("error touching file: %v", err)
+ return
+ }
+ _ = f.Close()
+
+ // Sanity check
+ if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
+ t.Errorf("Lstat errored when performing sanity check: %v", err)
+ return
+ }
+ })
+
+ t.Run("non-existent parent directories", func(t *testing.T) {
+ path := "some_other_directory/some_directory/i_touched_a_file"
+ f, err := fs.Touch(path, ufs.O_RDWR, 0o644)
+ if err != nil {
+ t.Errorf("error touching file: %v", err)
+ return
+ }
+ _ = f.Close()
+
+ // Sanity check
+ if _, err := os.Lstat(filepath.Join(fs.Root, path)); err != nil {
+ t.Errorf("Lstat errored when performing sanity check: %v", err)
+ return
+ }
+ })
+}
diff --git a/internal/ufs/go.LICENSE b/internal/ufs/go.LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/internal/ufs/go.LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/internal/ufs/mkdir_unix.go b/internal/ufs/mkdir_unix.go
new file mode 100644
index 0000000..88d3938
--- /dev/null
+++ b/internal/ufs/mkdir_unix.go
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code in this file was derived from `go/src/os/path.go`.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the `go.LICENSE` file.
+
+//go:build unix
+
+package ufs
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+// mkdirAll is a recursive Mkdir implementation that properly handles symlinks.
+func (fs *UnixFS) mkdirAll(name string, mode FileMode) error {
+ // Fast path: if we can tell whether path is a directory or file, stop with success or error.
+ dir, err := fs.Lstat(name)
+ if err == nil {
+ if dir.Mode()&ModeSymlink != 0 {
+ // If the final path is a symlink, resolve its target and use that
+ // to check instead.
+ dir, err = fs.Stat(name)
+ if err != nil {
+ return err
+ }
+ }
+ if dir.IsDir() {
+ return nil
+ }
+ return convertErrorType(&PathError{Op: "mkdir", Path: name, Err: unix.ENOTDIR})
+ }
+
+ // Slow path: make sure parent exists and then call Mkdir for path.
+ i := len(name)
+ for i > 0 && name[i-1] == '/' { // Skip trailing path separator.
+ i--
+ }
+
+ j := i
+ for j > 0 && name[j-1] != '/' { // Scan backward over element.
+ j--
+ }
+
+ if j > 1 {
+ // Create parent.
+ err = fs.mkdirAll(name[:j-1], mode)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Parent now exists; invoke Mkdir and use its result.
+ err = fs.Mkdir(name, mode)
+ if err != nil {
+ // Handle arguments like "foo/." by
+ // double-checking that directory doesn't exist.
+ dir, err1 := fs.Lstat(name)
+ if err1 == nil && dir.IsDir() {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
diff --git a/internal/ufs/path_unix.go b/internal/ufs/path_unix.go
new file mode 100644
index 0000000..f82f09a
--- /dev/null
+++ b/internal/ufs/path_unix.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code in this file was copied from `go/src/os/path.go`
+// and `go/src/os/path_unix.go`.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the `go.LICENSE` file.
+
+//go:build unix
+
+package ufs
+
+import (
+ "os"
+)
+
+// basename removes trailing slashes and the leading directory name from path name.
+func basename(name string) string {
+ i := len(name) - 1
+ // Remove trailing slashes
+ for ; i > 0 && name[i] == '/'; i-- {
+ name = name[:i]
+ }
+ // Remove leading directory name
+ for i--; i >= 0; i-- {
+ if name[i] == '/' {
+ name = name[i+1:]
+ break
+ }
+ }
+ return name
+}
+
+// endsWithDot reports whether the final component of path is ".".
+func endsWithDot(path string) bool {
+ if path == "." {
+ return true
+ }
+ if len(path) >= 2 && path[len(path)-1] == '.' && os.IsPathSeparator(path[len(path)-2]) {
+ return true
+ }
+ return false
+}
+
+// splitPath returns the parent directory and the base name of path.
+func splitPath(path string) (string, string) {
+ // if no better parent is found, the path is relative from "here"
+ dirname := "."
+
+ // Remove all but one leading slash.
+ for len(path) > 1 && path[0] == '/' && path[1] == '/' {
+ path = path[1:]
+ }
+
+ i := len(path) - 1
+
+ // Remove trailing slashes.
+ for ; i > 0 && path[i] == '/'; i-- {
+ path = path[:i]
+ }
+
+ // if no slashes in path, base is path
+ basename := path
+
+ // Remove leading directory path
+ for i--; i >= 0; i-- {
+ if path[i] == '/' {
+ if i == 0 {
+ dirname = path[:1]
+ } else {
+ dirname = path[:i]
+ }
+ basename = path[i+1:]
+ break
+ }
+ }
+
+ return dirname, basename
+}
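
basename and splitPath are unexported string helpers copied from the Go standard library, so their edge cases (trailing slashes, a single leading slash, no slash at all) are easiest to see as inputs and expected outputs. A test-style sketch, assuming it sat in package ufs next to this file; the test name and cases are illustrative and were traced from the code above:

package ufs

import "testing"

func TestSplitPathSketch(t *testing.T) {
	cases := []struct{ in, dir, base string }{
		{"foo/bar/baz", "foo/bar", "baz"},
		{"foo/bar/", "foo", "bar"}, // trailing slashes are trimmed first
		{"/foo", "/", "foo"},       // the parent of a top-level entry is "/"
		{"foo", ".", "foo"},        // no slash: the parent is "."
	}
	for _, c := range cases {
		dir, base := splitPath(c.in)
		if dir != c.dir || base != c.base {
			t.Errorf("splitPath(%q) = (%q, %q), want (%q, %q)", c.in, dir, base, c.dir, c.base)
		}
	}
	if got := basename("a/b/c/"); got != "c" {
		t.Errorf("basename(%q) = %q, want %q", "a/b/c/", got, "c")
	}
}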
diff --git a/internal/ufs/quota_writer.go b/internal/ufs/quota_writer.go
new file mode 100644
index 0000000..33c083f
--- /dev/null
+++ b/internal/ufs/quota_writer.go
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2024 Matthew Penner
+
+package ufs
+
+import (
+ "errors"
+ "io"
+ "sync/atomic"
+)
+
+// CountedWriter is a writer that counts the amount of data written to the
+// underlying writer.
+type CountedWriter struct {
+ File
+
+ counter atomic.Int64
+ err error
+}
+
+// NewCountedWriter returns a new CountedWriter that counts the amount of bytes
+// written to the underlying writer.
+func NewCountedWriter(f File) *CountedWriter {
+ return &CountedWriter{File: f}
+}
+
+// BytesWritten returns the amount of bytes that have been written to the
+// underlying writer.
+func (w *CountedWriter) BytesWritten() int64 {
+ return w.counter.Load()
+}
+
+// Error returns the error from the writer if any. If the error is an EOF, nil
+// will be returned.
+func (w *CountedWriter) Error() error {
+ if errors.Is(w.err, io.EOF) {
+ return nil
+ }
+ return w.err
+}
+
+// Write writes bytes to the underlying writer while tracking the total amount
+// of bytes written.
+func (w *CountedWriter) Write(p []byte) (int, error) {
+ if w.err != nil {
+ return 0, io.EOF
+ }
+
+ // Write is a very simple operation for us to handle.
+ n, err := w.File.Write(p)
+ w.counter.Add(int64(n))
+ w.err = err
+
+ // TODO: is this how we actually want to handle errors with this?
+ if err == io.EOF {
+ return n, io.EOF
+ } else {
+ return n, nil
+ }
+}
+
+func (w *CountedWriter) ReadFrom(r io.Reader) (n int64, err error) {
+ cr := NewCountedReader(r)
+ n, err = w.File.ReadFrom(cr)
+ w.counter.Add(n)
+ return
+}
+
+// CountedReader is a reader that counts the amount of data read from the
+// underlying reader.
+type CountedReader struct {
+ reader io.Reader
+
+ counter atomic.Int64
+ err error
+}
+
+var _ io.Reader = (*CountedReader)(nil)
+
+// NewCountedReader returns a new CountedReader that counts the amount of bytes
+// read from the underlying reader.
+func NewCountedReader(r io.Reader) *CountedReader {
+ return &CountedReader{reader: r}
+}
+
+// BytesRead returns the amount of bytes that have been read from the underlying
+// reader.
+func (r *CountedReader) BytesRead() int64 {
+ return r.counter.Load()
+}
+
+// Error returns the error from the reader if any. If the error is an EOF, nil
+// will be returned.
+func (r *CountedReader) Error() error {
+ if errors.Is(r.err, io.EOF) {
+ return nil
+ }
+ return r.err
+}
+
+// Read reads bytes from the underlying reader while tracking the total amount
+// of bytes read.
+func (r *CountedReader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, io.EOF
+ }
+
+ n, err := r.reader.Read(p)
+ r.counter.Add(int64(n))
+ r.err = err
+
+ if err == io.EOF {
+ return n, io.EOF
+ } else {
+ return n, nil
+ }
+}
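
CountedWriter and CountedReader exist so a caller can find out how many bytes actually flowed through a handle, for example to charge a transfer against a disk quota once the copy finishes. A minimal usage sketch, assuming it compiles inside the wings module (internal/ufs cannot be imported from outside it; the import path is taken from the test file above) and that /srv/data already exists:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/pterodactyl/wings/internal/ufs"
)

func main() {
	fs, err := ufs.NewUnixFS("/srv/data", false)
	if err != nil {
		panic(err)
	}
	defer fs.Close()

	f, err := fs.Touch("hello.txt", ufs.O_RDWR, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Wrap the file so the number of bytes that reached it can be read back
	// after the copy completes.
	cw := ufs.NewCountedWriter(f)
	if _, err := io.Copy(cw, strings.NewReader("hello world\n")); err != nil {
		panic(err)
	}
	fmt.Println("bytes written:", cw.BytesWritten()) // 12
}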
diff --git a/internal/ufs/removeall_unix.go b/internal/ufs/removeall_unix.go
new file mode 100644
index 0000000..38a6e07
--- /dev/null
+++ b/internal/ufs/removeall_unix.go
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code in this file was derived from `go/src/os/removeall_at.go`.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the `go.LICENSE` file.
+
+//go:build unix
+
+package ufs
+
+import (
+ "errors"
+ "io"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+type unixFS interface {
+ Open(name string) (File, error)
+ Remove(name string) error
+ unlinkat(dirfd int, path string, flags int) error
+}
+
+func (fs *UnixFS) removeAll(path string) error {
+ return removeAll(fs, path)
+}
+
+func removeAll(fs unixFS, path string) error {
+ if path == "" {
+ // fail silently to retain compatibility with previous behavior
+ // of RemoveAll. See issue https://go.dev/issue/28830.
+ return nil
+ }
+
+ // The rmdir system call does not permit removing ".",
+ // so we don't permit it either.
+ if endsWithDot(path) {
+ return &PathError{Op: "removeall", Path: path, Err: unix.EINVAL}
+ }
+
+ // Simple case: if Remove works, we're done.
+ err := fs.Remove(path)
+ if err == nil || errors.Is(err, ErrNotExist) {
+ return nil
+ }
+
+ // RemoveAll recurses by deleting the path base from
+ // its parent directory
+ parentDir, base := splitPath(path)
+
+ parent, err := fs.Open(parentDir)
+ if errors.Is(err, ErrNotExist) {
+ // If parent does not exist, base cannot exist. Fail silently
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ defer parent.Close()
+
+ if err := removeAllFrom(fs, parent, base); err != nil {
+ if pathErr, ok := err.(*PathError); ok {
+ pathErr.Path = parentDir + string(os.PathSeparator) + pathErr.Path
+ err = pathErr
+ }
+ return convertErrorType(err)
+ }
+ return nil
+}
+
+func removeAllFrom(fs unixFS, parent File, base string) error {
+ parentFd := int(parent.Fd())
+ // Simple case: if Unlink (aka remove) works, we're done.
+ err := fs.unlinkat(parentFd, base, 0)
+ if err == nil || errors.Is(err, ErrNotExist) {
+ return nil
+ }
+
+ // EISDIR means that we have a directory, and we need to
+ // remove its contents.
+ // EPERM or EACCES means that we don't have write permission on
+ // the parent directory, but this entry might still be a directory
+ // whose contents need to be removed.
+ // Otherwise, just return the error.
+ if err != unix.EISDIR && err != unix.EPERM && err != unix.EACCES {
+ return &PathError{Op: "unlinkat", Path: base, Err: err}
+ }
+
+ // Is this a directory we need to recurse into?
+ var statInfo unix.Stat_t
+ statErr := ignoringEINTR(func() error {
+ return unix.Fstatat(parentFd, base, &statInfo, AT_SYMLINK_NOFOLLOW)
+ })
+ if statErr != nil {
+ if errors.Is(statErr, ErrNotExist) {
+ return nil
+ }
+ return &PathError{Op: "fstatat", Path: base, Err: statErr}
+ }
+ if statInfo.Mode&unix.S_IFMT != unix.S_IFDIR {
+ // Not a directory; return the error from the unix.Unlinkat.
+ return &PathError{Op: "unlinkat", Path: base, Err: err}
+ }
+
+ // Remove the directory's entries.
+ var recurseErr error
+ for {
+ const reqSize = 1024
+ var respSize int
+
+ // Open the directory to recurse into
+ file, err := openFdAt(parentFd, base)
+ if err != nil {
+ if errors.Is(err, ErrNotExist) {
+ return nil
+ }
+ recurseErr = &PathError{Op: "openfdat", Path: base, Err: err}
+ break
+ }
+
+ for {
+ numErr := 0
+
+ names, readErr := file.Readdirnames(reqSize)
+ // Errors other than EOF should stop us from continuing.
+ if readErr != nil && readErr != io.EOF {
+ _ = file.Close()
+ if errors.Is(readErr, ErrNotExist) {
+ return nil
+ }
+ return &PathError{Op: "readdirnames", Path: base, Err: readErr}
+ }
+
+ respSize = len(names)
+ for _, name := range names {
+ err := removeAllFrom(fs, file, name)
+ if err != nil {
+ if pathErr, ok := err.(*PathError); ok {
+ pathErr.Path = base + string(os.PathSeparator) + pathErr.Path
+ }
+ numErr++
+ if recurseErr == nil {
+ recurseErr = err
+ }
+ }
+ }
+
+ // If we can delete any entry, break to start new iteration.
+ // Otherwise, we discard current names, get next entries and try deleting them.
+ if numErr != reqSize {
+ break
+ }
+ }
+
+ // Removing files from the directory may have caused
+ // the OS to reshuffle it. Simply calling Readdirnames
+ // again may skip some entries. The only reliable way
+ // to avoid this is to close and re-open the
+ // directory. See issue https://go.dev/issue/20841.
+ _ = file.Close()
+
+ // Finish when the end of the directory is reached
+ if respSize < reqSize {
+ break
+ }
+ }
+
+ // Remove the directory itself.
+ unlinkErr := fs.unlinkat(parentFd, base, AT_REMOVEDIR)
+ if unlinkErr == nil || errors.Is(unlinkErr, ErrNotExist) {
+ return nil
+ }
+
+ if recurseErr != nil {
+ return recurseErr
+ }
+ return &PathError{Op: "unlinkat", Path: base, Err: unlinkErr}
+}
+
+// openFdAt opens path relative to the directory in dirfd. It deliberately
+// bypasses the higher-level OpenFile helpers because we are going to (try to)
+// remove the file.
+func openFdAt(dirfd int, name string) (File, error) {
+ var fd int
+ for {
+ var err error
+ fd, err = unix.Openat(dirfd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW, 0)
+ if err == nil {
+ break
+ }
+
+		// Retry when the call is interrupted by a signal, per https://go.dev/issue/11180.
+ if err == unix.EINTR {
+ continue
+ }
+
+ return nil, err
+ }
+ // This is stupid, os.NewFile immediately casts `fd` to an `int`, but wants
+ // it to be passed as a `uintptr`.
+ return os.NewFile(uintptr(fd), name), nil
+}
diff --git a/internal/ufs/stat_unix.go b/internal/ufs/stat_unix.go
new file mode 100644
index 0000000..3339fbb
--- /dev/null
+++ b/internal/ufs/stat_unix.go
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code in this file was copied from `go/src/os/stat_linux.go`
+// and `go/src/os/types_unix.go`.
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the `go.LICENSE` file.
+
+//go:build unix
+
+package ufs
+
+import (
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+type fileStat struct {
+ name string
+ size int64
+ mode FileMode
+ modTime time.Time
+ sys unix.Stat_t
+}
+
+var _ FileInfo = (*fileStat)(nil)
+
+func (fs *fileStat) Size() int64 { return fs.size }
+func (fs *fileStat) Mode() FileMode { return fs.mode }
+func (fs *fileStat) ModTime() time.Time { return fs.modTime }
+func (fs *fileStat) Sys() any { return &fs.sys }
+func (fs *fileStat) Name() string { return fs.name }
+func (fs *fileStat) IsDir() bool { return fs.Mode().IsDir() }
+
+func fillFileStatFromSys(fs *fileStat, name string) {
+ fs.name = basename(name)
+ fs.size = fs.sys.Size
+ fs.modTime = time.Unix(fs.sys.Mtim.Unix())
+ fs.mode = FileMode(fs.sys.Mode & 0o777)
+ switch fs.sys.Mode & unix.S_IFMT {
+ case unix.S_IFBLK:
+ fs.mode |= ModeDevice
+ case unix.S_IFCHR:
+ fs.mode |= ModeDevice | ModeCharDevice
+ case unix.S_IFDIR:
+ fs.mode |= ModeDir
+ case unix.S_IFIFO:
+ fs.mode |= ModeNamedPipe
+ case unix.S_IFLNK:
+ fs.mode |= ModeSymlink
+ case unix.S_IFREG:
+ // nothing to do
+ case unix.S_IFSOCK:
+ fs.mode |= ModeSocket
+ }
+ if fs.sys.Mode&unix.S_ISGID != 0 {
+ fs.mode |= ModeSetgid
+ }
+ if fs.sys.Mode&unix.S_ISUID != 0 {
+ fs.mode |= ModeSetuid
+ }
+ if fs.sys.Mode&unix.S_ISVTX != 0 {
+ fs.mode |= ModeSticky
+ }
+}
diff --git a/internal/ufs/walk.go b/internal/ufs/walk.go
new file mode 100644
index 0000000..b0025a8
--- /dev/null
+++ b/internal/ufs/walk.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Code in this file was derived from `go/src/io/fs/walk.go`.
+
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the `go.LICENSE` file.
+
+package ufs
+
+import (
+ iofs "io/fs"
+ "path"
+)
+
+// SkipDir is used as a return value from [WalkDirFunc] to indicate that
+// the directory named in the call is to be skipped. It is not returned
+// as an error by any function.
+var SkipDir = iofs.SkipDir
+
+// SkipAll is used as a return value from [WalkDirFunc] to indicate that
+// all remaining files and directories are to be skipped. It is not returned
+// as an error by any function.
+var SkipAll = iofs.SkipAll
+
+// WalkDirFunc is the type of the function called by [WalkDir] to visit
+// each file or directory.
+//
+// The path argument contains the argument to [WalkDir] as a prefix.
+// That is, if WalkDir is called with root argument "dir" and finds a file
+// named "a" in that directory, the walk function will be called with
+// argument "dir/a".
+//
+// The d argument is the [DirEntry] for the named path.
+//
+// The error result returned by the function controls how [WalkDir]
+// continues. If the function returns the special value [SkipDir], WalkDir
+// skips the current directory (path if d.IsDir() is true, otherwise
+// path's parent directory). If the function returns the special value
+// [SkipAll], WalkDir skips all remaining files and directories. Otherwise,
+// if the function returns a non-nil error, WalkDir stops entirely and
+// returns that error.
+//
+// The err argument reports an error related to path, signaling that
+// [WalkDir] will not walk into that directory. The function can decide how
+// to handle that error; as described earlier, returning the error will
+// cause WalkDir to stop walking the entire tree.
+//
+// [WalkDir] calls the function with a non-nil err argument in two cases.
+//
+// First, if the initial [Stat] on the root directory fails, WalkDir
+// calls the function with path set to root, d set to nil, and err set to
+// the error from [fs.Stat].
+//
+// Second, if a directory's ReadDir method (see [ReadDirFile]) fails, WalkDir calls the
+// function with path set to the directory's path, d set to an
+// [DirEntry] describing the directory, and err set to the error from
+// ReadDir. In this second case, the function is called twice with the
+// path of the directory: the first call is before the directory read is
+// attempted and has err set to nil, giving the function a chance to
+// return [SkipDir] or [SkipAll] and avoid the ReadDir entirely. The second call
+// is after a failed ReadDir and reports the error from ReadDir.
+// (If ReadDir succeeds, there is no second call.)
+type WalkDirFunc func(path string, d DirEntry, err error) error
+
+// WalkDir walks the file tree rooted at root, calling fn for each file or
+// directory in the tree, including root.
+//
+// All errors that arise visiting files and directories are filtered by fn:
+// see the [WalkDirFunc] documentation for details.
+//
+// The files are walked in lexical order, which makes the output deterministic
+// but requires WalkDir to read an entire directory into memory before proceeding
+// to walk that directory.
+//
+// WalkDir does not follow symbolic links found in directories,
+// but if root itself is a symbolic link, its target will be walked.
+func WalkDir(fs Filesystem, root string, fn WalkDirFunc) error {
+ info, err := fs.Stat(root)
+ if err != nil {
+ err = fn(root, nil, err)
+ } else {
+ err = walkDir(fs, root, iofs.FileInfoToDirEntry(info), fn)
+ }
+ if err == SkipDir || err == SkipAll {
+ return nil
+ }
+ return err
+}
+
+// walkDir recursively descends path, calling walkDirFn.
+func walkDir(fs Filesystem, name string, d DirEntry, walkDirFn WalkDirFunc) error {
+ if err := walkDirFn(name, d, nil); err != nil || !d.IsDir() {
+ if err == SkipDir && d.IsDir() {
+ // Successfully skipped directory.
+ err = nil
+ }
+ return err
+ }
+
+ dirs, err := fs.ReadDir(name)
+ if err != nil {
+ // Second call, to report ReadDir error.
+ err = walkDirFn(name, d, err)
+ if err != nil {
+ if err == SkipDir && d.IsDir() {
+ err = nil
+ }
+ return err
+ }
+ }
+
+ for _, d1 := range dirs {
+ name1 := path.Join(name, d1.Name())
+ if err := walkDir(fs, name1, d1, walkDirFn); err != nil {
+ if err == SkipDir {
+ break
+ }
+ return err
+ }
+ }
+ return nil
+}
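
The WalkDir documentation above describes SkipDir and SkipAll in the abstract; a short usage sketch makes the control flow concrete. Assumptions: the code lives inside the wings module, /srv/data exists, and the boolean passed to NewUnixFS selects openat2 support as in the tests earlier in this change:

package main

import (
	"fmt"

	"github.com/pterodactyl/wings/internal/ufs"
)

func main() {
	fs, err := ufs.NewUnixFS("/srv/data", false)
	if err != nil {
		panic(err)
	}
	defer fs.Close()

	err = fs.WalkDir(".", func(p string, d ufs.DirEntry, err error) error {
		if err != nil {
			// Returning the error stops the walk entirely.
			return err
		}
		if d.IsDir() && d.Name() == ".git" {
			// Skip this directory but keep walking its siblings.
			return ufs.SkipDir
		}
		if !d.IsDir() {
			fmt.Println(p)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}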
diff --git a/internal/ufs/walk_unix.go b/internal/ufs/walk_unix.go
new file mode 100644
index 0000000..8472edf
--- /dev/null
+++ b/internal/ufs/walk_unix.go
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: BSD-2-Clause
+
+// Some code in this file was derived from https://github.com/karrick/godirwalk.
+
+//go:build unix
+
+package ufs
+
+import (
+ "bytes"
+ iofs "io/fs"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+)
+
+type WalkDiratFunc func(dirfd int, name, relative string, d DirEntry, err error) error
+
+func (fs *UnixFS) WalkDirat(dirfd int, name string, fn WalkDiratFunc) error {
+ if dirfd == 0 {
+ // TODO: proper validation, ideally a dedicated function.
+ dirfd = int(fs.dirfd.Load())
+ }
+ info, err := fs.Lstatat(dirfd, name)
+ if err != nil {
+ err = fn(dirfd, name, name, nil, err)
+ } else {
+ b := newScratchBuffer()
+ err = fs.walkDir(b, dirfd, name, name, iofs.FileInfoToDirEntry(info), fn)
+ }
+ if err == SkipDir || err == SkipAll {
+ return nil
+ }
+ return err
+}
+
+func (fs *UnixFS) walkDir(b []byte, parentfd int, name, relative string, d DirEntry, walkDirFn WalkDiratFunc) error {
+ if err := walkDirFn(parentfd, name, relative, d, nil); err != nil || !d.IsDir() {
+ if err == SkipDir && d.IsDir() {
+ // Successfully skipped directory.
+ err = nil
+ }
+ return err
+ }
+
+ dirfd, err := fs.openat(parentfd, name, O_DIRECTORY|O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+ defer unix.Close(dirfd)
+
+ dirs, err := fs.readDir(dirfd, name, b)
+ if err != nil {
+ // Second call, to report ReadDir error.
+ err = walkDirFn(dirfd, name, relative, d, err)
+ if err != nil {
+ if err == SkipDir && d.IsDir() {
+ err = nil
+ }
+ return err
+ }
+ }
+
+ for _, d1 := range dirs {
+ // TODO: the path.Join on this line may actually be partially incorrect.
+ // If we are not walking starting at the root, relative will contain the
+ // name of the directory we are starting the walk from, which will be
+ // relative to the root of the filesystem instead of from where the walk
+ // was initiated from.
+ //
+ // ref; https://github.com/pterodactyl/panel/issues/5030
+ if err := fs.walkDir(b, dirfd, d1.Name(), path.Join(relative, d1.Name()), d1, walkDirFn); err != nil {
+ if err == SkipDir {
+ break
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// ReadDirMap reads the directory at path and calls fn for each entry,
+// returning a slice of the values produced by fn. It stops at the first error.
+func ReadDirMap[T any](fs *UnixFS, path string, fn func(DirEntry) (T, error)) ([]T, error) {
+ dirfd, name, closeFd, err := fs.safePath(path)
+ defer closeFd()
+ if err != nil {
+ return nil, err
+ }
+ fd, err := fs.openat(dirfd, name, O_DIRECTORY|O_RDONLY, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer unix.Close(fd)
+
+ entries, err := fs.readDir(fd, ".", nil)
+ if err != nil {
+ return nil, err
+ }
+
+	out := make([]T, len(entries))
+	for i, e := range entries {
+		v, err := fn(e)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = v
+	}
+ return out, nil
+}
+
+// nameOffset is a compile time constant
+const nameOffset = int(unsafe.Offsetof(unix.Dirent{}.Name))
+
+func nameFromDirent(de *unix.Dirent) (name []byte) {
+	// Because this GOOS' unix.Dirent does not provide a field that specifies
+	// the name length, this function must first calculate the max possible name
+	// length and then search for the NULL byte.
+ ml := int(de.Reclen) - nameOffset
+
+	// Convert unix.Dirent.Name, which is an array of int8, to []byte by
+	// overwriting the Cap, Len, and Data slice header fields with the max
+	// possible name length computed above, then finding the terminating NULL byte.
+ //
+ // TODO: is there an alternative to the deprecated SliceHeader?
+ // SliceHeader was mainly deprecated due to it being misused for avoiding
+ // allocations when converting a byte slice to a string, ref;
+ // https://go.dev/issue/53003
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&name))
+ sh.Cap = ml
+ sh.Len = ml
+ sh.Data = uintptr(unsafe.Pointer(&de.Name[0]))
+
+ if index := bytes.IndexByte(name, 0); index >= 0 {
+ // Found NULL byte; set slice's cap and len accordingly.
+ sh.Cap = index
+ sh.Len = index
+ return
+ }
+
+ // NOTE: This branch is not expected, but included for defensive
+ // programming, and provides a hard stop on the name based on the structure
+ // field array size.
+ sh.Cap = len(de.Name)
+ sh.Len = sh.Cap
+ return
+}
+
+// modeTypeFromDirent converts a syscall-defined constant, which is in the
+// purview of the OS, to a constant defined by Go that is assumed by this
+// project to be stable.
+//
+// When the syscall constant is not recognized, this function falls back to a
+// Stat on the file system.
+func (fs *UnixFS) modeTypeFromDirent(fd int, de *unix.Dirent, osDirname, osBasename string) (FileMode, error) {
+ switch de.Type {
+ case unix.DT_REG:
+ return 0, nil
+ case unix.DT_DIR:
+ return ModeDir, nil
+ case unix.DT_LNK:
+ return ModeSymlink, nil
+ case unix.DT_CHR:
+ return ModeDevice | ModeCharDevice, nil
+ case unix.DT_BLK:
+ return ModeDevice, nil
+ case unix.DT_FIFO:
+ return ModeNamedPipe, nil
+ case unix.DT_SOCK:
+ return ModeSocket, nil
+ default:
+ // If syscall returned unknown type (e.g., DT_UNKNOWN, DT_WHT), then
+ // resolve actual mode by reading file information.
+ return fs.modeType(fd, filepath.Join(osDirname, osBasename))
+ }
+}
+
+// modeType returns the mode type of the file system entry identified by dirfd
+// and name, using Lstatat so that symbolic links are intentionally not
+// followed.
+//
+// Even though Lstatat provides all file mode bits, we want to ensure the same
+// values are returned to the caller regardless of whether we obtained the file
+// mode bits from the syscall or a stat call. Therefore, mask out the additional
+// file mode bits that are provided by stat but not by the syscall, so users can
+// rely on their values.
+func (fs *UnixFS) modeType(dirfd int, name string) (os.FileMode, error) {
+ fi, err := fs.Lstatat(dirfd, name)
+ if err == nil {
+ return fi.Mode() & ModeType, nil
+ }
+ return 0, err
+}
+
+var minimumScratchBufferSize = os.Getpagesize()
+
+func newScratchBuffer() []byte {
+ return make([]byte, minimumScratchBufferSize)
+}
+
+func (fs *UnixFS) readDir(fd int, name string, b []byte) ([]DirEntry, error) {
+ scratchBuffer := b
+ if scratchBuffer == nil || len(scratchBuffer) < minimumScratchBufferSize {
+ scratchBuffer = newScratchBuffer()
+ }
+
+ var entries []DirEntry
+ var workBuffer []byte
+
+ var sde unix.Dirent
+ for {
+ if len(workBuffer) == 0 {
+ n, err := unix.Getdents(fd, scratchBuffer)
+ if err != nil {
+ if err == unix.EINTR {
+ continue
+ }
+ return nil, convertErrorType(err)
+ }
+ if n <= 0 {
+ // end of directory: normal exit
+ return entries, nil
+ }
+ workBuffer = scratchBuffer[:n] // trim work buffer to number of bytes read
+ }
+
+ // "Go is like C, except that you just put `unsafe` all over the place".
+ copy((*[unsafe.Sizeof(unix.Dirent{})]byte)(unsafe.Pointer(&sde))[:], workBuffer)
+ workBuffer = workBuffer[sde.Reclen:] // advance buffer for next iteration through loop
+
+ if sde.Ino == 0 {
+ continue // inode set to 0 indicates an entry that was marked as deleted
+ }
+
+ nameSlice := nameFromDirent(&sde)
+ nameLength := len(nameSlice)
+
+ if nameLength == 0 || (nameSlice[0] == '.' && (nameLength == 1 || (nameLength == 2 && nameSlice[1] == '.'))) {
+ continue
+ }
+
+ childName := string(nameSlice)
+ mt, err := fs.modeTypeFromDirent(fd, &sde, name, childName)
+ if err != nil {
+ return nil, convertErrorType(err)
+ }
+ entries = append(entries, &dirent{name: childName, path: name, modeType: mt, dirfd: fd, fs: fs})
+ }
+}
+
+// dirent stores the name and file system mode type of discovered file system
+// entries.
+type dirent struct {
+ name string
+ path string
+ modeType FileMode
+
+ dirfd int
+ fs *UnixFS
+}
+
+func (de dirent) Name() string {
+ return de.name
+}
+
+func (de dirent) IsDir() bool {
+ return de.modeType&ModeDir != 0
+}
+
+func (de dirent) Type() FileMode {
+ return de.modeType
+}
+
+func (de dirent) Info() (FileInfo, error) {
+ if de.fs == nil {
+ return nil, nil
+ }
+ return de.fs.Lstatat(de.dirfd, de.name)
+}
+
+func (de dirent) Open() (File, error) {
+ if de.fs == nil {
+ return nil, nil
+ }
+ return de.fs.OpenFileat(de.dirfd, de.name, O_RDONLY, 0)
+}
+
+// reset clears the entry's name and path and resets its mode type to 0.
+func (de *dirent) reset() {
+ de.name = ""
+ de.path = ""
+ de.modeType = 0
+}
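
ReadDirMap is generic over the value produced per entry, which keeps the openat/getdents plumbing in one place while the caller only supplies a mapping function. A usage sketch under the same assumptions as the previous examples (wings module, existing /srv/data root):

package main

import (
	"fmt"

	"github.com/pterodactyl/wings/internal/ufs"
)

func main() {
	fs, err := ufs.NewUnixFS("/srv/data", false)
	if err != nil {
		panic(err)
	}
	defer fs.Close()

	// Collect the name of every entry in the filesystem root, marking
	// directories with a trailing slash.
	names, err := ufs.ReadDirMap(fs, ".", func(d ufs.DirEntry) (string, error) {
		if d.IsDir() {
			return d.Name() + "/", nil
		}
		return d.Name(), nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(names)
}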
diff --git a/parser/helpers.go b/parser/helpers.go
index a8e8ec7..be09c68 100644
--- a/parser/helpers.go
+++ b/parser/helpers.go
@@ -2,8 +2,6 @@ package parser
import (
"bytes"
- "io"
- "os"
"regexp"
"strconv"
"strings"
@@ -29,24 +27,14 @@ var configMatchRegex = regexp.MustCompile(`{{\s?config\.([\w.-]+)\s?}}`)
// matching:
//
//
-//
+//
+//
+//
//
//
// noinspection RegExpRedundantEscape
var xmlValueMatchRegex = regexp.MustCompile(`^\[([\w]+)='(.*)'\]$`)
-// Gets the []byte representation of a configuration file to be passed through to other
-// handler functions. If the file does not currently exist, it will be created.
-func readFileBytes(path string) ([]byte, error) {
- file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
- if err != nil {
- return nil, err
- }
- defer file.Close()
-
- return io.ReadAll(file)
-}
-
// Gets the value of a key based on the value type defined.
func (cfr *ConfigurationFileReplacement) getKeyValue(value string) interface{} {
if cfr.ReplaceWith.Type() == jsonparser.Boolean {
diff --git a/parser/parser.go b/parser/parser.go
index 326d442..3b5e5e2 100644
--- a/parser/parser.go
+++ b/parser/parser.go
@@ -2,8 +2,8 @@ package parser
import (
"bufio"
- "os"
- "path/filepath"
+ "bytes"
+ "io"
"strconv"
"strings"
@@ -18,6 +18,7 @@ import (
"gopkg.in/yaml.v3"
"github.com/Tech-Gamer/nwy-wings/config"
+ "github.com/Tech-Gamer/nwy-wings/internal/ufs"
)
// The file parsing options that are available for a server configuration file.
@@ -74,6 +75,26 @@ func (cv *ReplaceValue) String() string {
}
}
+func (cv *ReplaceValue) Bytes() []byte {
+ switch cv.Type() {
+ case jsonparser.String:
+ var stackbuf [64]byte
+ bU, err := jsonparser.Unescape(cv.value, stackbuf[:])
+ if err != nil {
+ panic(errors.Wrap(err, "parser: could not parse value"))
+ }
+ return bU
+ case jsonparser.Null:
+ return []byte("")
+ case jsonparser.Boolean:
+ return cv.value
+ case jsonparser.Number:
+ return cv.value
+ default:
+ return []byte("")
+ }
+}
+
type ConfigurationParser string
func (cp ConfigurationParser) String() string {
@@ -167,11 +188,12 @@ func (cfr *ConfigurationFileReplacement) UnmarshalJSON(data []byte) error {
return nil
}
-// Parses a given configuration file and updates all of the values within as defined
-// in the API response from the Panel.
-func (f *ConfigurationFile) Parse(path string, internal bool) error {
- log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
+// Parse parses a given configuration file and updates all the values within
+// as defined in the API response from the Panel.
+func (f *ConfigurationFile) Parse(file ufs.File) error {
+ //log.WithField("path", path).WithField("parser", f.Parser.String()).Debug("parsing server configuration file")
+	// Marshal the Wings configuration to JSON so values referenced through
+	// `{{config.*}}` placeholders can be looked up during replacement.
if mb, err := json.Marshal(config.Get()); err != nil {
return err
} else {
@@ -182,56 +204,24 @@ func (f *ConfigurationFile) Parse(path string, internal bool) error {
switch f.Parser {
case Properties:
- err = f.parsePropertiesFile(path)
- break
+ err = f.parsePropertiesFile(file)
case File:
- err = f.parseTextFile(path)
- break
+ err = f.parseTextFile(file)
case Yaml, "yml":
- err = f.parseYamlFile(path)
- break
+ err = f.parseYamlFile(file)
case Json:
- err = f.parseJsonFile(path)
- break
+ err = f.parseJsonFile(file)
case Ini:
- err = f.parseIniFile(path)
- break
+ err = f.parseIniFile(file)
case Xml:
- err = f.parseXmlFile(path)
- break
+ err = f.parseXmlFile(file)
}
-
- if errors.Is(err, os.ErrNotExist) {
- // File doesn't exist, we tried creating it, and same error is returned? Pretty
- // sure this pathway is impossible, but if not, abort here.
- if internal {
- return nil
- }
-
- b := strings.TrimSuffix(path, filepath.Base(path))
- if err := os.MkdirAll(b, 0o755); err != nil {
- return errors.WithMessage(err, "failed to create base directory for missing configuration file")
- } else {
- if _, err := os.Create(path); err != nil {
- return errors.WithMessage(err, "failed to create missing configuration file")
- }
- }
-
- return f.Parse(path, true)
- }
-
return err
}
// Parses an xml file.
-func (f *ConfigurationFile) parseXmlFile(path string) error {
+func (f *ConfigurationFile) parseXmlFile(file ufs.File) error {
doc := etree.NewDocument()
- file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
- if err != nil {
- return err
- }
- defer file.Close()
-
if _, err := doc.ReadFrom(file); err != nil {
return err
}
@@ -291,41 +281,27 @@ func (f *ConfigurationFile) parseXmlFile(path string) error {
}
}
- // If you don't truncate the file you'll end up duplicating the data in there (or just appending
- // to the end of the file. We don't want to do that.
+ if _, err := file.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
if err := file.Truncate(0); err != nil {
return err
}
- // Move the cursor to the start of the file to avoid weird spacing issues.
- file.Seek(0, 0)
-
// Ensure the XML is indented properly.
doc.Indent(2)
- // Truncate the file before attempting to write the changes.
- if err := os.Truncate(path, 0); err != nil {
+ // Write the XML to the file.
+ if _, err := doc.WriteTo(file); err != nil {
return err
}
-
- // Write the XML to the file.
- _, err = doc.WriteTo(file)
-
- return err
+ return nil
}
// Parses an ini file.
-func (f *ConfigurationFile) parseIniFile(path string) error {
- // Ini package can't handle a non-existent file, so handle that automatically here
- // by creating it if not exists. Then, immediately close the file since we will use
- // other methods to write the new contents.
- file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
- if err != nil {
- return err
- }
- file.Close()
-
- cfg, err := ini.Load(path)
+func (f *ConfigurationFile) parseIniFile(file ufs.File) error {
+ // Wrap the file in a NopCloser so the ini package doesn't close the file.
+ cfg, err := ini.Load(io.NopCloser(file))
if err != nil {
return err
}
@@ -388,14 +364,24 @@ func (f *ConfigurationFile) parseIniFile(path string) error {
}
}
- return cfg.SaveTo(path)
+ if _, err := file.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ if err := file.Truncate(0); err != nil {
+ return err
+ }
+
+ if _, err := cfg.WriteTo(file); err != nil {
+ return err
+ }
+ return nil
}
// Parses a json file updating any matching key/value pairs. If a match is not found, the
// value is set regardless in the file. See the commentary in parseYamlFile for more details
// about what is happening during this process.
-func (f *ConfigurationFile) parseJsonFile(path string) error {
- b, err := readFileBytes(path)
+func (f *ConfigurationFile) parseJsonFile(file ufs.File) error {
+ b, err := io.ReadAll(file)
if err != nil {
return err
}
@@ -405,14 +391,24 @@ func (f *ConfigurationFile) parseJsonFile(path string) error {
return err
}
- output := []byte(data.StringIndent("", " "))
- return os.WriteFile(path, output, 0o644)
+ if _, err := file.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ if err := file.Truncate(0); err != nil {
+ return err
+ }
+
+ // Write the data to the file.
+ if _, err := io.Copy(file, bytes.NewReader(data.BytesIndent("", " "))); err != nil {
+		return errors.Wrap(err, "parser: failed to write json file to disk")
+ }
+ return nil
}
// Parses a yaml file and updates any matching key/value pairs before persisting
// it back to the disk.
-func (f *ConfigurationFile) parseYamlFile(path string) error {
- b, err := readFileBytes(path)
+func (f *ConfigurationFile) parseYamlFile(file ufs.File) error {
+ b, err := io.ReadAll(file)
if err != nil {
return err
}
@@ -443,35 +439,56 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
return err
}
- return os.WriteFile(path, marshaled, 0o644)
+ if _, err := file.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ if err := file.Truncate(0); err != nil {
+ return err
+ }
+
+ // Write the data to the file.
+ if _, err := io.Copy(file, bytes.NewReader(marshaled)); err != nil {
+		return errors.Wrap(err, "parser: failed to write yaml file to disk")
+ }
+ return nil
}
// Parses a text file using basic find and replace. This is a highly inefficient method of
// scanning a file and performing a replacement. You should attempt to use anything other
// than this function where possible.
-func (f *ConfigurationFile) parseTextFile(path string) error {
- input, err := os.ReadFile(path)
- if err != nil {
- return err
- }
-
- lines := strings.Split(string(input), "\n")
- for i, line := range lines {
+func (f *ConfigurationFile) parseTextFile(file ufs.File) error {
+ b := bytes.NewBuffer(nil)
+ s := bufio.NewScanner(file)
+ var replaced bool
+ for s.Scan() {
+ line := s.Bytes()
+ replaced = false
for _, replace := range f.Replace {
// If this line doesn't match what we expect for the replacement, move on to the next
// line. Otherwise, update the line to have the replacement value.
- if !strings.HasPrefix(line, replace.Match) {
+ if !bytes.HasPrefix(line, []byte(replace.Match)) {
continue
}
-
- lines[i] = replace.ReplaceWith.String()
+ b.Write(replace.ReplaceWith.Bytes())
+ replaced = true
}
+ if !replaced {
+ b.Write(line)
+ }
+ b.WriteByte('\n')
}
- if err := os.WriteFile(path, []byte(strings.Join(lines, "\n")), 0o644); err != nil {
+ if _, err := file.Seek(0, io.SeekStart); err != nil {
+ return err
+ }
+ if err := file.Truncate(0); err != nil {
return err
}
+ // Write the data to the file.
+ if _, err := io.Copy(file, b); err != nil {
+		return errors.Wrap(err, "parser: failed to write text file to disk")
+ }
return nil
}
@@ -501,31 +518,29 @@ func (f *ConfigurationFile) parseTextFile(path string) error {
//
// @see https://github.com/pterodactyl/panel/issues/2308 (original)
// @see https://github.com/pterodactyl/panel/issues/3009 ("bug" introduced as result)
-func (f *ConfigurationFile) parsePropertiesFile(path string) error {
- var s strings.Builder
- // Open the file and attempt to load any comments that currenty exist at the start
- // of the file. This is kind of a hack, but should work for a majority of users for
- // the time being.
- if fd, err := os.Open(path); err != nil {
- return errors.Wrap(err, "parser: could not open file for reading")
- } else {
- scanner := bufio.NewScanner(fd)
- // Scan until we hit a line that is not a comment that actually has content
- // on it. Keep appending the comments until that time.
- for scanner.Scan() {
- text := scanner.Text()
- if len(text) > 0 && text[0] != '#' {
- break
- }
- s.WriteString(text + "\n")
- }
- _ = fd.Close()
- if err := scanner.Err(); err != nil {
- return errors.WithStackIf(err)
- }
+func (f *ConfigurationFile) parsePropertiesFile(file ufs.File) error {
+ b, err := io.ReadAll(file)
+ if err != nil {
+ return err
}
- p, err := properties.LoadFile(path, properties.UTF8)
+ s := bytes.NewBuffer(nil)
+ scanner := bufio.NewScanner(bytes.NewReader(b))
+ // Scan until we hit a line that is not a comment that actually has content
+ // on it. Keep appending the comments until that time.
+ for scanner.Scan() {
+ text := scanner.Bytes()
+ if len(text) > 0 && text[0] != '#' {
+ break
+ }
+ s.Write(text)
+ s.WriteByte('\n')
+ }
+ if err := scanner.Err(); err != nil {
+ return errors.WithStackIf(err)
+ }
+
+ p, err := properties.Load(b, properties.UTF8)
if err != nil {
return errors.Wrap(err, "parser: could not load properties file for configuration update")
}
@@ -563,17 +578,16 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
s.WriteString(key + "=" + strings.Trim(strconv.QuoteToASCII(value), "\"") + "\n")
}
- // Open the file for writing.
- w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
- if err != nil {
+ if _, err := file.Seek(0, io.SeekStart); err != nil {
return err
}
- defer w.Close()
-
- // Write the data to the file.
- if _, err := w.Write([]byte(s.String())); err != nil {
- return errors.Wrap(err, "parser: failed to write properties file to disk")
+ if err := file.Truncate(0); err != nil {
+ return err
}
+ // Write the data to the file.
+ if _, err := io.Copy(file, s); err != nil {
+ return errors.Wrap(err, "parser: failed to write properties file to disk")
+ }
return nil
}
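
Every parse* function above now persists changes through the same discipline: seek back to the start, truncate, then write the new contents through the already-open ufs.File rather than re-opening the path by name. A sketch of that pattern factored into a hypothetical rewrite helper (the helper, root path, and file name are illustrative, not part of this change):

package main

import (
	"bytes"
	"io"

	"github.com/pterodactyl/wings/internal/ufs"
)

// rewrite rewinds the file, drops its old contents, and writes the new ones,
// so nothing from the previous version can survive past the end of the write.
func rewrite(f ufs.File, contents []byte) error {
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if err := f.Truncate(0); err != nil {
		return err
	}
	_, err := io.Copy(f, bytes.NewReader(contents))
	return err
}

func main() {
	fs, err := ufs.NewUnixFS("/srv/data", false)
	if err != nil {
		panic(err)
	}
	defer fs.Close()

	f, err := fs.Touch("server.properties", ufs.O_RDWR, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := rewrite(f, []byte("motd=A Minecraft Server\n")); err != nil {
		panic(err)
	}
}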
diff --git a/router/downloader/downloader.go b/router/downloader/downloader.go
index fad74df..8623a42 100644
--- a/router/downloader/downloader.go
+++ b/router/downloader/downloader.go
@@ -20,20 +20,58 @@ import (
"github.com/Tech-Gamer/nwy-wings/server"
)
-var client = &http.Client{
- Timeout: time.Hour * 12,
- // Disallow any redirect on an HTTP call. This is a security requirement: do not modify
- // this logic without first ensuring that the new target location IS NOT within the current
- // instance's local network.
- //
- // This specific error response just causes the client to not follow the redirect and
- // returns the actual redirect response to the caller. Not perfect, but simple and most
- // people won't be using URLs that redirect anyways hopefully?
- //
- // We'll re-evaluate this down the road if needed.
- CheckRedirect: func(req *http.Request, via []*http.Request) error {
- return http.ErrUseLastResponse
- },
+var client *http.Client
+
+func init() {
+ dialer := &net.Dialer{
+ LocalAddr: nil,
+ }
+
+ trnspt := http.DefaultTransport.(*http.Transport).Clone()
+ trnspt.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+ c, err := dialer.DialContext(ctx, network, addr)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+
+ ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
+ if err != nil {
+ return c, errors.WithStack(err)
+ }
+ ip := net.ParseIP(ipStr)
+ if ip == nil {
+ return c, errors.WithStack(ErrInvalidIPAddress)
+ }
+ if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
+ return c, errors.WithStack(ErrInternalResolution)
+ }
+ for _, block := range internalRanges {
+ if !block.Contains(ip) {
+ continue
+ }
+ return c, errors.WithStack(ErrInternalResolution)
+ }
+ return c, nil
+ }
+
+ client = &http.Client{
+ Timeout: time.Hour * 12,
+
+ Transport: trnspt,
+
+ // Disallow any redirect on an HTTP call. This is a security requirement: do not modify
+ // this logic without first ensuring that the new target location IS NOT within the current
+ // instance's local network.
+ //
+ // This specific error response just causes the client to not follow the redirect and
+ // returns the actual redirect response to the caller. Not perfect, but simple and most
+ // people won't be using URLs that redirect anyways hopefully?
+ //
+ // We'll re-evaluate this down the road if needed.
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
}
var instance = &Downloader{
@@ -143,12 +181,6 @@ func (dl *Download) Execute() error {
dl.cancelFunc = &cancel
defer dl.Cancel()
- // Always ensure that we're checking the destination for the download to avoid a malicious
- // user from accessing internal network resources.
- if err := dl.isExternalNetwork(ctx); err != nil {
- return err
- }
-
// At this point we have verified the destination is not within the local network, so we can
// now make a request to that URL and pull down the file, saving it to the server's data
// directory.
@@ -167,13 +199,8 @@ func (dl *Download) Execute() error {
return errors.New("downloader: got bad response status from endpoint: " + res.Status)
}
- // If there is a Content-Length header on this request go ahead and check that we can
- // even write the whole file before beginning this process. If there is no header present
- // we'll just have to give it a spin and see how it goes.
- if res.ContentLength > 0 {
- if err := dl.server.Filesystem().HasSpaceFor(res.ContentLength); err != nil {
- return errors.WrapIf(err, "downloader: failed to write file: not enough space")
- }
+ if res.ContentLength < 1 {
+		return errors.New("downloader: response is missing a Content-Length header")
}
if dl.req.UseHeader {
@@ -200,8 +227,10 @@ func (dl *Download) Execute() error {
p := dl.Path()
dl.server.Log().WithField("path", p).Debug("writing remote file to disk")
+	// Write the file while tracking the progress; Write will check that the
+ // size of the file won't exceed the disk limit.
r := io.TeeReader(res.Body, dl.counter(res.ContentLength))
- if err := dl.server.Filesystem().Writefile(p, r); err != nil {
+ if err := dl.server.Filesystem().Write(p, r, res.ContentLength, 0o644); err != nil {
return errors.WrapIf(err, "downloader: failed to write file to server directory")
}
return nil
@@ -246,59 +275,6 @@ func (dl *Download) counter(contentLength int64) *Counter {
}
}
-// Verifies that a given download resolves to a location not within the current local
-// network for the machine. If the final destination of a resource is within the local
-// network an ErrInternalResolution error is returned.
-func (dl *Download) isExternalNetwork(ctx context.Context) error {
- dialer := &net.Dialer{
- LocalAddr: nil,
- }
-
- host := dl.req.URL.Host
-
- // This cluster-fuck of math and integer shit converts an integer IP into a proper IPv4.
- // For example: 16843009 would become 1.1.1.1
- //if i, err := strconv.ParseInt(host, 10, 64); err == nil {
- // host = strconv.FormatInt((i>>24)&0xFF, 10) + "." + strconv.FormatInt((i>>16)&0xFF, 10) + "." + strconv.FormatInt((i>>8)&0xFF, 10) + "." + strconv.FormatInt(i&0xFF, 10)
- //}
-
- if _, _, err := net.SplitHostPort(host); err != nil {
- if !strings.Contains(err.Error(), "missing port in address") {
- return errors.WithStack(err)
- }
- switch dl.req.URL.Scheme {
- case "http":
- host += ":80"
- case "https":
- host += ":443"
- }
- }
-
- c, err := dialer.DialContext(ctx, "tcp", host)
- if err != nil {
- return errors.WithStack(err)
- }
- _ = c.Close()
-
- ipStr, _, err := net.SplitHostPort(c.RemoteAddr().String())
- if err != nil {
- return errors.WithStack(err)
- }
- ip := net.ParseIP(ipStr)
- if ip == nil {
- return errors.WithStack(ErrInvalidIPAddress)
- }
- if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
- return errors.WithStack(ErrInternalResolution)
- }
- for _, block := range internalRanges {
- if block.Contains(ip) {
- return errors.WithStack(ErrInternalResolution)
- }
- }
- return nil
-}
-
// Downloader represents a global downloader that keeps track of all currently processing downloads
// for the machine.
type Downloader struct {
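The downloader hunks above combine two patterns: an `http.Client` whose `CheckRedirect` returns `http.ErrUseLastResponse` so redirects are surfaced to the caller instead of being silently followed, and a hard requirement on `Content-Length` so the download can be size-capped before any bytes are written. A minimal, stdlib-only sketch of that shape (the URL and destination path are placeholders, not values from this patch):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"os"
)

// fetch mirrors the downloader's new behaviour: redirects are returned as-is
// rather than followed, and responses without a usable Content-Length are
// rejected so a size check can run before anything touches the disk.
func fetch(url, dst string) error {
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			// Surface the redirect response to the caller instead of following it.
			return http.ErrUseLastResponse
		},
	}
	res, err := client.Get(url)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return errors.New("bad response status: " + res.Status)
	}
	if res.ContentLength < 1 {
		return errors.New("response is missing Content-Length")
	}
	f, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer f.Close()
	// Never copy more than the advertised length.
	_, err = io.Copy(f, io.LimitReader(res.Body, res.ContentLength))
	return err
}

func main() {
	if err := fetch("https://example.com/file.txt", "/tmp/file.txt"); err != nil {
		fmt.Println("download failed:", err)
	}
}
```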
diff --git a/router/router_download.go b/router/router_download.go
index dffd825..405b818 100644
--- a/router/router_download.go
+++ b/router/router_download.go
@@ -8,6 +8,7 @@ import (
"strconv"
"github.com/gin-gonic/gin"
+ "github.com/google/uuid"
"github.com/Tech-Gamer/nwy-wings/router/middleware"
"github.com/Tech-Gamer/nwy-wings/router/tokens"
@@ -19,12 +20,14 @@ func getDownloadBackup(c *gin.Context) {
client := middleware.ExtractApiClient(c)
manager := middleware.ExtractManager(c)
+ // Get the payload from the token.
token := tokens.BackupPayload{}
if err := tokens.ParseToken([]byte(c.Query("token")), &token); err != nil {
middleware.CaptureAndAbort(c, err)
return
}
+ // Get the server using the UUID from the token.
if _, ok := manager.Get(token.ServerUuid); !ok || !token.IsUniqueRequest() {
c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
"error": "The requested resource was not found on this server.",
@@ -32,6 +35,14 @@ func getDownloadBackup(c *gin.Context) {
return
}
+ // Validate that the BackupUuid field is actually a UUID and not some random characters or a
+ // file path.
+ if _, err := uuid.Parse(token.BackupUuid); err != nil {
+ middleware.CaptureAndAbort(c, err)
+ return
+ }
+
+ // Locate the backup on the local disk.
b, st, err := backup.LocateLocal(client, token.BackupUuid)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
@@ -45,6 +56,8 @@ func getDownloadBackup(c *gin.Context) {
return
}
+ // The use of `os` here is safe as backups are not stored within server
+ // accessible directories.
f, err := os.Open(b.Path())
if err != nil {
middleware.CaptureAndAbort(c, err)
@@ -76,26 +89,18 @@ func getDownloadFile(c *gin.Context) {
return
}
- p, _ := s.Filesystem().SafePath(token.FilePath)
- st, err := os.Stat(p)
- // If there is an error or we're somehow trying to download a directory, just
- // respond with the appropriate error.
- if err != nil {
- middleware.CaptureAndAbort(c, err)
- return
- } else if st.IsDir() {
- c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
- "error": "The requested resource was not found on this server.",
- })
- return
- }
-
- f, err := os.Open(p)
+ f, st, err := s.Filesystem().File(token.FilePath)
if err != nil {
middleware.CaptureAndAbort(c, err)
return
}
defer f.Close()
+ if st.IsDir() {
+ c.AbortWithStatusJSON(http.StatusNotFound, gin.H{
+ "error": "The requested resource was not found on this server.",
+ })
+ return
+ }
c.Header("Content-Length", strconv.Itoa(int(st.Size())))
c.Header("Content-Disposition", "attachment; filename="+strconv.Quote(st.Name()))
diff --git a/router/router_server.go b/router/router_server.go
index f322633..eeddefd 100644
--- a/router/router_server.go
+++ b/router/router_server.go
@@ -227,19 +227,19 @@ func deleteServer(c *gin.Context) {
//
// In addition, servers with large amounts of files can take some time to finish deleting,
// so we don't want to block the HTTP call while waiting on this.
- go func(p string) {
+ go func(s *server.Server) {
+ fs := s.Filesystem()
+ p := fs.Path()
+ _ = fs.UnixFS().Close()
if err := os.RemoveAll(p); err != nil {
log.WithFields(log.Fields{"path": p, "error": err}).Warn("failed to remove server files during deletion process")
}
- }(s.Filesystem().Path())
+ }(s)
middleware.ExtractManager(c).Remove(func(server *server.Server) bool {
return server.ID() == s.ID()
})
- // Deallocate the reference to this server.
- s = nil
-
c.Status(http.StatusNoContent)
}
diff --git a/router/router_server_files.go b/router/router_server_files.go
index 788b23b..8140b37 100644
--- a/router/router_server_files.go
+++ b/router/router_server_files.go
@@ -30,7 +30,7 @@ import (
// getServerFileContents returns the contents of a file on the server.
func getServerFileContents(c *gin.Context) {
s := middleware.ExtractServer(c)
- p := "/" + strings.TrimLeft(c.Query("file"), "/")
+ p := strings.TrimLeft(c.Query("file"), "/")
f, st, err := s.Filesystem().File(p)
if err != nil {
middleware.CaptureAndAbort(c, err)
@@ -129,7 +129,6 @@ func putServerRenameFiles(c *gin.Context) {
}
if err := fs.Rename(pf, pt); err != nil {
// Return nil if the error is an is not exists.
- // NOTE: os.IsNotExist() does not work if the error is wrapped.
if errors.Is(err, os.ErrNotExist) {
s.Log().WithField("error", err).
WithField("from_path", pf).
@@ -239,7 +238,16 @@ func postServerWriteFile(c *gin.Context) {
middleware.CaptureAndAbort(c, err)
return
}
- if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil {
+
+ // A content length of -1 means the actual length is unknown.
+ if c.Request.ContentLength == -1 {
+ c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
+ "error": "Missing Content-Length",
+ })
+ return
+ }
+
+ if err := s.Filesystem().Write(f, c.Request.Body, c.Request.ContentLength, 0o644); err != nil {
if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
"error": "Cannot write file, name conflicts with an existing directory by the same name.",
@@ -589,15 +597,9 @@ func postServerUploadFiles(c *gin.Context) {
}
for _, header := range headers {
- p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename))
- if err != nil {
- middleware.CaptureAndAbort(c, err)
- return
- }
-
// We run this in a different method so I can use defer without any of
// the consequences caused by calling it in a loop.
- if err := handleFileUpload(p, s, header); err != nil {
+ if err := handleFileUpload(filepath.Join(directory, header.Filename), s, header); err != nil {
middleware.CaptureAndAbort(c, err)
return
} else {
@@ -619,7 +621,8 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader)
if err := s.Filesystem().IsIgnored(p); err != nil {
return err
}
- if err := s.Filesystem().Writefile(p, file); err != nil {
+
+ if err := s.Filesystem().Write(p, file, header.Size, 0o644); err != nil {
return err
}
return nil
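The file-write endpoints now reject bodies of unknown length and pass the advertised size through to the size-aware `Write`. A cut-down gin handler showing the guard; the route name and the temporary file are stand-ins for the real server filesystem:

```go
package main

import (
	"io"
	"net/http"
	"os"

	"github.com/gin-gonic/gin"
)

// putFile sketches the write endpoint's new guard: when a client does not send
// a Content-Length, the request is rejected instead of streaming an unbounded
// body into a quota-checked write.
func putFile(c *gin.Context) {
	// A ContentLength of -1 means the length of the body is unknown.
	if c.Request.ContentLength == -1 {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"error": "Missing Content-Length",
		})
		return
	}
	// Stand-in for Filesystem.Write(p, r, size, mode); copy at most
	// ContentLength bytes into a scratch file.
	f, err := os.CreateTemp("", "upload-*")
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	defer f.Close()
	if _, err := io.Copy(f, io.LimitReader(c.Request.Body, c.Request.ContentLength)); err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	c.Status(http.StatusNoContent)
}

func main() {
	r := gin.Default()
	r.POST("/files/write", putFile)
	_ = r.Run(":8080")
}
```

The `io.LimitReader` cap mirrors what the new `Filesystem.Write` does internally with the size it is given.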
diff --git a/router/router_transfer.go b/router/router_transfer.go
index 7211b98..f42824b 100644
--- a/router/router_transfer.go
+++ b/router/router_transfer.go
@@ -106,6 +106,7 @@ func postTransfers(c *gin.Context) {
if !successful && err != nil {
// Delete all extracted files.
go func(trnsfr *transfer.Transfer) {
+ _ = trnsfr.Server.Filesystem().UnixFS().Close()
if err := os.RemoveAll(trnsfr.Server.Filesystem().Path()); err != nil && !os.IsNotExist(err) {
trnsfr.Log().WithError(err).Warn("failed to delete local server files")
}
diff --git a/server/backup.go b/server/backup.go
index f063e12..247b750 100644
--- a/server/backup.go
+++ b/server/backup.go
@@ -67,7 +67,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {
}
}
- ad, err := b.Generate(s.Context(), s.Filesystem().Path(), ignored)
+ ad, err := b.Generate(s.Context(), s.Filesystem(), ignored)
if err != nil {
if err := s.notifyPanelOfBackup(b.Identifier(), &backup.ArchiveDetails{}, false); err != nil {
s.Log().WithFields(log.Fields{
@@ -154,17 +154,14 @@ func (s *Server) RestoreBackup(b backup.BackupInterface, reader io.ReadCloser) (
err = b.Restore(s.Context(), reader, func(file string, info fs.FileInfo, r io.ReadCloser) error {
defer r.Close()
s.Events().Publish(DaemonMessageEvent, "(restoring): "+file)
-
- if err := s.Filesystem().Writefile(file, r); err != nil {
+ // TODO: since this will be called a lot, it may be worth adding an optimized
+ // Write with Chtimes method to the UnixFS that is able to re-use the
+ // same dirfd and file name.
+ if err := s.Filesystem().Write(file, r, info.Size(), info.Mode()); err != nil {
return err
}
- if err := s.Filesystem().Chmod(file, info.Mode()); err != nil {
- return err
- }
-
atime := info.ModTime()
- mtime := atime
- return s.Filesystem().Chtimes(file, atime, mtime)
+ return s.Filesystem().Chtimes(file, atime, atime)
})
return errors.WithStackIf(err)
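The restore path now writes each entry with the mode stored in the archive and then applies the archived modification time as both atime and mtime. The equivalent flow with the standard library's tar reader; this sketch assumes a trusted archive and notes where real code must sanitize entry names (the daemon's filesystem layer handles that):

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// restoreEntry writes a single tar entry to dst, preserving the mode recorded
// in the archive and setting both access and modification time to the entry's
// ModTime, mirroring the restore callback above.
func restoreEntry(dst string, hdr *tar.Header, r io.Reader) error {
	if err := os.MkdirAll(filepath.Dir(dst), 0o755); err != nil {
		return err
	}
	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdr.FileInfo().Mode())
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, io.LimitReader(r, hdr.Size)); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	// Apply the archived modification time to both atime and mtime.
	mtime := hdr.FileInfo().ModTime()
	return os.Chtimes(dst, mtime, mtime)
}

func main() {
	f, err := os.Open("/tmp/backup.tar")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println(err)
			return
		}
		// Only regular files are handled in this sketch; real code must also
		// sanitize hdr.Name before joining it to avoid path traversal.
		if hdr.Typeflag != tar.TypeReg {
			continue
		}
		if err := restoreEntry(filepath.Join("/tmp/restore", hdr.Name), hdr, tr); err != nil {
			fmt.Println(err)
			return
		}
	}
}
```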
diff --git a/server/backup/backup.go b/server/backup/backup.go
index 688f618..cc07de1 100644
--- a/server/backup/backup.go
+++ b/server/backup/backup.go
@@ -16,6 +16,9 @@ import (
"github.com/Tech-Gamer/nwy-wings/config"
"github.com/Tech-Gamer/nwy-wings/remote"
+ "github.com/Tech-Gamer/nwy-wings/config"
+ "github.com/Tech-Gamer/nwy-wings/remote"
+ "github.com/Tech-Gamer/nwy-wings/server/filesystem"
)
var format = archiver.CompressedArchive{
@@ -46,7 +49,7 @@ type BackupInterface interface {
WithLogContext(map[string]interface{})
// Generate creates a backup in whatever the configured source for the
// specific implementation is.
- Generate(context.Context, string, string) (*ArchiveDetails, error)
+ Generate(context.Context, *filesystem.Filesystem, string) (*ArchiveDetails, error)
// Ignored returns the ignored files for this backup instance.
Ignored() string
// Checksum returns a SHA1 checksum for the generated backup.
diff --git a/server/backup/backup_local.go b/server/backup/backup_local.go
index 14bf22b..9e09d07 100644
--- a/server/backup/backup_local.go
+++ b/server/backup/backup_local.go
@@ -59,10 +59,10 @@ func (b *LocalBackup) WithLogContext(c map[string]interface{}) {
// Generate generates a backup of the selected files and pushes it to the
// defined location for this instance.
-func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
+func (b *LocalBackup) Generate(ctx context.Context, fsys *filesystem.Filesystem, ignore string) (*ArchiveDetails, error) {
a := &filesystem.Archive{
- BasePath: basePath,
- Ignore: ignore,
+ Filesystem: fsys,
+ Ignore: ignore,
}
b.log().WithField("path", b.Path()).Info("creating backup for server")
diff --git a/server/backup/backup_s3.go b/server/backup/backup_s3.go
index 654b8f9..6844a79 100644
--- a/server/backup/backup_s3.go
+++ b/server/backup/backup_s3.go
@@ -48,12 +48,12 @@ func (s *S3Backup) WithLogContext(c map[string]interface{}) {
// Generate creates a new backup on the disk, moves it into the S3 bucket via
// the provided presigned URL, and then deletes the backup from the disk.
-func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*ArchiveDetails, error) {
+func (s *S3Backup) Generate(ctx context.Context, fsys *filesystem.Filesystem, ignore string) (*ArchiveDetails, error) {
defer s.Remove()
a := &filesystem.Archive{
- BasePath: basePath,
- Ignore: ignore,
+ Filesystem: fsys,
+ Ignore: ignore,
}
s.log().WithField("path", s.Path()).Info("creating backup for server")
diff --git a/server/config_parser.go b/server/config_parser.go
index 4c51724..f7f2302 100644
--- a/server/config_parser.go
+++ b/server/config_parser.go
@@ -4,9 +4,11 @@ import (
"runtime"
"github.com/gammazero/workerpool"
+
+ "github.com/pterodactyl/wings/internal/ufs"
)
-// UpdateConfigurationFiles updates all of the defined configuration files for
+// UpdateConfigurationFiles updates all the defined configuration files for
// a server automatically to ensure that they always use the specified values.
func (s *Server) UpdateConfigurationFiles() {
pool := workerpool.New(runtime.NumCPU())
@@ -18,18 +20,18 @@ func (s *Server) UpdateConfigurationFiles() {
f := cf
pool.Submit(func() {
- p, err := s.Filesystem().SafePath(f.FileName)
+ file, err := s.Filesystem().UnixFS().Touch(f.FileName, ufs.O_RDWR|ufs.O_CREATE, 0o644)
if err != nil {
- s.Log().WithField("error", err).Error("failed to generate safe path for configuration file")
-
+ s.Log().WithField("file_name", f.FileName).WithField("error", err).Error("failed to open file for configuration")
return
}
+ defer file.Close()
- if err := f.Parse(p, false); err != nil {
+ if err := f.Parse(file); err != nil {
s.Log().WithField("error", err).Error("failed to parse and update server configuration file")
}
- s.Log().WithField("path", f.FileName).Debug("finished processing server configuration file")
+ s.Log().WithField("file_name", f.FileName).Debug("finished processing server configuration file")
})
}
diff --git a/server/filesystem/archive.go b/server/filesystem/archive.go
index dffbcf8..84ae92d 100644
--- a/server/filesystem/archive.go
+++ b/server/filesystem/archive.go
@@ -3,7 +3,6 @@ package filesystem
import (
"archive/tar"
"context"
- "fmt"
"io"
"io/fs"
"os"
@@ -14,12 +13,12 @@ import (
"emperror.dev/errors"
"github.com/apex/log"
"github.com/juju/ratelimit"
- "github.com/karrick/godirwalk"
"github.com/klauspost/pgzip"
ignore "github.com/sabhiram/go-gitignore"
"github.com/Tech-Gamer/nwy-wings/config"
"github.com/Tech-Gamer/nwy-wings/internal/progress"
+ "github.com/Tech-Gamer/nwy-wings/internal/ufs"
)
const memory = 4 * 1024
@@ -57,27 +56,35 @@ func (p *TarProgress) Write(v []byte) (int, error) {
}
type Archive struct {
- // BasePath is the absolute path to create the archive from where Files and Ignore are
- // relative to.
- BasePath string
+ // Filesystem to create the archive with.
+ Filesystem *Filesystem
// Ignore is a gitignore string (most likely read from a file) of files to ignore
// from the archive.
Ignore string
- // Files specifies the files to archive, this takes priority over the Ignore option, if
- // unspecified, all files in the BasePath will be archived unless Ignore is set.
- //
- // All items in Files must be absolute within BasePath.
+ // BaseDirectory is the directory, relative to the root of the Filesystem,
+ // to create the archive from. If empty, the archive is created from the
+ // root of the Filesystem.
+ BaseDirectory string
+
+ // Files specifies the files to archive, this takes priority over the Ignore
+ // option, if unspecified, all files in the BaseDirectory will be archived
+ // unless Ignore is set.
Files []string
// Progress wraps the writer of the archive to pass through the progress tracker.
Progress *progress.Progress
+
+ w *TarProgress
}
// Create creates an archive at dst with all the files defined in the
// included Files array.
+//
+// THIS IS UNSAFE TO USE IF `dst` IS PROVIDED BY A USER! ONLY USE THIS WITH
+// CONTROLLED PATHS!
func (a *Archive) Create(ctx context.Context, dst string) error {
+ // Using os.OpenFile here is expected, as long as `dst` is not a user
+ // provided path.
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
@@ -98,14 +105,28 @@ func (a *Archive) Create(ctx context.Context, dst string) error {
return a.Stream(ctx, writer)
}
-// Stream .
-func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
- for _, f := range a.Files {
- if strings.HasPrefix(f, a.BasePath) {
- continue
- }
+type walkFunc func(dirfd int, name, relative string, d ufs.DirEntry) error
- return fmt.Errorf("archive: all entries in Files must be absolute and within BasePath: %s\n", f)
+// Stream streams the creation of the archive to the given writer.
+func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
+ if a.Filesystem == nil {
+ return errors.New("filesystem: archive.Filesystem is unset")
+ }
+
+ // The base directory may come with a prefixed `/`, strip it to prevent
+ // problems.
+ a.BaseDirectory = strings.TrimPrefix(a.BaseDirectory, "/")
+
+ if filesLen := len(a.Files); filesLen > 0 {
+ files := make([]string, filesLen)
+ for i, f := range a.Files {
+ if !strings.HasPrefix(f, a.Filesystem.Path()) {
+ files[i] = f
+ continue
+ }
+ files[i] = strings.TrimPrefix(strings.TrimPrefix(f, a.Filesystem.Path()), "/")
+ }
+ a.Files = files
}
// Choose which compression level to use based on the compression_level configuration option
@@ -115,8 +136,6 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
compressionLevel = pgzip.NoCompression
case "best_compression":
compressionLevel = pgzip.BestCompression
- case "best_speed":
- fallthrough
default:
compressionLevel = pgzip.BestSpeed
}
@@ -130,107 +149,124 @@ func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
tw := tar.NewWriter(gw)
defer tw.Close()
- pw := NewTarProgress(tw, a.Progress)
+ a.w = NewTarProgress(tw, a.Progress)
- // Configure godirwalk.
- options := &godirwalk.Options{
- FollowSymbolicLinks: false,
- Unsorted: true,
- }
+ fs := a.Filesystem.unixFS
// If we're specifically looking for only certain files, or have requested
// that certain files be ignored we'll update the callback function to reflect
// that request.
- var callback godirwalk.WalkFunc
+ var callback walkFunc
if len(a.Files) == 0 && len(a.Ignore) > 0 {
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
-
- callback = a.callback(pw, func(_ string, rp string) error {
- if i.MatchesPath(rp) {
- return godirwalk.SkipThis
+ callback = a.callback(func(_ int, _, relative string, _ ufs.DirEntry) error {
+ if i.MatchesPath(relative) {
+ return SkipThis
}
-
return nil
})
} else if len(a.Files) > 0 {
- callback = a.withFilesCallback(pw)
+ callback = a.withFilesCallback()
} else {
- callback = a.callback(pw)
+ callback = a.callback()
}
- // Set the callback function, wrapped with support for context cancellation.
- options.Callback = func(path string, de *godirwalk.Dirent) error {
+ // Open the base directory we were provided.
+ dirfd, name, closeFd, err := fs.SafePath(a.BaseDirectory)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+
+ // Recursively walk the base directory.
+ return fs.WalkDirat(dirfd, name, func(dirfd int, name, relative string, d ufs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
select {
case <-ctx.Done():
return ctx.Err()
default:
- return callback(path, de)
+ return callback(dirfd, name, relative, d)
}
- }
-
- // Recursively walk the path we are archiving.
- return godirwalk.Walk(a.BasePath, options)
+ })
}
// Callback function used to determine if a given file should be included in the archive
// being generated.
-func (a *Archive) callback(tw *TarProgress, opts ...func(path string, relative string) error) func(path string, de *godirwalk.Dirent) error {
- return func(path string, de *godirwalk.Dirent) error {
+func (a *Archive) callback(opts ...walkFunc) walkFunc {
+ // Get the base directory we need to strip when walking.
+ //
+ // This is important as when we are walking, the last part of the base directory
+ // is present on all the paths we walk.
+ var base string
+ if a.BaseDirectory != "" {
+ base = filepath.Base(a.BaseDirectory) + "/"
+ }
+ return func(dirfd int, name, relative string, d ufs.DirEntry) error {
// Skip directories because we are walking them recursively.
- if de.IsDir() {
+ if d.IsDir() {
return nil
}
- relative := filepath.ToSlash(strings.TrimPrefix(path, a.BasePath+string(filepath.Separator)))
+ // If base isn't empty, strip it from the relative path. This fixes an
+ // issue when creating an archive starting from a nested directory.
+ //
+ // See https://github.com/pterodactyl/panel/issues/5030 for more details.
+ if base != "" {
+ relative = strings.TrimPrefix(relative, base)
+ }
// Call the additional options passed to this callback function. If any of them return
// a non-nil error we will exit immediately.
for _, opt := range opts {
- if err := opt(path, relative); err != nil {
+ if err := opt(dirfd, name, relative, d); err != nil {
+ if err == SkipThis {
+ return nil
+ }
return err
}
}
// Add the file to the archive, if it is nested in a directory,
// the directory will be automatically "created" in the archive.
- return a.addToArchive(path, relative, tw)
+ return a.addToArchive(dirfd, name, relative, d)
}
}
+var SkipThis = errors.New("skip this")
+
// Pushes only files defined in the Files key to the final archive.
-func (a *Archive) withFilesCallback(tw *TarProgress) func(path string, de *godirwalk.Dirent) error {
- return a.callback(tw, func(p string, rp string) error {
+func (a *Archive) withFilesCallback() walkFunc {
+ return a.callback(func(_ int, _, relative string, _ ufs.DirEntry) error {
for _, f := range a.Files {
// Allow exact file matches, otherwise check if file is within a parent directory.
//
// The slashes are added in the prefix checks to prevent partial name matches from being
// included in the archive.
- if f != p && !strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(f, "/")+"/") {
+ if f != relative && !strings.HasPrefix(strings.TrimSuffix(relative, "/")+"/", strings.TrimSuffix(f, "/")+"/") {
continue
}
// Once we have a match return a nil value here so that the loop stops and the
// call to this function will correctly include the file in the archive. If there
// are no matches we'll never make it to this line, and the final error returned
- // will be the godirwalk.SkipThis error.
+ // will be the ufs.SkipDir error.
return nil
}
- return godirwalk.SkipThis
+ return SkipThis
})
}
// Adds a given file path to the final archive being created.
-func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
- // Lstat the file, this will give us the same information as Stat except that it will not
- // follow a symlink to its target automatically. This is important to avoid including
- // files that exist outside the server root unintentionally in the backup.
- s, err := os.Lstat(p)
+func (a *Archive) addToArchive(dirfd int, name, relative string, entry ufs.DirEntry) error {
+ s, err := entry.Info()
if err != nil {
- if os.IsNotExist(err) {
+ if errors.Is(err, ufs.ErrNotExist) {
return nil
}
- return errors.WrapIff(err, "failed executing os.Lstat on '%s'", rp)
+ return errors.WrapIff(err, "failed executing os.Lstat on '%s'", name)
}
// Skip socket files as they are unsupported by archive/tar.
@@ -250,7 +286,7 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
if err != nil {
// Ignore the not exist errors specifically, since there is nothing important about that.
if !os.IsNotExist(err) {
- log.WithField("path", rp).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
+ log.WithField("name", name).WithField("readlink_err", err.Error()).Warn("failed reading symlink for target path; skipping...")
}
return nil
}
@@ -259,17 +295,17 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
// Get the tar FileInfoHeader in order to add the file to the archive.
header, err := tar.FileInfoHeader(s, filepath.ToSlash(target))
if err != nil {
- return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", rp)
+ return errors.WrapIff(err, "failed to get tar#FileInfoHeader for '%s'", name)
}
// Fix the header name if the file is not a symlink.
if s.Mode()&fs.ModeSymlink == 0 {
- header.Name = rp
+ header.Name = relative
}
// Write the tar FileInfoHeader to the archive.
- if err := w.WriteHeader(header); err != nil {
- return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", rp)
+ if err := a.w.WriteHeader(header); err != nil {
+ return errors.WrapIff(err, "failed to write tar#FileInfoHeader for '%s'", name)
}
// If the size of the file is less than 1 (most likely for symlinks), skip writing the file.
@@ -291,7 +327,7 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
}
// Open the file.
- f, err := os.Open(p)
+ f, err := a.Filesystem.unixFS.OpenFileat(dirfd, name, ufs.O_RDONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -301,9 +337,8 @@ func (a *Archive) addToArchive(p string, rp string, w *TarProgress) error {
defer f.Close()
// Copy the file's contents to the archive using our buffer.
- if _, err := io.CopyBuffer(w, io.LimitReader(f, header.Size), buf); err != nil {
+ if _, err := io.CopyBuffer(a.w, io.LimitReader(f, header.Size), buf); err != nil {
return errors.WrapIff(err, "failed to copy '%s' to archive", header.Name)
}
-
return nil
}
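With `BasePath` removed, an `Archive` is now driven by a `Filesystem` plus paths relative to its root. A sketch of how the reworked type might be used, assuming the fork's import path, a placeholder server root, and that a size of 0 means an unlimited disk quota:

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/Tech-Gamer/nwy-wings/server/filesystem"
)

func main() {
	// Build a Filesystem rooted at the server data directory; the root path
	// and the zero (unlimited) limit are assumptions for this sketch.
	fs, err := filesystem.New("/var/lib/pterodactyl/volumes/example", 0, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Archive a nested directory. Note that BaseDirectory and any Files
	// entries are now relative to the Filesystem root, not absolute paths.
	a := &filesystem.Archive{
		Filesystem:    fs,
		BaseDirectory: "world",
		Ignore:        "cache/\n*.tmp\n",
	}

	out, err := os.Create("/tmp/world.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Stream writes a gzip-compressed tar of the selected files to out.
	if err := a.Stream(context.Background(), out); err != nil {
		log.Fatal(err)
	}
}
```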
diff --git a/server/filesystem/archive_test.go b/server/filesystem/archive_test.go
index 88cb7ce..faf4741 100644
--- a/server/filesystem/archive_test.go
+++ b/server/filesystem/archive_test.go
@@ -20,43 +20,34 @@ func TestArchive_Stream(t *testing.T) {
g.Describe("Archive", func() {
g.AfterEach(func() {
// Reset the filesystem after each run.
- rfs.reset()
- })
-
- g.It("throws an error when passed invalid file paths", func() {
- a := &Archive{
- BasePath: fs.Path(),
- Files: []string{
- // To use the archiver properly, this needs to be filepath.Join(BasePath, "yeet")
- // However, this test tests that we actually validate that behavior.
- "yeet",
- },
- }
-
- g.Assert(a.Create(context.Background(), "")).IsNotNil()
+ _ = fs.TruncateRootDirectory()
})
g.It("creates archive with intended files", func() {
g.Assert(fs.CreateDirectory("test", "/")).IsNil()
g.Assert(fs.CreateDirectory("test2", "/")).IsNil()
- err := fs.Writefile("test/file.txt", strings.NewReader("hello, world!\n"))
+ r := strings.NewReader("hello, world!\n")
+ err := fs.Write("test/file.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
- err = fs.Writefile("test2/file.txt", strings.NewReader("hello, world!\n"))
+ r = strings.NewReader("hello, world!\n")
+ err = fs.Write("test2/file.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
- err = fs.Writefile("test_file.txt", strings.NewReader("hello, world!\n"))
+ r = strings.NewReader("hello, world!\n")
+ err = fs.Write("test_file.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
- err = fs.Writefile("test_file.txt.old", strings.NewReader("hello, world!\n"))
+ r = strings.NewReader("hello, world!\n")
+ err = fs.Write("test_file.txt.old", r, r.Size(), 0o644)
g.Assert(err).IsNil()
a := &Archive{
- BasePath: fs.Path(),
+ Filesystem: fs,
Files: []string{
- filepath.Join(fs.Path(), "test"),
- filepath.Join(fs.Path(), "test_file.txt"),
+ "test",
+ "test_file.txt",
},
}
@@ -119,7 +110,7 @@ func getFiles(f iofs.ReadDirFS, name string) ([]string, error) {
if files == nil {
return nil, nil
}
-
+
v = append(v, files...)
continue
}
diff --git a/server/filesystem/archiverext/compressed.go b/server/filesystem/archiverext/compressed.go
new file mode 100644
index 0000000..3dafee9
--- /dev/null
+++ b/server/filesystem/archiverext/compressed.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: MIT
+// SPDX-FileCopyrightText: Copyright (c) 2016 Matthew Holt
+
+// Code in this file was derived from
+// https://github.com/mholt/archiver/blob/v4.0.0-alpha.8/fs.go
+//
+// These modifications were necessary to allow us to use an already open file
+// with archiver.FileFS.
+
+package archiverext
+
+import (
+ "io"
+ "io/fs"
+
+ "github.com/mholt/archiver/v4"
+)
+
+// FileFS allows accessing a file on disk using a consistent file system interface.
+// The value should be the path to a regular file, not a directory. This file will
+// be the only entry in the file system and will be at its root. It can be accessed
+// within the file system by the name ".".
+//
+// If the file is compressed, set the Compression field so that reads from the
+// file will be transparently decompressed.
+type FileFS struct {
+ // File is the compressed file backing the FileFS.
+ File fs.File
+
+ // If file is compressed, setting this field will
+ // transparently decompress reads.
+ Compression archiver.Decompressor
+}
+
+// Open opens the named file, which must be the file used to create the file system.
+func (f FileFS) Open(name string) (fs.File, error) {
+ if err := f.checkName(name, "open"); err != nil {
+ return nil, err
+ }
+ if f.Compression == nil {
+ return f.File, nil
+ }
+ r, err := f.Compression.OpenReader(f.File)
+ if err != nil {
+ return nil, err
+ }
+ return compressedFile{f.File, r}, nil
+}
+
+// ReadDir returns a directory listing with the file as the singular entry.
+func (f FileFS) ReadDir(name string) ([]fs.DirEntry, error) {
+ if err := f.checkName(name, "stat"); err != nil {
+ return nil, err
+ }
+ info, err := f.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ return []fs.DirEntry{fs.FileInfoToDirEntry(info)}, nil
+}
+
+// Stat stats the named file, which must be the file used to create the file system.
+func (f FileFS) Stat(name string) (fs.FileInfo, error) {
+ if err := f.checkName(name, "stat"); err != nil {
+ return nil, err
+ }
+ return f.File.Stat()
+}
+
+func (f FileFS) checkName(name, op string) error {
+ if !fs.ValidPath(name) {
+ return &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid}
+ }
+ // TODO: we may need better name validation.
+ if name != "." {
+ return &fs.PathError{Op: op, Path: name, Err: fs.ErrNotExist}
+ }
+ return nil
+}
+
+// compressedFile is an fs.File that specially reads
+// from a decompression reader, and which closes both
+// that reader and the underlying file.
+type compressedFile struct {
+ fs.File
+ decomp io.ReadCloser
+}
+
+func (cf compressedFile) Read(p []byte) (int, error) {
+ return cf.decomp.Read(p)
+}
+
+func (cf compressedFile) Close() error {
+ err := cf.File.Close()
+ err2 := cf.decomp.Close()
+ if err2 != nil && err == nil {
+ err = err2
+ }
+ return err
+}
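`FileFS` exists so that an already-open handle can be exposed through an `fs.FS`, optionally with transparent decompression. A usage sketch with a gzip-compressed file; the path and the choice of `archiver.Gz` are illustrative:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/mholt/archiver/v4"

	"github.com/Tech-Gamer/nwy-wings/server/filesystem/archiverext"
)

func main() {
	// Open the compressed file ourselves; FileFS is built around reusing an
	// already-open handle instead of re-opening by path.
	f, err := os.Open("/tmp/example.log.gz")
	if err != nil {
		fmt.Println(err)
		return
	}

	fsys := archiverext.FileFS{File: f, Compression: archiver.Gz{}}

	// Open(".") returns the single entry; reads are transparently
	// decompressed because Compression is set.
	r, err := fsys.Open(".")
	if err != nil {
		_ = f.Close()
		fmt.Println(err)
		return
	}
	// Closing r closes both the decompressor and the underlying file.
	defer r.Close()

	if _, err := io.Copy(os.Stdout, r); err != nil {
		fmt.Println(err)
	}
}
```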
diff --git a/server/filesystem/compress.go b/server/filesystem/compress.go
index ed2810d..bcf2873 100644
--- a/server/filesystem/compress.go
+++ b/server/filesystem/compress.go
@@ -5,7 +5,6 @@ import (
"fmt"
"io"
iofs "io/fs"
- "os"
"path"
"path/filepath"
"strings"
@@ -13,7 +12,11 @@ import (
"time"
"emperror.dev/errors"
+ "github.com/klauspost/compress/zip"
"github.com/mholt/archiver/v4"
+
+ "github.com/pterodactyl/wings/internal/ufs"
+ "github.com/pterodactyl/wings/server/filesystem/archiverext"
)
// CompressFiles compresses all the files matching the given paths in the
@@ -25,46 +28,70 @@ import (
// All paths are relative to the dir that is passed in as the first argument,
// and the compressed file will be placed at that location named
// `archive-{date}.tar.gz`.
-func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, error) {
- cleanedRootDir, err := fs.SafePath(dir)
- if err != nil {
- return nil, err
- }
-
- // Take all the paths passed in and merge them together with the root directory we've gotten.
- for i, p := range paths {
- paths[i] = filepath.Join(cleanedRootDir, p)
- }
-
- cleaned, err := fs.ParallelSafePath(paths)
- if err != nil {
- return nil, err
- }
-
- a := &Archive{BasePath: cleanedRootDir, Files: cleaned}
+func (fs *Filesystem) CompressFiles(dir string, paths []string) (ufs.FileInfo, error) {
+ a := &Archive{Filesystem: fs, BaseDirectory: dir, Files: paths}
d := path.Join(
- cleanedRootDir,
+ dir,
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
)
-
- if err := a.Create(context.Background(), d); err != nil {
- return nil, err
- }
-
- f, err := os.Stat(d)
+ f, err := fs.unixFS.OpenFile(d, ufs.O_WRONLY|ufs.O_CREATE, 0o644)
if err != nil {
- _ = os.Remove(d)
+ return nil, err
+ }
+ defer f.Close()
+ cw := ufs.NewCountedWriter(f)
+ if err := a.Stream(context.Background(), cw); err != nil {
+ return nil, err
+ }
+ if !fs.unixFS.CanFit(cw.BytesWritten()) {
+ _ = fs.unixFS.Remove(d)
+ return nil, newFilesystemError(ErrCodeDiskSpace, nil)
+ }
+ fs.unixFS.Add(cw.BytesWritten())
+ return f.Stat()
+}
+
+func (fs *Filesystem) archiverFileSystem(ctx context.Context, p string) (iofs.FS, error) {
+ f, err := fs.unixFS.Open(p)
+ if err != nil {
+ return nil, err
+ }
+ // Do not use defer to close `f`, it will likely be used later.
+
+ format, _, err := archiver.Identify(filepath.Base(p), f)
+ if err != nil && !errors.Is(err, archiver.ErrNoMatch) {
+ _ = f.Close()
return nil, err
}
- if err := fs.HasSpaceFor(f.Size()); err != nil {
- _ = os.Remove(d)
+ // Reset the file reader.
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
+ _ = f.Close()
return nil, err
}
- fs.addDisk(f.Size())
+ info, err := f.Stat()
+ if err != nil {
+ _ = f.Close()
+ return nil, err
+ }
- return f, nil
+ if format != nil {
+ switch ff := format.(type) {
+ case archiver.Zip:
+ // zip.Reader is more performant than ArchiveFS: it caches content information
+ // and, because it works from an io.ReaderAt, it can open several entries
+ // concurrently, which ArchiveFS cannot.
+ // zip.Reader also doesn't suffer from issues #330 and #310 according to local
+ // testing (but they should be fixed anyway).
+ return zip.NewReader(f, info.Size())
+ case archiver.Archival:
+ return archiver.ArchiveFS{Stream: io.NewSectionReader(f, 0, info.Size()), Format: ff, Context: ctx}, nil
+ case archiver.Compression:
+ return archiverext.FileFS{File: f, Compression: ff}, nil
+ }
+ }
+ _ = f.Close()
+ return nil, archiver.ErrNoMatch
}
// SpaceAvailableForDecompression looks through a given archive and determines
@@ -76,16 +103,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
return nil
}
- source, err := fs.SafePath(filepath.Join(dir, file))
- if err != nil {
- return err
- }
-
- // Get the cached size in a parallel process so that if it is not cached we are not
- // waiting an unnecessary amount of time on this call.
- dirSize, err := fs.DiskUsage(false)
-
- fsys, err := archiver.FileSystem(ctx, source)
+ fsys, err := fs.archiverFileSystem(ctx, filepath.Join(dir, file))
if err != nil {
if errors.Is(err, archiver.ErrNoMatch) {
return newFilesystemError(ErrCodeUnknownArchive, err)
@@ -93,7 +111,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
return err
}
- var size int64
+ var size atomic.Int64
return iofs.WalkDir(fsys, ".", func(path string, d iofs.DirEntry, err error) error {
if err != nil {
return err
@@ -108,7 +126,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
if err != nil {
return err
}
- if atomic.AddInt64(&size, info.Size())+dirSize > fs.MaxDisk() {
+ if !fs.unixFS.CanFit(size.Add(info.Size())) {
return newFilesystemError(ErrCodeDiskSpace, nil)
}
return nil
@@ -122,23 +140,7 @@ func (fs *Filesystem) SpaceAvailableForDecompression(ctx context.Context, dir st
// zip-slip attack being attempted by validating that the final path is within
// the server data directory.
func (fs *Filesystem) DecompressFile(ctx context.Context, dir string, file string) error {
- source, err := fs.SafePath(filepath.Join(dir, file))
- if err != nil {
- return err
- }
- return fs.DecompressFileUnsafe(ctx, dir, source)
-}
-
-// DecompressFileUnsafe will decompress any file on the local disk without checking
-// if it is owned by the server. The file will be SAFELY decompressed and extracted
-// into the server's directory.
-func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file string) error {
- // Ensure that the archive actually exists on the system.
- if _, err := os.Stat(file); err != nil {
- return errors.WithStack(err)
- }
-
- f, err := os.Open(file)
+ f, err := fs.unixFS.Open(filepath.Join(dir, file))
if err != nil {
return err
}
@@ -169,7 +171,6 @@ func (fs *Filesystem) ExtractStreamUnsafe(ctx context.Context, dir string, r io.
}
return err
}
-
return fs.extractStream(ctx, extractStreamOptions{
Directory: dir,
Format: format,
@@ -190,34 +191,31 @@ type extractStreamOptions struct {
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
// Decompress and extract archive
- if ex, ok := opts.Format.(archiver.Extractor); ok {
- return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
- if f.IsDir() {
- return nil
- }
- p := filepath.Join(opts.Directory, f.NameInArchive)
- // If it is ignored, just don't do anything with the file and skip over it.
- if err := fs.IsIgnored(p); err != nil {
- return nil
- }
- r, err := f.Open()
- if err != nil {
- return err
- }
- defer r.Close()
- if err := fs.Writefile(p, r); err != nil {
- return wrapError(err, opts.FileName)
- }
- // Update the file permissions to the one set in the archive.
- if err := fs.Chmod(p, f.Mode()); err != nil {
- return wrapError(err, opts.FileName)
- }
- // Update the file modification time to the one set in the archive.
- if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
- return wrapError(err, opts.FileName)
- }
- return nil
- })
+ ex, ok := opts.Format.(archiver.Extractor)
+ if !ok {
+ return nil
}
- return nil
+ return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
+ if f.IsDir() {
+ return nil
+ }
+ p := filepath.Join(opts.Directory, f.NameInArchive)
+ // If it is ignored, just don't do anything with the file and skip over it.
+ if err := fs.IsIgnored(p); err != nil {
+ return nil
+ }
+ r, err := f.Open()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+ if err := fs.Write(p, r, f.Size(), f.Mode()); err != nil {
+ return wrapError(err, opts.FileName)
+ }
+ // Update the file modification time to the one set in the archive.
+ if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
+ return wrapError(err, opts.FileName)
+ }
+ return nil
+ })
}
diff --git a/server/filesystem/compress_test.go b/server/filesystem/compress_test.go
index d287424..80cf708 100644
--- a/server/filesystem/compress_test.go
+++ b/server/filesystem/compress_test.go
@@ -3,17 +3,18 @@ package filesystem
import (
"context"
"os"
- "sync/atomic"
"testing"
. "github.com/franela/goblin"
)
// Given an archive named test.{ext}, with the following file structure:
+//
// test/
// |──inside/
// |────finside.txt
// |──outside.txt
+//
// this test will ensure that it's being decompressed as expected
func TestFilesystem_DecompressFile(t *testing.T) {
g := Goblin(t)
@@ -47,9 +48,7 @@ func TestFilesystem_DecompressFile(t *testing.T) {
}
g.AfterEach(func() {
- rfs.reset()
- atomic.StoreInt64(&fs.diskUsed, 0)
- atomic.StoreInt64(&fs.diskLimit, 0)
+ _ = fs.TruncateRootDirectory()
})
})
}
diff --git a/server/filesystem/disk_space.go b/server/filesystem/disk_space.go
index f7f2c7b..eb8247a 100644
--- a/server/filesystem/disk_space.go
+++ b/server/filesystem/disk_space.go
@@ -3,24 +3,25 @@ package filesystem
import (
"sync"
"sync/atomic"
- "syscall"
"time"
"emperror.dev/errors"
"github.com/apex/log"
- "github.com/karrick/godirwalk"
+
+ "github.com/pterodactyl/wings/internal/ufs"
)
type SpaceCheckingOpts struct {
AllowStaleResponse bool
}
+// TODO: can this be replaced with some sort of atomic? Like atomic.Pointer?
type usageLookupTime struct {
sync.RWMutex
value time.Time
}
-// Update the last time that a disk space lookup was performed.
+// Set sets the last time that a disk space lookup was performed.
func (ult *usageLookupTime) Set(t time.Time) {
ult.Lock()
ult.value = t
@@ -35,14 +36,15 @@ func (ult *usageLookupTime) Get() time.Time {
return ult.value
}
-// Returns the maximum amount of disk space that this Filesystem instance is allowed to use.
+// MaxDisk returns the maximum amount of disk space that this Filesystem
+// instance is allowed to use.
func (fs *Filesystem) MaxDisk() int64 {
- return atomic.LoadInt64(&fs.diskLimit)
+ return fs.unixFS.Limit()
}
-// Sets the disk space limit for this Filesystem instance.
+// SetDiskLimit sets the disk space limit for this Filesystem instance.
func (fs *Filesystem) SetDiskLimit(i int64) {
- atomic.SwapInt64(&fs.diskLimit, i)
+ fs.unixFS.SetLimit(i)
}
// The same concept as HasSpaceAvailable however this will return an error if there is
@@ -65,7 +67,7 @@ func (fs *Filesystem) HasSpaceErr(allowStaleValue bool) error {
func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
size, err := fs.DiskUsage(allowStaleValue)
if err != nil {
- log.WithField("root", fs.root).WithField("error", err).Warn("failed to determine root fs directory size")
+ log.WithField("root", fs.Path()).WithField("error", err).Warn("failed to determine root fs directory size")
}
// If space is -1 or 0 just return true, means they're allowed unlimited.
@@ -84,7 +86,7 @@ func (fs *Filesystem) HasSpaceAvailable(allowStaleValue bool) bool {
// function for critical logical checks. It should only be used in areas where the actual disk usage
// does not need to be perfect, e.g. API responses for server resource usage.
func (fs *Filesystem) CachedUsage() int64 {
- return atomic.LoadInt64(&fs.diskUsed)
+ return fs.unixFS.Usage()
}
// Internal helper function to allow other parts of the codebase to check the total used disk space
@@ -114,14 +116,14 @@ func (fs *Filesystem) DiskUsage(allowStaleValue bool) (int64, error) {
// currently performing a lookup, just do the disk usage calculation in the background.
go func(fs *Filesystem) {
if _, err := fs.updateCachedDiskUsage(); err != nil {
- log.WithField("root", fs.root).WithField("error", err).Warn("failed to update fs disk usage from within routine")
+ log.WithField("root", fs.Path()).WithField("error", err).Warn("failed to update fs disk usage from within routine")
}
}(fs)
}
}
// Return the currently cached value back to the calling function.
- return atomic.LoadInt64(&fs.diskUsed), nil
+ return fs.unixFS.Usage(), nil
}
// Updates the currently used disk space for a server.
@@ -149,63 +151,46 @@ func (fs *Filesystem) updateCachedDiskUsage() (int64, error) {
// error encountered.
fs.lastLookupTime.Set(time.Now())
- atomic.StoreInt64(&fs.diskUsed, size)
+ fs.unixFS.SetUsage(size)
return size, err
}
-// Determines the directory size of a given location by running parallel tasks to iterate
-// through all of the folders. Returns the size in bytes. This can be a fairly taxing operation
-// on locations with tons of files, so it is recommended that you cache the output.
-func (fs *Filesystem) DirectorySize(dir string) (int64, error) {
- d, err := fs.SafePath(dir)
+// DirectorySize calculates the size of a directory and its descendants.
+func (fs *Filesystem) DirectorySize(root string) (int64, error) {
+ dirfd, name, closeFd, err := fs.unixFS.SafePath(root)
+ defer closeFd()
if err != nil {
return 0, err
}
- var size int64
- var st syscall.Stat_t
-
- err = godirwalk.Walk(d, &godirwalk.Options{
- Unsorted: true,
- Callback: func(p string, e *godirwalk.Dirent) error {
- // If this is a symlink then resolve the final destination of it before trying to continue walking
- // over its contents. If it resolves outside the server data directory just skip everything else for
- // it. Otherwise, allow it to continue.
- if e.IsSymlink() {
- if _, err := fs.SafePath(p); err != nil {
- if IsErrorCode(err, ErrCodePathResolution) {
- return godirwalk.SkipThis
- }
-
- return err
- }
- }
-
- if !e.IsDir() {
- _ = syscall.Lstat(p, &st)
- atomic.AddInt64(&size, st.Size)
- }
+ var size atomic.Int64
+ err = fs.unixFS.WalkDirat(dirfd, name, func(dirfd int, name, _ string, d ufs.DirEntry, err error) error {
+ if err != nil {
+ return errors.Wrap(err, "walkdirat err")
+ }
+ // Only calculate the size of regular files.
+ if !d.Type().IsRegular() {
return nil
- },
- })
+ }
- return size, errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
+ info, err := fs.unixFS.Lstatat(dirfd, name)
+ if err != nil {
+ return errors.Wrap(err, "lstatat err")
+ }
+
+ // TODO: detect if info is a hard-link and de-duplicate it.
+ // ref; https://github.com/pterodactyl/wings/pull/181/files
+
+ size.Add(info.Size())
+ return nil
+ })
+ return size.Load(), errors.WrapIf(err, "server/filesystem: directorysize: failed to walk directory")
}
-// Helper function to determine if a server has space available for a file of a given size.
-// If space is available, no error will be returned, otherwise an ErrNotEnoughSpace error
-// will be raised.
func (fs *Filesystem) HasSpaceFor(size int64) error {
- if fs.MaxDisk() == 0 {
- return nil
- }
- s, err := fs.DiskUsage(true)
- if err != nil {
- return err
- }
- if (s + size) > fs.MaxDisk() {
+ if !fs.unixFS.CanFit(size) {
return newFilesystemError(ErrCodeDiskSpace, nil)
}
return nil
@@ -213,24 +198,5 @@ func (fs *Filesystem) HasSpaceFor(size int64) error {
// Updates the disk usage for the Filesystem instance.
func (fs *Filesystem) addDisk(i int64) int64 {
- size := atomic.LoadInt64(&fs.diskUsed)
-
- // Sorry go gods. This is ugly but the best approach I can come up with for right
- // now without completely re-evaluating the logic we use for determining disk space.
- //
- // Normally I would just be using the atomic load right below, but I'm not sure about
- // the scenarios where it is 0 because nothing has run that would trigger a disk size
- // calculation?
- //
- // Perhaps that isn't even a concern for the sake of this?
- if !fs.isTest {
- size, _ = fs.DiskUsage(true)
- }
-
- // If we're dropping below 0 somehow just cap it to 0.
- if (size + i) < 0 {
- return atomic.SwapInt64(&fs.diskUsed, 0)
- }
-
- return atomic.AddInt64(&fs.diskUsed, i)
+ return fs.unixFS.Add(i)
}
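Space accounting is now delegated to the `ufs.Quota` wrapper through `CanFit`, `Add`, `Usage` and `SetLimit`. The toy type below is not the real ufs implementation, only the contract implied by how those calls are used above (a limit of 0 or less means unlimited, and usage never drops below zero):

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// quota is a toy stand-in for ufs.Quota: it tracks bytes used against an
// optional limit, where a limit <= 0 means "unlimited".
type quota struct {
	limit int64
	used  atomic.Int64
}

func (q *quota) CanFit(n int64) bool {
	if q.limit <= 0 {
		return true
	}
	return q.used.Load()+n <= q.limit
}

func (q *quota) Add(n int64) int64 {
	if v := q.used.Add(n); v >= 0 {
		return v
	}
	// Never let accounting drop below zero.
	q.used.Store(0)
	return 0
}

func (q *quota) HasSpaceFor(n int64) error {
	if !q.CanFit(n) {
		return errors.New("filesystem: not enough disk space")
	}
	return nil
}

func main() {
	q := &quota{limit: 10}
	fmt.Println(q.HasSpaceFor(8)) // <nil>
	q.Add(8)
	fmt.Println(q.HasSpaceFor(4)) // not enough disk space
}
```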
diff --git a/server/filesystem/errors.go b/server/filesystem/errors.go
index afae74a..b977fe6 100644
--- a/server/filesystem/errors.go
+++ b/server/filesystem/errors.go
@@ -2,11 +2,12 @@ package filesystem
import (
"fmt"
- "os"
"path/filepath"
"emperror.dev/errors"
"github.com/apex/log"
+
+ "github.com/pterodactyl/wings/internal/ufs"
)
type ErrorCode string
@@ -86,15 +87,15 @@ func (e *Error) Unwrap() error {
// Generates an error logger instance with some basic information.
func (fs *Filesystem) error(err error) *log.Entry {
- return log.WithField("subsystem", "filesystem").WithField("root", fs.root).WithField("error", err)
+ return log.WithField("subsystem", "filesystem").WithField("root", fs.Path()).WithField("error", err)
}
// Handle errors encountered when walking through directories.
//
// If there is a path resolution error just skip the item entirely. Only return this for a
// directory, otherwise return nil. Returning this error for a file will stop the walking
-// for the remainder of the directory. This is assuming an os.FileInfo struct was even returned.
-func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error {
+// for the remainder of the directory. This is assuming an FileInfo struct was even returned.
+func (fs *Filesystem) handleWalkerError(err error, f ufs.FileInfo) error {
if !IsErrorCode(err, ErrCodePathResolution) {
return err
}
diff --git a/server/filesystem/filesystem.go b/server/filesystem/filesystem.go
index 224d8d0..2e4dfe5 100644
--- a/server/filesystem/filesystem.go
+++ b/server/filesystem/filesystem.go
@@ -1,13 +1,11 @@
package filesystem
import (
- "bufio"
+ "fmt"
"io"
- "io/ioutil"
"os"
- "path"
"path/filepath"
- "sort"
+ "slices"
"strconv"
"strings"
"sync"
@@ -15,220 +13,208 @@ import (
"time"
"emperror.dev/errors"
+ "github.com/apex/log"
"github.com/gabriel-vasile/mimetype"
- "github.com/karrick/godirwalk"
ignore "github.com/sabhiram/go-gitignore"
"github.com/Tech-Gamer/nwy-wings/config"
- "github.com/Tech-Gamer/nwy-wings/system"
+ "github.com/Tech-Gamer/nwy-wings/internal/ufs"
)
type Filesystem struct {
+ unixFS *ufs.Quota
+
mu sync.RWMutex
lastLookupTime *usageLookupTime
- lookupInProgress *system.AtomicBool
- diskUsed int64
+ lookupInProgress atomic.Bool
diskCheckInterval time.Duration
denylist *ignore.GitIgnore
- // The maximum amount of disk space (in bytes) that this Filesystem instance can use.
- diskLimit int64
-
- // The root data directory path for this Filesystem instance.
- root string
-
isTest bool
}
// New creates a new Filesystem instance for a given server.
-func New(root string, size int64, denylist []string) *Filesystem {
+func New(root string, size int64, denylist []string) (*Filesystem, error) {
+ if err := os.MkdirAll(root, 0o755); err != nil {
+ return nil, err
+ }
+ unixFS, err := ufs.NewUnixFS(root, config.UseOpenat2())
+ if err != nil {
+ return nil, err
+ }
+ quota := ufs.NewQuota(unixFS, size)
+
return &Filesystem{
- root: root,
- diskLimit: size,
+ unixFS: quota,
+
diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval),
lastLookupTime: &usageLookupTime{},
- lookupInProgress: system.NewAtomicBool(false),
denylist: ignore.CompileIgnoreLines(denylist...),
- }
+ }, nil
}
// Path returns the root path for the Filesystem instance.
func (fs *Filesystem) Path() string {
- return fs.root
+ return fs.unixFS.BasePath()
+}
+
+// ReadDir reads directory entries.
+func (fs *Filesystem) ReadDir(path string) ([]ufs.DirEntry, error) {
+ return fs.unixFS.ReadDir(path)
+}
+
+// ReadDirStat is like ReadDir except that it returns FileInfo for each entry
+// instead of just a DirEntry.
+func (fs *Filesystem) ReadDirStat(path string) ([]ufs.FileInfo, error) {
+ return ufs.ReadDirMap(fs.unixFS.UnixFS, path, func(e ufs.DirEntry) (ufs.FileInfo, error) {
+ return e.Info()
+ })
}
// File returns a reader for a file instance as well as the stat information.
-func (fs *Filesystem) File(p string) (*os.File, Stat, error) {
- cleaned, err := fs.SafePath(p)
+func (fs *Filesystem) File(p string) (ufs.File, Stat, error) {
+ f, err := fs.unixFS.Open(p)
if err != nil {
- return nil, Stat{}, errors.WithStackIf(err)
+ return nil, Stat{}, err
}
- st, err := fs.Stat(cleaned)
+ st, err := statFromFile(f)
if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- return nil, Stat{}, newFilesystemError(ErrNotExist, err)
- }
- return nil, Stat{}, errors.WithStackIf(err)
- }
- if st.IsDir() {
- return nil, Stat{}, newFilesystemError(ErrCodeIsDirectory, nil)
- }
- f, err := os.Open(cleaned)
- if err != nil {
- return nil, Stat{}, errors.WithStackIf(err)
+ _ = f.Close()
+ return nil, Stat{}, err
}
return f, st, nil
}
+func (fs *Filesystem) UnixFS() *ufs.UnixFS {
+ return fs.unixFS.UnixFS
+}
+
// Touch acts by creating the given file and path on the disk if it is not present
// already. If it is present, the file is opened using the defaults which will truncate
// the contents. The opened file is then returned to the caller.
-func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
- cleaned, err := fs.SafePath(p)
- if err != nil {
- return nil, err
- }
- f, err := os.OpenFile(cleaned, flag, 0o644)
- if err == nil {
- return f, nil
- }
- if f != nil {
- _ = f.Close()
- }
- // If the error is not because it doesn't exist then we just need to bail at this point.
- if !errors.Is(err, os.ErrNotExist) {
- return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file handle")
- }
- // Only create and chown the directory if it doesn't exist.
- if _, err := os.Stat(filepath.Dir(cleaned)); errors.Is(err, os.ErrNotExist) {
- // Create the path leading up to the file we're trying to create, setting the final perms
- // on it as we go.
- if err := os.MkdirAll(filepath.Dir(cleaned), 0o755); err != nil {
- return nil, errors.Wrap(err, "server/filesystem: touch: failed to create directory tree")
- }
- if err := fs.Chown(filepath.Dir(cleaned)); err != nil {
- return nil, err
- }
- }
- o := &fileOpener{}
- // Try to open the file now that we have created the pathing necessary for it, and then
- // Chown that file so that the permissions don't mess with things.
- f, err = o.open(cleaned, flag, 0o644)
- if err != nil {
- return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file with wait")
- }
- _ = fs.Chown(cleaned)
- return f, nil
+func (fs *Filesystem) Touch(p string, flag int) (ufs.File, error) {
+ return fs.unixFS.Touch(p, flag, 0o644)
}
// Writefile writes a file to the system. If the file does not already exist one
// will be created. This will also properly recalculate the disk space used by
// the server when writing new files or modifying existing ones.
+//
+// DEPRECATED: use `Write` instead.
func (fs *Filesystem) Writefile(p string, r io.Reader) error {
- cleaned, err := fs.SafePath(p)
- if err != nil {
- return err
- }
-
var currentSize int64
- // If the file does not exist on the system already go ahead and create the pathway
- // to it and an empty file. We'll then write to it later on after this completes.
- stat, err := os.Stat(cleaned)
- if err != nil && !os.IsNotExist(err) {
+ st, err := fs.unixFS.Stat(p)
+ if err != nil && !errors.Is(err, ufs.ErrNotExist) {
return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
} else if err == nil {
- if stat.IsDir() {
- return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: cleaned})
+ if st.IsDir() {
+ // TODO: resolved
+ return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: ""})
}
- currentSize = stat.Size()
+ currentSize = st.Size()
+ }
+
+ // Touch the file and return the handle to it at this point. This will
+ // create or truncate the file, and create any necessary parent directories
+ // if they are missing.
+ file, err := fs.unixFS.Touch(p, ufs.O_RDWR|ufs.O_TRUNC, 0o644)
+ if err != nil {
+ return fmt.Errorf("error touching file: %w", err)
+ }
+ defer file.Close()
+
+ // Do not use CopyBuffer here; it is wasteful because the file implements
+ // io.ReaderFrom, which causes the buffer to be ignored anyway.
+ n, err := io.Copy(file, r)
+
+ // Adjust the disk usage to account for the old size and the new size of the file.
+ fs.unixFS.Add(n - currentSize)
+
+ if err := fs.chownFile(p); err != nil {
+ return fmt.Errorf("error chowning file: %w", err)
+ }
+ // Return the error from io.Copy.
+ return err
+}
+
+func (fs *Filesystem) Write(p string, r io.Reader, newSize int64, mode ufs.FileMode) error {
+ var currentSize int64
+ st, err := fs.unixFS.Stat(p)
+ if err != nil && !errors.Is(err, ufs.ErrNotExist) {
+ return errors.Wrap(err, "server/filesystem: writefile: failed to stat file")
+ } else if err == nil {
+ if st.IsDir() {
+ // TODO: resolved
+ return errors.WithStack(&Error{code: ErrCodeIsDirectory, resolved: ""})
+ }
+ currentSize = st.Size()
}
- br := bufio.NewReader(r)
// Check that the new size we're writing to the disk can fit. If there is currently
// a file we'll subtract that current file size from the size of the buffer to determine
// the amount of new data we're writing (or amount we're removing if smaller).
- if err := fs.HasSpaceFor(int64(br.Size()) - currentSize); err != nil {
+ if err := fs.HasSpaceFor(newSize - currentSize); err != nil {
return err
}
- // Touch the file and return the handle to it at this point. This will create the file,
- // any necessary directories, and set the proper owner of the file.
- file, err := fs.Touch(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
+ // Touch the file and return the handle to it at this point. This will
+ // create or truncate the file, and create any necessary parent directories
+ // if they are missing.
+ file, err := fs.unixFS.Touch(p, ufs.O_RDWR|ufs.O_TRUNC, mode)
if err != nil {
return err
}
defer file.Close()
- buf := make([]byte, 1024*4)
- sz, err := io.CopyBuffer(file, r, buf)
+ if newSize == 0 {
+ // Subtract the previous size of the file if the new size is 0.
+ fs.unixFS.Add(-currentSize)
+ } else {
+ // Do not use CopyBuffer here; it is wasteful because the file implements
+ // io.ReaderFrom, which causes the buffer to be ignored anyway.
+ var n int64
+ n, err = io.Copy(file, io.LimitReader(r, newSize))
- // Adjust the disk usage to account for the old size and the new size of the file.
- fs.addDisk(sz - currentSize)
+ // Adjust the disk usage to account for the old size and the new size of the file.
+ fs.unixFS.Add(n - currentSize)
+ }
- return fs.unsafeChown(cleaned)
-}
-
-// Creates a new directory (name) at a specified path (p) for the server.
-func (fs *Filesystem) CreateDirectory(name string, p string) error {
- cleaned, err := fs.SafePath(path.Join(p, name))
- if err != nil {
+ if err := fs.chownFile(p); err != nil {
return err
}
- return os.MkdirAll(cleaned, 0o755)
+ // Return any remaining error.
+ return err
}
-// Rename moves (or renames) a file or directory.
-func (fs *Filesystem) Rename(from string, to string) error {
- cleanedFrom, err := fs.SafePath(from)
- if err != nil {
- return errors.WithStack(err)
- }
-
- cleanedTo, err := fs.SafePath(to)
- if err != nil {
- return errors.WithStack(err)
- }
-
- // If the target file or directory already exists the rename function will fail, so just
- // bail out now.
- if _, err := os.Stat(cleanedTo); err == nil {
- return os.ErrExist
- }
-
- if cleanedTo == fs.Path() {
- return errors.New("attempting to rename into an invalid directory space")
- }
-
- d := strings.TrimSuffix(cleanedTo, path.Base(cleanedTo))
- // Ensure that the directory we're moving into exists correctly on the system. Only do this if
- // we're not at the root directory level.
- if d != fs.Path() {
- if mkerr := os.MkdirAll(d, 0o755); mkerr != nil {
- return errors.WithMessage(mkerr, "failed to create directory structure for file rename")
- }
- }
-
- if err := os.Rename(cleanedFrom, cleanedTo); err != nil {
- return errors.WithStack(err)
- }
- return nil
+// CreateDirectory creates a new directory (name) at a specified path (p) for
+// the server.
+func (fs *Filesystem) CreateDirectory(name string, p string) error {
+ return fs.unixFS.MkdirAll(filepath.Join(p, name), 0o755)
}
-// Recursively iterates over a file or directory and sets the permissions on all of the
+func (fs *Filesystem) Rename(oldpath, newpath string) error {
+ return fs.unixFS.Rename(oldpath, newpath)
+}
+
+func (fs *Filesystem) Symlink(oldpath, newpath string) error {
+ return fs.unixFS.Symlink(oldpath, newpath)
+}
+
+func (fs *Filesystem) chownFile(name string) error {
+ if fs.isTest {
+ return nil
+ }
+
+ uid := config.Get().System.User.Uid
+ gid := config.Get().System.User.Gid
+ return fs.unixFS.Lchown(name, uid, gid)
+}
+
+// Chown recursively iterates over a file or directory and sets the permissions on all of the
// underlying files. Iterate over all of the files and directories. If it is a file just
// go ahead and perform the chown operation. Otherwise dig deeper into the directory until
// we've run out of directories to dig into.
-func (fs *Filesystem) Chown(path string) error {
- cleaned, err := fs.SafePath(path)
- if err != nil {
- return err
- }
- return fs.unsafeChown(cleaned)
-}
-
-// unsafeChown chowns the given path, without checking if the path is safe. This should only be used
-// when the path has already been checked.
-func (fs *Filesystem) unsafeChown(path string) error {
+func (fs *Filesystem) Chown(p string) error {
if fs.isTest {
return nil
}
@@ -236,54 +222,44 @@ func (fs *Filesystem) unsafeChown(path string) error {
uid := config.Get().System.User.Uid
gid := config.Get().System.User.Gid
+ dirfd, name, closeFd, err := fs.unixFS.SafePath(p)
+ defer closeFd()
+ if err != nil {
+ return err
+ }
+
// Start by just chowning the initial path that we received.
- if err := os.Chown(path, uid, gid); err != nil {
+ if err := fs.unixFS.Lchownat(dirfd, name, uid, gid); err != nil {
return errors.Wrap(err, "server/filesystem: chown: failed to chown path")
}
// If this is not a directory we can now return from the function, there is nothing
// left that we need to do.
- if st, err := os.Stat(path); err != nil || !st.IsDir() {
+ if st, err := fs.unixFS.Lstatat(dirfd, name); err != nil || !st.IsDir() {
return nil
}
- // If this was a directory, begin walking over its contents recursively and ensure that all
- // of the subfiles and directories get their permissions updated as well.
- err := godirwalk.Walk(path, &godirwalk.Options{
- Unsorted: true,
- Callback: func(p string, e *godirwalk.Dirent) error {
- // Do not attempt to chown a symlink. Go's os.Chown function will affect the symlink
- // so if it points to a location outside the data directory the user would be able to
- // (un)intentionally modify that files permissions.
- if e.IsSymlink() {
- if e.IsDir() {
- return godirwalk.SkipThis
- }
-
- return nil
- }
-
- return os.Chown(p, uid, gid)
- },
- })
- return errors.Wrap(err, "server/filesystem: chown: failed to chown during walk function")
+ // This walker is probably some of the most efficient code in Wings. It has
+ // an internally re-used buffer for listing directory entries and doesn't
+ // need to check if every individual path it touches is safe as the code
+ // doesn't traverse symlinks, is immune to symlink timing attacks, and
+ // gives us a dirfd and file name to make a direct syscall with.
+ if err := fs.unixFS.WalkDirat(dirfd, name, func(dirfd int, name, _ string, info ufs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+ if err := fs.unixFS.Lchownat(dirfd, name, uid, gid); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("server/filesystem: chown: failed to chown during walk function: %w", err)
+ }
+ return nil
}
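For context on the dirfd-plus-name calls the walker above relies on: an ownership change that never follows symlinks maps onto fchownat(2) with AT_SYMLINK_NOFOLLOW. The helper below is a rough sketch of what an Lchownat-style call might wrap, using golang.org/x/sys/unix; the actual ufs implementation may differ.

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// lchownat changes the ownership of name relative to an already-open directory
// file descriptor without following symlinks, so no path re-resolution happens
// between the walk and the syscall.
func lchownat(dirfd int, name string, uid, gid int) error {
	return unix.Fchownat(dirfd, name, uid, gid, unix.AT_SYMLINK_NOFOLLOW)
}

func main() {
	dirfd, err := unix.Open(".", unix.O_DIRECTORY|unix.O_RDONLY, 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer unix.Close(dirfd)

	// "example.txt" is a placeholder name; chowning to the current uid/gid is
	// a no-op unless the process has the privilege to change ownership.
	_ = lchownat(dirfd, "example.txt", os.Getuid(), os.Getgid())
}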
-func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
- cleaned, err := fs.SafePath(path)
- if err != nil {
- return err
- }
-
- if fs.isTest {
- return nil
- }
-
- if err := os.Chmod(cleaned, mode); err != nil {
- return err
- }
-
- return nil
+func (fs *Filesystem) Chmod(path string, mode ufs.FileMode) error {
+ return fs.unixFS.Chmod(path, mode)
}
// Begin looping up to 50 times to try and create a unique copy file name. This will take
@@ -294,7 +270,7 @@ func (fs *Filesystem) Chmod(path string, mode os.FileMode) error {
// Could probably make this more efficient by checking if there are any files matching the copy
// pattern, and trying to find the highest number and then incrementing it by one rather than
// looping endlessly.
-func (fs *Filesystem) findCopySuffix(dir string, name string, extension string) (string, error) {
+func (fs *Filesystem) findCopySuffix(dirfd int, name, extension string) (string, error) {
var i int
suffix := " copy"
@@ -306,11 +282,10 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
n := name + suffix + extension
// If we stat the file and it does not exist that means we're good to create the copy. If it
// does exist, we'll just continue to the next loop and try again.
- if _, err := fs.Stat(path.Join(dir, n)); err != nil {
- if !errors.Is(err, os.ErrNotExist) {
+ if _, err := fs.unixFS.Lstatat(dirfd, n); err != nil {
+ if !errors.Is(err, ufs.ErrNotExist) {
return "", err
}
-
break
}
@@ -322,53 +297,68 @@ func (fs *Filesystem) findCopySuffix(dir string, name string, extension string)
return name + suffix + extension, nil
}
-// Copies a given file to the same location and appends a suffix to the file to indicate that
-// it has been copied.
+// Copy copies a given file to the same location and appends a suffix to the
+// file to indicate that it has been copied.
func (fs *Filesystem) Copy(p string) error {
- cleaned, err := fs.SafePath(p)
+ dirfd, name, closeFd, err := fs.unixFS.SafePath(p)
+ defer closeFd()
if err != nil {
return err
}
-
- s, err := os.Stat(cleaned)
- if err != nil {
- return err
- } else if s.IsDir() || !s.Mode().IsRegular() {
- // If this is a directory or not a regular file, just throw a not-exist error
- // since anything calling this function should understand what that means.
- return os.ErrNotExist
- }
-
- // Check that copying this file wouldn't put the server over its limit.
- if err := fs.HasSpaceFor(s.Size()); err != nil {
- return err
- }
-
- base := filepath.Base(cleaned)
- relative := strings.TrimSuffix(strings.TrimPrefix(cleaned, fs.Path()), base)
- extension := filepath.Ext(base)
- name := strings.TrimSuffix(base, extension)
-
- // Ensure that ".tar" is also counted as apart of the file extension.
- // There might be a better way to handle this for other double file extensions,
- // but this is a good workaround for now.
- if strings.HasSuffix(name, ".tar") {
- extension = ".tar" + extension
- name = strings.TrimSuffix(name, ".tar")
- }
-
- source, err := os.Open(cleaned)
+ source, err := fs.unixFS.OpenFileat(dirfd, name, ufs.O_RDONLY, 0)
if err != nil {
return err
}
defer source.Close()
+ info, err := source.Stat()
+ if err != nil {
+ return err
+ }
+ if info.IsDir() || !info.Mode().IsRegular() {
+ // If this is a directory or not a regular file, just throw a not-exist error
+ // since anything calling this function should understand what that means.
+ return ufs.ErrNotExist
+ }
+ currentSize := info.Size()
- n, err := fs.findCopySuffix(relative, name, extension)
+ // Check that copying this file wouldn't put the server over its limit.
+ if err := fs.HasSpaceFor(currentSize); err != nil {
+ return err
+ }
+
+ base := info.Name()
+ extension := filepath.Ext(base)
+ baseName := strings.TrimSuffix(base, extension)
+
+ // Ensure that ".tar" is also counted as part of the file extension.
+ // There might be a better way to handle this for other double file extensions,
+ // but this is a good workaround for now.
+ if strings.HasSuffix(baseName, ".tar") {
+ extension = ".tar" + extension
+ baseName = strings.TrimSuffix(baseName, ".tar")
+ }
+
+ newName, err := fs.findCopySuffix(dirfd, baseName, extension)
+ if err != nil {
+ return err
+ }
+ dst, err := fs.unixFS.OpenFileat(dirfd, newName, ufs.O_WRONLY|ufs.O_CREATE, info.Mode())
if err != nil {
return err
}
+ defer dst.Close()
- return fs.Writefile(path.Join(relative, n), source)
+ // Do not use CopyBuffer here; it is wasteful because the file implements
+ // io.ReaderFrom, which causes the provided buffer to be ignored anyway.
+ n, err := io.Copy(dst, io.LimitReader(source, currentSize))
+ fs.unixFS.Add(n)
+
+ if !fs.isTest {
+ if err := fs.unixFS.Lchownat(dirfd, newName, config.Get().System.User.Uid, config.Get().System.User.Gid); err != nil {
+ return err
+ }
+ }
+ // Return the error from io.Copy.
+ return err
}
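The ".tar" special case in Copy keeps double extensions intact when the copy suffix is inserted. A self-contained illustration of just that split, with made-up file names:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// splitCopyName mirrors the base-name/extension split done in Copy so that
// "world.tar.gz" copies to "world copy.tar.gz" instead of "world.tar copy.gz".
func splitCopyName(base string) (name, ext string) {
	ext = filepath.Ext(base)
	name = strings.TrimSuffix(base, ext)
	if strings.HasSuffix(name, ".tar") {
		ext = ".tar" + ext
		name = strings.TrimSuffix(name, ".tar")
	}
	return name, ext
}

func main() {
	fmt.Println(splitCopyName("world.tar.gz")) // world .tar.gz
	fmt.Println(splitCopyName("config.yml"))   // config .yml
}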
// TruncateRootDirectory removes _all_ files and directories from a server's
@@ -380,211 +370,128 @@ func (fs *Filesystem) TruncateRootDirectory() error {
if err := os.Mkdir(fs.Path(), 0o755); err != nil {
return err
}
- atomic.StoreInt64(&fs.diskUsed, 0)
+ _ = fs.unixFS.Close()
+ unixFS, err := ufs.NewUnixFS(fs.Path(), config.UseOpenat2())
+ if err != nil {
+ return err
+ }
+ var limit int64
+ if fs.isTest {
+ limit = 0
+ } else {
+ limit = fs.unixFS.Limit()
+ }
+ fs.unixFS = ufs.NewQuota(unixFS, limit)
return nil
}
// Delete removes a file or folder from the system. Prevents the user from
// accidentally (or maliciously) removing their root server data directory.
func (fs *Filesystem) Delete(p string) error {
- // This is one of the few (only?) places in the codebase where we're explicitly not using
- // the SafePath functionality when working with user provided input. If we did, you would
- // not be able to delete a file that is a symlink pointing to a location outside the data
- // directory.
- //
- // We also want to avoid resolving a symlink that points _within_ the data directory and thus
- // deleting the actual source file for the symlink rather than the symlink itself. For these
- // purposes just resolve the actual file path using filepath.Join() and confirm that the path
- // exists within the data directory.
- resolved := fs.unsafeFilePath(p)
- if !fs.unsafeIsInDataDirectory(resolved) {
- return NewBadPathResolution(p, resolved)
- }
-
- // Block any whoopsies.
- if resolved == fs.Path() {
- return errors.New("cannot delete root server directory")
- }
-
- st, err := os.Lstat(resolved)
- if err != nil {
- if !os.IsNotExist(err) {
- fs.error(err).Warn("error while attempting to stat file before deletion")
- return err
- }
-
- // The following logic is used to handle a case where a user attempts to
- // delete a file that does not exist through a directory symlink.
- // We don't want to reveal that the file does not exist, so we validate
- // the path of the symlink and return a bad path error if it is invalid.
-
- // The requested file or directory doesn't exist, so at this point we
- // need to iterate up the path chain until we hit a directory that
- // _does_ exist and can be validated.
- parts := strings.Split(filepath.Dir(resolved), "/")
-
- // Range over all the path parts and form directory paths from the end
- // moving up until we have a valid resolution, or we run out of paths to
- // try.
- for k := range parts {
- try := strings.Join(parts[:(len(parts)-k)], "/")
- if !fs.unsafeIsInDataDirectory(try) {
- break
- }
-
- t, err := filepath.EvalSymlinks(try)
- if err == nil {
- if !fs.unsafeIsInDataDirectory(t) {
- return NewBadPathResolution(p, t)
- }
- break
- }
- }
-
- // Always return early if the file does not exist.
- return nil
- }
-
- // If the file is not a symlink, we need to check that it is not within a
- // symlinked directory that points outside the data directory.
- if st.Mode()&os.ModeSymlink == 0 {
- ep, err := filepath.EvalSymlinks(resolved)
- if err != nil {
- if !os.IsNotExist(err) {
- return err
- }
- } else if !fs.unsafeIsInDataDirectory(ep) {
- return NewBadPathResolution(p, ep)
- }
- }
-
- if st.IsDir() {
- if s, err := fs.DirectorySize(resolved); err == nil {
- fs.addDisk(-s)
- }
- } else {
- fs.addDisk(-st.Size())
- }
-
- return os.RemoveAll(resolved)
+ return fs.unixFS.RemoveAll(p)
}
-type fileOpener struct {
- busy uint
-}
-
-// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
-// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
-// has been exhaused, at which point we will abort with an error.
-func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File, error) {
- for {
- f, err := os.OpenFile(path, flags, perm)
-
- // If there is an error because the text file is busy, go ahead and sleep for a few
- // hundred milliseconds and then try again up to three times before just returning the
- // error back to the caller.
- //
- // Based on code from: https://github.com/golang/go/issues/22220#issuecomment-336458122
- if err != nil && fo.busy < 3 && strings.Contains(err.Error(), "text file busy") {
- time.Sleep(100 * time.Millisecond << fo.busy)
- fo.busy++
- continue
- }
-
- return f, err
- }
-}
+//type fileOpener struct {
+// fs *Filesystem
+// busy uint
+//}
+//
+//// Attempts to open a given file up to "attempts" number of times, using a backoff. If the file
+//// cannot be opened because of a "text file busy" error, we will attempt until the number of attempts
+//// has been exhausted, at which point we will abort with an error.
+//func (fo *fileOpener) open(path string, flags int, perm ufs.FileMode) (ufs.File, error) {
+// for {
+// f, err := fo.fs.unixFS.OpenFile(path, flags, perm)
+//
+// // If there is an error because the text file is busy, go ahead and sleep for a few
+// // hundred milliseconds and then try again up to three times before just returning the
+// // error back to the caller.
+// //
+// // Based on code from: https://github.com/golang/go/issues/22220#issuecomment-336458122
+// if err != nil && fo.busy < 3 && strings.Contains(err.Error(), "text file busy") {
+// time.Sleep(100 * time.Millisecond << fo.busy)
+// fo.busy++
+// continue
+// }
+//
+// return f, err
+// }
+//}
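The disabled block above retried opens that failed with "text file busy", backing off by 100ms << attempt, i.e. 100ms, 200ms, then 400ms before giving up. A standalone sketch of the same retry idea; matching syscall.ETXTBSY with errors.Is is an assumption made here in place of the original string comparison:

package main

import (
	"errors"
	"os"
	"syscall"
	"time"
)

// openRetry retries an open that fails with ETXTBSY, sleeping 100ms, 200ms and
// 400ms between attempts before returning the last error.
func openRetry(path string, flags int, perm os.FileMode) (*os.File, error) {
	for busy := 0; ; busy++ {
		f, err := os.OpenFile(path, flags, perm)
		if err != nil && busy < 3 && errors.Is(err, syscall.ETXTBSY) {
			time.Sleep(100 * time.Millisecond << busy)
			continue
		}
		return f, err
	}
}

func main() {
	// "example.sh" is a placeholder path.
	if f, err := openRetry("example.sh", os.O_RDWR, 0o644); err == nil {
		_ = f.Close()
	}
}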
// ListDirectory lists the contents of a given directory and returns stat
// information about each file and folder within it.
func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
- cleaned, err := fs.SafePath(p)
- if err != nil {
- return nil, err
- }
-
- files, err := ioutil.ReadDir(cleaned)
- if err != nil {
- return nil, err
- }
-
- var wg sync.WaitGroup
-
- // You must initialize the output of this directory as a non-nil value otherwise
- // when it is marshaled into a JSON object you'll just get 'null' back, which will
- // break the panel badly.
- out := make([]Stat, len(files))
-
- // Iterate over all of the files and directories returned and perform an async process
- // to get the mime-type for them all.
- for i, file := range files {
- wg.Add(1)
-
- go func(idx int, f os.FileInfo) {
- defer wg.Done()
-
- var m *mimetype.MIME
- d := "inode/directory"
- if !f.IsDir() {
- cleanedp := filepath.Join(cleaned, f.Name())
- if f.Mode()&os.ModeSymlink != 0 {
- cleanedp, _ = fs.SafePath(filepath.Join(cleaned, f.Name()))
- }
-
- // Don't try to detect the type on a pipe — this will just hang the application and
- // you'll never get a response back.
- //
- // @see https://github.com/pterodactyl/panel/issues/4059
- if cleanedp != "" && f.Mode()&os.ModeNamedPipe == 0 {
- m, _ = mimetype.DetectFile(filepath.Join(cleaned, f.Name()))
- } else {
- // Just pass this for an unknown type because the file could not safely be resolved within
- // the server data path.
- d = "application/octet-stream"
- }
- }
-
- st := Stat{FileInfo: f, Mimetype: d}
- if m != nil {
- st.Mimetype = m.String()
- }
- out[idx] = st
- }(i, file)
- }
-
- wg.Wait()
-
- // Sort the output alphabetically to begin with since we've run the output
- // through an asynchronous process and the order is gonna be very random.
- sort.SliceStable(out, func(i, j int) bool {
- if out[i].Name() == out[j].Name() || out[i].Name() > out[j].Name() {
- return true
+ // Read the directory's entries with ReadDirMap so each DirEntry can be
+ // mapped into a Stat carrying mimetype information.
+ out, err := ufs.ReadDirMap(fs.unixFS.UnixFS, p, func(e ufs.DirEntry) (Stat, error) {
+ info, err := e.Info()
+ if err != nil {
+ return Stat{}, err
+ }
+
+ var d string
+ if e.Type().IsDir() {
+ d = "inode/directory"
+ } else {
+ d = "application/octet-stream"
+ }
+ var m *mimetype.MIME
+ if e.Type().IsRegular() {
+ // TODO: I should probably find a better way to do this.
+ eO := e.(interface {
+ Open() (ufs.File, error)
+ })
+ f, err := eO.Open()
+ if err != nil {
+ return Stat{}, err
+ }
+ m, err = mimetype.DetectReader(f)
+ if err != nil {
+ log.Error(err.Error())
+ }
+ _ = f.Close()
+ }
+
+ st := Stat{FileInfo: info, Mimetype: d}
+ if m != nil {
+ st.Mimetype = m.String()
+ }
+ return st, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // Sort entries alphabetically.
+ slices.SortStableFunc(out, func(a, b Stat) int {
+ switch {
+ case a.Name() == b.Name():
+ return 0
+ case a.Name() > b.Name():
+ return 1
+ default:
+ return -1
}
- return false
})
- // Then, sort it so that directories are listed first in the output. Everything
- // will continue to be alphabetized at this point.
- sort.SliceStable(out, func(i, j int) bool {
- return out[i].IsDir()
+ // Sort folders before other file types.
+ slices.SortStableFunc(out, func(a, b Stat) int {
+ switch {
+ case a.IsDir() == b.IsDir():
+ return 0
+ case a.IsDir():
+ return -1
+ default:
+ return 1
+ }
})
return out, nil
}
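Because both sorts above are stable, the second (directories-first) pass keeps the alphabetical order established by the first within each group. A self-contained illustration of that two-pass pattern with slices.SortStableFunc, using a stand-in Entry type instead of Stat:

package main

import (
	"fmt"
	"slices"
	"strings"
)

type Entry struct {
	Name  string
	IsDir bool
}

func main() {
	out := []Entry{{"plugins", true}, {"server.jar", false}, {"config", true}, {"eula.txt", false}}

	// Pass 1: alphabetical order.
	slices.SortStableFunc(out, func(a, b Entry) int { return strings.Compare(a.Name, b.Name) })
	// Pass 2: directories first; stability keeps each group alphabetical.
	slices.SortStableFunc(out, func(a, b Entry) int {
		switch {
		case a.IsDir == b.IsDir:
			return 0
		case a.IsDir:
			return -1
		default:
			return 1
		}
	})

	fmt.Println(out) // [{config true} {plugins true} {eula.txt false} {server.jar false}]
}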
func (fs *Filesystem) Chtimes(path string, atime, mtime time.Time) error {
- cleaned, err := fs.SafePath(path)
- if err != nil {
- return err
- }
-
if fs.isTest {
return nil
}
-
- if err := os.Chtimes(cleaned, atime, mtime); err != nil {
- return err
- }
-
- return nil
+ return fs.unixFS.Chtimes(path, atime, mtime)
}
diff --git a/server/filesystem/filesystem_test.go b/server/filesystem/filesystem_test.go
index 8dbce24..67b1fc0 100644
--- a/server/filesystem/filesystem_test.go
+++ b/server/filesystem/filesystem_test.go
@@ -7,12 +7,13 @@ import (
"math/rand"
"os"
"path/filepath"
- "sync/atomic"
"testing"
"unicode/utf8"
. "github.com/franela/goblin"
+ "github.com/Tech-Gamer/nwy-wings/internal/ufs"
+
"github.com/Tech-Gamer/nwy-wings/config"
)
@@ -28,15 +29,23 @@ func NewFs() (*Filesystem, *rootFs) {
tmpDir, err := os.MkdirTemp(os.TempDir(), "pterodactyl")
if err != nil {
panic(err)
+ return nil, nil
}
- // defer os.RemoveAll(tmpDir)
rfs := rootFs{root: tmpDir}
- rfs.reset()
+ p := filepath.Join(tmpDir, "server")
+ if err := os.Mkdir(p, 0o755); err != nil {
+ panic(err)
+ return nil, nil
+ }
- fs := New(filepath.Join(tmpDir, "/server"), 0, []string{})
+ fs, _ := New(p, 0, []string{})
fs.isTest = true
+ if err := fs.TruncateRootDirectory(); err != nil {
+ panic(err)
+ return nil, nil
+ }
return fs, &rfs
}
@@ -45,7 +54,7 @@ type rootFs struct {
root string
}
-func getFileContent(file *os.File) string {
+func getFileContent(file ufs.File) string {
var w bytes.Buffer
if _, err := bufio.NewReader(file).WriteTo(&w); err != nil {
panic(err)
@@ -54,11 +63,11 @@ func getFileContent(file *os.File) string {
}
func (rfs *rootFs) CreateServerFile(p string, c []byte) error {
- f, err := os.Create(filepath.Join(rfs.root, "/server", p))
+ f, err := os.Create(filepath.Join(rfs.root, "server", p))
if err == nil {
- f.Write(c)
- f.Close()
+ _, _ = f.Write(c)
+ _ = f.Close()
}
return err
@@ -69,19 +78,7 @@ func (rfs *rootFs) CreateServerFileFromString(p string, c string) error {
}
func (rfs *rootFs) StatServerFile(p string) (os.FileInfo, error) {
- return os.Stat(filepath.Join(rfs.root, "/server", p))
-}
-
-func (rfs *rootFs) reset() {
- if err := os.RemoveAll(filepath.Join(rfs.root, "/server")); err != nil {
- if !os.IsNotExist(err) {
- panic(err)
- }
- }
-
- if err := os.Mkdir(filepath.Join(rfs.root, "/server"), 0o755); err != nil {
- panic(err)
- }
+ return os.Stat(filepath.Join(rfs.root, "server", p))
}
func TestFilesystem_Openfile(t *testing.T) {
@@ -93,7 +90,8 @@ func TestFilesystem_Openfile(t *testing.T) {
_, _, err := fs.File("foo/bar.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrNotExist)).IsTrue()
+ // TODO
+ //g.Assert(IsErrorCode(err, ErrNotExist)).IsTrue()
})
g.It("returns file stat information", func() {
@@ -108,14 +106,14 @@ func TestFilesystem_Openfile(t *testing.T) {
})
g.AfterEach(func() {
- rfs.reset()
+ _ = fs.TruncateRootDirectory()
})
})
}
func TestFilesystem_Writefile(t *testing.T) {
g := Goblin(t)
- fs, rfs := NewFs()
+ fs, _ := NewFs()
g.Describe("Open and WriteFile", func() {
buf := &bytes.Buffer{}
@@ -125,22 +123,22 @@ func TestFilesystem_Writefile(t *testing.T) {
g.It("can create a new file", func() {
r := bytes.NewReader([]byte("test file content"))
- g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
+ g.Assert(fs.CachedUsage()).Equal(int64(0))
- err := fs.Writefile("test.txt", r)
+ err := fs.Write("test.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
f, _, err := fs.File("test.txt")
g.Assert(err).IsNil()
defer f.Close()
g.Assert(getFileContent(f)).Equal("test file content")
- g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(r.Size())
+ g.Assert(fs.CachedUsage()).Equal(r.Size())
})
g.It("can create a new file inside a nested directory with leading slash", func() {
r := bytes.NewReader([]byte("test file content"))
- err := fs.Writefile("/some/nested/test.txt", r)
+ err := fs.Write("/some/nested/test.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
f, _, err := fs.File("/some/nested/test.txt")
@@ -152,7 +150,7 @@ func TestFilesystem_Writefile(t *testing.T) {
g.It("can create a new file inside a nested directory without a trailing slash", func() {
r := bytes.NewReader([]byte("test file content"))
- err := fs.Writefile("some/../foo/bar/test.txt", r)
+ err := fs.Write("some/../foo/bar/test.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
f, _, err := fs.File("foo/bar/test.txt")
@@ -164,13 +162,13 @@ func TestFilesystem_Writefile(t *testing.T) {
g.It("cannot create a file outside the root directory", func() {
r := bytes.NewReader([]byte("test file content"))
- err := fs.Writefile("/some/../foo/../../test.txt", r)
+ err := fs.Write("/some/../foo/../../test.txt", r, r.Size(), 0o644)
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("cannot write a file that exceeds the disk limits", func() {
- atomic.StoreInt64(&fs.diskLimit, 1024)
+ fs.SetDiskLimit(1024)
b := make([]byte, 1025)
_, err := rand.Read(b)
@@ -178,18 +176,18 @@ func TestFilesystem_Writefile(t *testing.T) {
g.Assert(len(b)).Equal(1025)
r := bytes.NewReader(b)
- err = fs.Writefile("test.txt", r)
+ err = fs.Write("test.txt", r, int64(len(b)), 0o644)
g.Assert(err).IsNotNil()
g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
})
g.It("truncates the file when writing new contents", func() {
r := bytes.NewReader([]byte("original data"))
- err := fs.Writefile("test.txt", r)
+ err := fs.Write("test.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
r = bytes.NewReader([]byte("new data"))
- err = fs.Writefile("test.txt", r)
+ err = fs.Write("test.txt", r, r.Size(), 0o644)
g.Assert(err).IsNil()
f, _, err := fs.File("test.txt")
@@ -200,10 +198,7 @@ func TestFilesystem_Writefile(t *testing.T) {
g.AfterEach(func() {
buf.Truncate(0)
- rfs.reset()
-
- atomic.StoreInt64(&fs.diskUsed, 0)
- atomic.StoreInt64(&fs.diskLimit, 0)
+ _ = fs.TruncateRootDirectory()
})
})
}
@@ -236,17 +231,17 @@ func TestFilesystem_CreateDirectory(t *testing.T) {
g.It("should not allow the creation of directories outside the root", func() {
err := fs.CreateDirectory("test", "e/../../something")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("should not increment the disk usage", func() {
err := fs.CreateDirectory("test", "/")
g.Assert(err).IsNil()
- g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
+ g.Assert(fs.CachedUsage()).Equal(int64(0))
})
g.AfterEach(func() {
- rfs.reset()
+ _ = fs.TruncateRootDirectory()
})
})
}
@@ -268,25 +263,25 @@ func TestFilesystem_Rename(t *testing.T) {
err = fs.Rename("source.txt", "target.txt")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrExist)).IsTrue("err is not ErrExist")
})
g.It("returns an error if the final destination is the root directory", func() {
err := fs.Rename("source.txt", "/")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("returns an error if the source destination is the root directory", func() {
- err := fs.Rename("source.txt", "/")
+ err := fs.Rename("/", "target.txt")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("does not allow renaming to a location outside the root", func() {
err := fs.Rename("source.txt", "../target.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("does not allow renaming from a location outside the root", func() {
@@ -294,7 +289,7 @@ func TestFilesystem_Rename(t *testing.T) {
err = fs.Rename("/../ext-source.txt", "target.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("allows a file to be renamed", func() {
@@ -303,7 +298,7 @@ func TestFilesystem_Rename(t *testing.T) {
_, err = rfs.StatServerFile("source.txt")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
st, err := rfs.StatServerFile("target.txt")
g.Assert(err).IsNil()
@@ -320,7 +315,7 @@ func TestFilesystem_Rename(t *testing.T) {
_, err = rfs.StatServerFile("source_dir")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
st, err := rfs.StatServerFile("target_dir")
g.Assert(err).IsNil()
@@ -330,7 +325,7 @@ func TestFilesystem_Rename(t *testing.T) {
g.It("returns an error if the source does not exist", func() {
err := fs.Rename("missing.txt", "target.txt")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
})
g.It("creates directories if they are missing", func() {
@@ -343,7 +338,7 @@ func TestFilesystem_Rename(t *testing.T) {
})
g.AfterEach(func() {
- rfs.reset()
+ _ = fs.TruncateRootDirectory()
})
})
}
@@ -358,13 +353,13 @@ func TestFilesystem_Copy(t *testing.T) {
panic(err)
}
- atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
+ fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content")))
})
g.It("should return an error if the source does not exist", func() {
err := fs.Copy("foo.txt")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
})
g.It("should return an error if the source is outside the root", func() {
@@ -372,11 +367,11 @@ func TestFilesystem_Copy(t *testing.T) {
err = fs.Copy("../ext-source.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("should return an error if the source directory is outside the root", func() {
- err := os.MkdirAll(filepath.Join(rfs.root, "/nested/in/dir"), 0o755)
+ err := os.MkdirAll(filepath.Join(rfs.root, "nested/in/dir"), 0o755)
g.Assert(err).IsNil()
err = rfs.CreateServerFileFromString("/../nested/in/dir/ext-source.txt", "external content")
@@ -384,28 +379,28 @@ func TestFilesystem_Copy(t *testing.T) {
err = fs.Copy("../nested/in/dir/ext-source.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
err = fs.Copy("nested/in/../../../nested/in/dir/ext-source.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("should return an error if the source is a directory", func() {
- err := os.Mkdir(filepath.Join(rfs.root, "/server/dir"), 0o755)
+ err := os.Mkdir(filepath.Join(rfs.root, "server/dir"), 0o755)
g.Assert(err).IsNil()
err = fs.Copy("dir")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
})
g.It("should return an error if there is not space to copy the file", func() {
- atomic.StoreInt64(&fs.diskLimit, 2)
+ fs.SetDiskLimit(2)
err := fs.Copy("source.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue()
+ g.Assert(IsErrorCode(err, ErrCodeDiskSpace)).IsTrue("err is not ErrCodeDiskSpace")
})
g.It("should create a copy of the file and increment the disk used", func() {
@@ -433,7 +428,7 @@ func TestFilesystem_Copy(t *testing.T) {
g.Assert(err).IsNil()
}
- g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(utf8.RuneCountInString("test content")) * 3)
+ g.Assert(fs.CachedUsage()).Equal(int64(utf8.RuneCountInString("test content")) * 3)
})
g.It("should create a copy inside of a directory", func() {
@@ -454,10 +449,7 @@ func TestFilesystem_Copy(t *testing.T) {
})
g.AfterEach(func() {
- rfs.reset()
-
- atomic.StoreInt64(&fs.diskUsed, 0)
- atomic.StoreInt64(&fs.diskLimit, 0)
+ _ = fs.TruncateRootDirectory()
})
})
}
@@ -472,7 +464,7 @@ func TestFilesystem_Delete(t *testing.T) {
panic(err)
}
- atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")))
+ fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content")))
})
g.It("does not delete files outside the root directory", func() {
@@ -480,13 +472,13 @@ func TestFilesystem_Delete(t *testing.T) {
err = fs.Delete("../ext-source.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("does not allow the deletion of the root directory", func() {
err := fs.Delete("/")
g.Assert(err).IsNotNil()
- g.Assert(err.Error()).Equal("cannot delete root server directory")
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("does not return an error if the target does not exist", func() {
@@ -504,9 +496,9 @@ func TestFilesystem_Delete(t *testing.T) {
_, err = rfs.StatServerFile("source.txt")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
- g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
+ g.Assert(fs.CachedUsage()).Equal(int64(0))
})
g.It("deletes all items inside a directory if the directory is deleted", func() {
@@ -524,16 +516,16 @@ func TestFilesystem_Delete(t *testing.T) {
g.Assert(err).IsNil()
}
- atomic.StoreInt64(&fs.diskUsed, int64(utf8.RuneCountInString("test content")*3))
+ fs.unixFS.SetUsage(int64(utf8.RuneCountInString("test content") * 3))
err = fs.Delete("foo")
g.Assert(err).IsNil()
- g.Assert(atomic.LoadInt64(&fs.diskUsed)).Equal(int64(0))
+ g.Assert(fs.unixFS.Usage()).Equal(int64(0))
for _, s := range sources {
_, err = rfs.StatServerFile(s)
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
}
})
@@ -589,7 +581,7 @@ func TestFilesystem_Delete(t *testing.T) {
// Delete a file inside the symlinked directory.
err = fs.Delete("symlink/source.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
// Ensure the file outside the root directory still exists.
_, err = os.Lstat(filepath.Join(rfs.root, "foo/source.txt"))
@@ -608,14 +600,11 @@ func TestFilesystem_Delete(t *testing.T) {
// Delete a file inside the symlinked directory.
err = fs.Delete("symlink/source.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.AfterEach(func() {
- rfs.reset()
-
- atomic.StoreInt64(&fs.diskUsed, 0)
- atomic.StoreInt64(&fs.diskLimit, 0)
+ _ = fs.TruncateRootDirectory()
})
})
}
diff --git a/server/filesystem/path.go b/server/filesystem/path.go
index 3952e5d..edb1cad 100644
--- a/server/filesystem/path.go
+++ b/server/filesystem/path.go
@@ -1,71 +1,28 @@
package filesystem
import (
- "context"
- iofs "io/fs"
- "os"
"path/filepath"
"strings"
- "sync"
"emperror.dev/errors"
- "golang.org/x/sync/errgroup"
)
// Checks if the given file or path is in the server's file denylist. If so, an Error
// is returned, otherwise nil is returned.
func (fs *Filesystem) IsIgnored(paths ...string) error {
for _, p := range paths {
- sp, err := fs.SafePath(p)
- if err != nil {
- return err
- }
- if fs.denylist.MatchesPath(sp) {
- return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: sp})
+ //sp, err := fs.SafePath(p)
+ //if err != nil {
+ // return err
+ //}
+ // TODO: update logic to use unixFS
+ if fs.denylist.MatchesPath(p) {
+ return errors.WithStack(&Error{code: ErrCodeDenylistFile, path: p, resolved: p})
}
}
return nil
}
-// Normalizes a directory being passed in to ensure the user is not able to escape
-// from their data directory. After normalization if the directory is still within their home
-// path it is returned. If they managed to "escape" an error will be returned.
-//
-// This logic is actually copied over from the SFTP server code. Ideally that eventually
-// either gets ported into this application, or is able to make use of this package.
-func (fs *Filesystem) SafePath(p string) (string, error) {
- // Start with a cleaned up path before checking the more complex bits.
- r := fs.unsafeFilePath(p)
-
- // At the same time, evaluate the symlink status and determine where this file or folder
- // is truly pointing to.
- ep, err := filepath.EvalSymlinks(r)
- if err != nil && !os.IsNotExist(err) {
- return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
- } else if os.IsNotExist(err) {
- // The target of one of the symlinks (EvalSymlinks is recursive) does not exist.
- // So we get what target path does not exist and check if it's within the data
- // directory. If it is, we return the original path, otherwise we return an error.
- pErr, ok := err.(*iofs.PathError)
- if !ok {
- return "", errors.Wrap(err, "server/filesystem: failed to evaluate symlink")
- }
- ep = pErr.Path
- }
-
- // If the requested directory from EvalSymlinks begins with the server root directory go
- // ahead and return it. If not we'll return an error which will block any further action
- // on the file.
- if fs.unsafeIsInDataDirectory(ep) {
- // Returning the original path here instead of the resolved path ensures that
- // whatever the user is trying to do will work as expected. If we returned the
- // resolved path, the user would be unable to know that it is in fact a symlink.
- return r, nil
- }
-
- return "", NewBadPathResolution(p, r)
-}
-
// Generate a path to the file by cleaning it up and appending the root server path to it. This
// DOES NOT guarantee that the file resolves within the server data directory. You'll want to use
// the fs.unsafeIsInDataDirectory(p) function to confirm.
@@ -84,51 +41,3 @@ func (fs *Filesystem) unsafeFilePath(p string) string {
func (fs *Filesystem) unsafeIsInDataDirectory(p string) bool {
return strings.HasPrefix(strings.TrimSuffix(p, "/")+"/", strings.TrimSuffix(fs.Path(), "/")+"/")
}
-
-// Executes the fs.SafePath function in parallel against an array of paths. If any of the calls
-// fails an error will be returned.
-func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
- var cleaned []string
-
- // Simple locker function to avoid racy appends to the array of cleaned paths.
- m := new(sync.Mutex)
- push := func(c string) {
- m.Lock()
- cleaned = append(cleaned, c)
- m.Unlock()
- }
-
- // Create an error group that we can use to run processes in parallel while retaining
- // the ability to cancel the entire process immediately should any of it fail.
- g, ctx := errgroup.WithContext(context.Background())
-
- // Iterate over all of the paths and generate a cleaned path, if there is an error for any
- // of the files, abort the process.
- for _, p := range paths {
- // Create copy so we can use it within the goroutine correctly.
- pi := p
-
- // Recursively call this function to continue digging through the directory tree within
- // a separate goroutine. If the context is canceled abort this process.
- g.Go(func() error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- // If the callback returns true, go ahead and keep walking deeper. This allows
- // us to programmatically continue deeper into directories, or stop digging
- // if that pathway knows it needs nothing else.
- if c, err := fs.SafePath(pi); err != nil {
- return err
- } else {
- push(c)
- }
-
- return nil
- }
- })
- }
-
- // Block until all of the routines finish and have returned a value.
- return cleaned, g.Wait()
-}
diff --git a/server/filesystem/path_test.go b/server/filesystem/path_test.go
index ecb9627..4d46fbf 100644
--- a/server/filesystem/path_test.go
+++ b/server/filesystem/path_test.go
@@ -8,6 +8,8 @@ import (
"emperror.dev/errors"
. "github.com/franela/goblin"
+
+ "github.com/pterodactyl/wings/internal/ufs"
)
func TestFilesystem_Path(t *testing.T) {
@@ -21,80 +23,6 @@ func TestFilesystem_Path(t *testing.T) {
})
}
-func TestFilesystem_SafePath(t *testing.T) {
- g := Goblin(t)
- fs, rfs := NewFs()
- prefix := filepath.Join(rfs.root, "/server")
-
- g.Describe("SafePath", func() {
- g.It("returns a cleaned path to a given file", func() {
- p, err := fs.SafePath("test.txt")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix + "/test.txt")
-
- p, err = fs.SafePath("/test.txt")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix + "/test.txt")
-
- p, err = fs.SafePath("./test.txt")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix + "/test.txt")
-
- p, err = fs.SafePath("/foo/../test.txt")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix + "/test.txt")
-
- p, err = fs.SafePath("/foo/bar")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix + "/foo/bar")
- })
-
- g.It("handles root directory access", func() {
- p, err := fs.SafePath("/")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix)
-
- p, err = fs.SafePath("")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix)
- })
-
- g.It("removes trailing slashes from paths", func() {
- p, err := fs.SafePath("/foo/bar/")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix + "/foo/bar")
- })
-
- g.It("handles deeply nested directories that do not exist", func() {
- p, err := fs.SafePath("/foo/bar/baz/quaz/../../ducks/testing.txt")
- g.Assert(err).IsNil()
- g.Assert(p).Equal(prefix + "/foo/bar/ducks/testing.txt")
- })
-
- g.It("blocks access to files outside the root directory", func() {
- p, err := fs.SafePath("../test.txt")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
- g.Assert(p).Equal("")
-
- p, err = fs.SafePath("/../test.txt")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
- g.Assert(p).Equal("")
-
- p, err = fs.SafePath("./foo/../../test.txt")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
- g.Assert(p).Equal("")
-
- p, err = fs.SafePath("..")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
- g.Assert(p).Equal("")
- })
- })
-}
-
// We test against accessing files outside the root directory in the tests, however it
// is still possible for someone to mess up and not properly use this safe path call. In
// order to truly confirm this, we'll try to pass in a symlinked malicious file to all of
@@ -133,7 +61,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("symlinked.txt", r)
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("cannot write to a non-existent file symlinked outside the root", func() {
@@ -141,7 +69,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("symlinked_does_not_exist.txt", r)
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("cannot write to chained symlinks with target that does not exist outside the root", func() {
@@ -149,7 +77,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("symlinked_does_not_exist2.txt", r)
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrBadPathResolution)).IsTrue("err is not ErrBadPathResolution")
})
g.It("cannot write a file to a directory symlinked outside the root", func() {
@@ -157,7 +85,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
err := fs.Writefile("external_dir/foo.txt", r)
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
})
})
@@ -165,55 +93,54 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
g.It("cannot create a directory outside the root", func() {
err := fs.CreateDirectory("my_dir", "external_dir")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
})
g.It("cannot create a nested directory outside the root", func() {
err := fs.CreateDirectory("my/nested/dir", "external_dir/foo/bar")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
})
g.It("cannot create a nested directory outside the root", func() {
err := fs.CreateDirectory("my/nested/dir", "external_dir/server")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue("err is not ErrNotDirectory")
})
})
g.Describe("Rename", func() {
- g.It("cannot rename a file symlinked outside the directory root", func() {
- err := fs.Rename("symlinked.txt", "foo.txt")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.It("can rename a file symlinked outside the directory root", func() {
+ _, err := os.Lstat(filepath.Join(rfs.root, "server", "symlinked.txt"))
+ g.Assert(err).IsNil()
+ err = fs.Rename("symlinked.txt", "foo.txt")
+ g.Assert(err).IsNil()
+ _, err = os.Lstat(filepath.Join(rfs.root, "server", "foo.txt"))
+ g.Assert(err).IsNil()
})
- g.It("cannot rename a symlinked directory outside the root", func() {
- err := fs.Rename("external_dir", "foo")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.It("can rename a symlinked directory outside the root", func() {
+ _, err := os.Lstat(filepath.Join(rfs.root, "server", "external_dir"))
+ g.Assert(err).IsNil()
+ err = fs.Rename("external_dir", "foo")
+ g.Assert(err).IsNil()
+ _, err = os.Lstat(filepath.Join(rfs.root, "server", "foo"))
+ g.Assert(err).IsNil()
})
g.It("cannot rename a file to a location outside the directory root", func() {
- rfs.CreateServerFileFromString("my_file.txt", "internal content")
+ _ = rfs.CreateServerFileFromString("my_file.txt", "internal content")
+ t.Log(rfs.root)
- err := fs.Rename("my_file.txt", "external_dir/my_file.txt")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
- })
- })
+ st, err := os.Lstat(filepath.Join(rfs.root, "server", "foo"))
+ g.Assert(err).IsNil()
+ g.Assert(st.Mode()&ufs.ModeSymlink != 0).IsTrue()
- g.Describe("Chown", func() {
- g.It("cannot chown a file symlinked outside the directory root", func() {
- err := fs.Chown("symlinked.txt")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
- })
+ err = fs.Rename("my_file.txt", "foo/my_file.txt")
+ g.Assert(errors.Is(err, ufs.ErrNotDirectory)).IsTrue()
- g.It("cannot chown a directory symlinked outside the directory root", func() {
- err := fs.Chown("external_dir")
- g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ st, err = os.Lstat(filepath.Join(rfs.root, "malicious_dir", "my_file.txt"))
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue()
})
})
@@ -221,7 +148,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
g.It("cannot copy a file symlinked outside the directory root", func() {
err := fs.Copy("symlinked.txt")
g.Assert(err).IsNotNil()
- g.Assert(IsErrorCode(err, ErrCodePathResolution)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
})
})
@@ -235,9 +162,9 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
_, err = rfs.StatServerFile("symlinked.txt")
g.Assert(err).IsNotNil()
- g.Assert(errors.Is(err, os.ErrNotExist)).IsTrue()
+ g.Assert(errors.Is(err, ufs.ErrNotExist)).IsTrue("err is not ErrNotExist")
})
})
- rfs.reset()
+ _ = fs.TruncateRootDirectory()
}
diff --git a/server/filesystem/stat.go b/server/filesystem/stat.go
index cc25827..9d446be 100644
--- a/server/filesystem/stat.go
+++ b/server/filesystem/stat.go
@@ -1,16 +1,18 @@
package filesystem
import (
- "os"
+ "encoding/json"
+ "io"
"strconv"
"time"
"github.com/gabriel-vasile/mimetype"
- "github.com/goccy/go-json"
+
+ "github.com/pterodactyl/wings/internal/ufs"
)
type Stat struct {
- os.FileInfo
+ ufs.FileInfo
Mimetype string
}
@@ -31,40 +33,31 @@ func (s *Stat) MarshalJSON() ([]byte, error) {
Created: s.CTime().Format(time.RFC3339),
Modified: s.ModTime().Format(time.RFC3339),
Mode: s.Mode().String(),
- // Using `&os.ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
- ModeBits: strconv.FormatUint(uint64(s.Mode()&os.ModePerm), 8),
+ // Using `&ufs.ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
+ ModeBits: strconv.FormatUint(uint64(s.Mode()&ufs.ModePerm), 8),
Size: s.Size(),
Directory: s.IsDir(),
File: !s.IsDir(),
- Symlink: s.Mode().Perm()&os.ModeSymlink != 0,
+ Symlink: s.Mode()&ufs.ModeSymlink != 0,
Mime: s.Mimetype,
})
}
-// Stat stats a file or folder and returns the base stat object from go along
-// with the MIME data that can be used for editing files.
-func (fs *Filesystem) Stat(p string) (Stat, error) {
- cleaned, err := fs.SafePath(p)
+func statFromFile(f ufs.File) (Stat, error) {
+ s, err := f.Stat()
if err != nil {
return Stat{}, err
}
- return fs.unsafeStat(cleaned)
-}
-
-func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
- s, err := os.Stat(p)
- if err != nil {
- return Stat{}, err
- }
-
var m *mimetype.MIME
if !s.IsDir() {
- m, err = mimetype.DetectFile(p)
+ m, err = mimetype.DetectReader(f)
if err != nil {
return Stat{}, err
}
+ if _, err := f.Seek(0, io.SeekStart); err != nil {
+ return Stat{}, err
+ }
}
-
st := Stat{
FileInfo: s,
Mimetype: "inode/directory",
@@ -72,6 +65,20 @@ func (fs *Filesystem) unsafeStat(p string) (Stat, error) {
if m != nil {
st.Mimetype = m.String()
}
-
+ return st, nil
+}
+
+// Stat stats a file or folder and returns the base stat object from go along
+// with the MIME data that can be used for editing files.
+func (fs *Filesystem) Stat(p string) (Stat, error) {
+ f, err := fs.unixFS.Open(p)
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+ st, err := statFromFile(f)
+ if err != nil {
+ return Stat{}, err
+ }
return st, nil
}
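One detail in statFromFile above: mimetype.DetectReader consumes bytes from the handle while sniffing, so the file must be rewound before anything else reads from it (which is why the Seek to io.SeekStart is there). A minimal sketch of that open-detect-rewind pattern against a placeholder path:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/gabriel-vasile/mimetype"
)

func main() {
	f, err := os.Open("example.bin") // placeholder path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	// DetectReader reads up to its detection limit, advancing the file offset.
	m, err := mimetype.DetectReader(f)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(m.String())

	// Rewind so later consumers of the handle start from byte 0 again.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}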
diff --git a/server/filesystem/stat_darwin.go b/server/filesystem/stat_darwin.go
deleted file mode 100644
index 6d0cff3..0000000
--- a/server/filesystem/stat_darwin.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package filesystem
-
-import (
- "syscall"
- "time"
-)
-
-// CTime returns the time that the file/folder was created.
-func (s *Stat) CTime() time.Time {
- st := s.Sys().(*syscall.Stat_t)
-
- return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec)
-}
diff --git a/server/filesystem/stat_linux.go b/server/filesystem/stat_linux.go
index a9c7fb3..7891baf 100644
--- a/server/filesystem/stat_linux.go
+++ b/server/filesystem/stat_linux.go
@@ -3,12 +3,22 @@ package filesystem
import (
"syscall"
"time"
+
+ "golang.org/x/sys/unix"
)
-// Returns the time that the file/folder was created.
+// CTime returns the time that the file/folder was created.
+//
+// TODO: remove. Ctim has never actually been correct; it does not
+// return the creation time.
func (s *Stat) CTime() time.Time {
- st := s.Sys().(*syscall.Stat_t)
-
- // Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
- return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
+ if st, ok := s.Sys().(*unix.Stat_t); ok {
+ // Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
+ return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
+ }
+ if st, ok := s.Sys().(*syscall.Stat_t); ok {
+ // Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
+ return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec))
+ }
+ return time.Time{}
}
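As the TODO above notes, st_ctim is the inode change time rather than the creation time. On kernels and filesystems that support it, the real birth time is exposed via statx(2); the sketch below shows that alternative with golang.org/x/sys/unix and is not something the current code does:

package main

import (
	"fmt"
	"time"

	"golang.org/x/sys/unix"
)

// birthTime asks statx(2) for a file's creation (birth) timestamp. Not every
// filesystem or kernel fills it in, so the returned mask must be checked.
func birthTime(path string) (time.Time, bool) {
	var stx unix.Statx_t
	if err := unix.Statx(unix.AT_FDCWD, path, 0, unix.STATX_BTIME, &stx); err != nil {
		return time.Time{}, false
	}
	if stx.Mask&unix.STATX_BTIME == 0 {
		return time.Time{}, false
	}
	return time.Unix(stx.Btime.Sec, int64(stx.Btime.Nsec)), true
}

func main() {
	if t, ok := birthTime("/etc/hostname"); ok {
		fmt.Println(t)
	}
}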
diff --git a/server/filesystem/stat_windows.go b/server/filesystem/stat_windows.go
deleted file mode 100644
index 3652677..0000000
--- a/server/filesystem/stat_windows.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package filesystem
-
-import (
- "time"
-)
-
-// On linux systems this will return the time that the file was created.
-// However, I have no idea how to do this on windows, so we're skipping it
-// for right now.
-func (s *Stat) CTime() time.Time {
- return s.ModTime()
-}
diff --git a/server/install.go b/server/install.go
index df0061c..cf877d8 100644
--- a/server/install.go
+++ b/server/install.go
@@ -2,7 +2,6 @@ package server
import (
"bufio"
- "bytes"
"context"
"html/template"
"io"
@@ -218,30 +217,18 @@ func (ip *InstallationProcess) tempDir() string {
// can be properly mounted into the installation container and then executed.
func (ip *InstallationProcess) writeScriptToDisk() error {
// Make sure the temp directory root exists before trying to make a directory within it. The
- // ioutil.TempDir call expects this base to exist, it won't create it for you.
+ // os.TempDir call expects this base to exist, it won't create it for you.
if err := os.MkdirAll(ip.tempDir(), 0o700); err != nil {
return errors.WithMessage(err, "could not create temporary directory for install process")
}
-
f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
return errors.WithMessage(err, "failed to write server installation script to disk before mount")
}
defer f.Close()
-
- w := bufio.NewWriter(f)
-
- scanner := bufio.NewScanner(bytes.NewReader([]byte(ip.Script.Script)))
- for scanner.Scan() {
- w.WriteString(scanner.Text() + "\n")
- }
-
- if err := scanner.Err(); err != nil {
+ if _, err := io.Copy(f, strings.NewReader(strings.ReplaceAll(ip.Script.Script, "\r\n", "\n"))); err != nil {
return err
}
-
- w.Flush()
-
return nil
}
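The io.Copy with strings.ReplaceAll above replaces the old line-by-line scanner: the script is streamed in a single pass while CRLF line endings are normalized to LF before install.sh is mounted into the container. A tiny demonstration of the normalization, writing to stdout instead of a file:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	script := "#!/bin/ash\r\necho installing\r\n" // CRLF input as it might arrive

	// Same approach as writeScriptToDisk, just targeting stdout.
	if _, err := io.Copy(os.Stdout, strings.NewReader(strings.ReplaceAll(script, "\r\n", "\n"))); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}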
diff --git a/server/manager.go b/server/manager.go
index 3bf481f..1eb4908 100644
--- a/server/manager.go
+++ b/server/manager.go
@@ -196,7 +196,10 @@ func (m *Manager) InitServer(data remote.ServerConfigurationResponse) (*Server,
return nil, errors.WithStackIf(err)
}
- s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
+ s.fs, err = filesystem.New(filepath.Join(config.Get().System.Data, s.ID()), s.DiskSpace(), s.Config().Egg.FileDenylist)
+ if err != nil {
+ return nil, errors.WithStackIf(err)
+ }
// Right now we only support a Docker based environment, so I'm going to hard code
// this logic in. When we're ready to support other environment we'll need to make
diff --git a/server/transfer/archive.go b/server/transfer/archive.go
index 6a3d51a..599aa0b 100644
--- a/server/transfer/archive.go
+++ b/server/transfer/archive.go
@@ -35,8 +35,8 @@ type Archive struct {
func NewArchive(t *Transfer, size uint64) *Archive {
return &Archive{
archive: &filesystem.Archive{
- BasePath: t.Server.Filesystem().Path(),
- Progress: progress.NewProgress(size),
+ Filesystem: t.Server.Filesystem(),
+ Progress: progress.NewProgress(size),
},
}
}
diff --git a/sftp/handler.go b/sftp/handler.go
index d702282..50641d5 100644
--- a/sftp/handler.go
+++ b/sftp/handler.go
@@ -2,7 +2,6 @@ package sftp
import (
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -122,7 +121,7 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
if !h.can(permission) {
return nil, sftp.ErrSSHFxPermissionDenied
}
- f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC)
+ f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_TRUNC)
if err != nil {
l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system")
return nil, sftp.ErrSSHFxFailure
@@ -220,16 +219,8 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
if !h.can(PermissionFileCreate) {
return sftp.ErrSSHFxPermissionDenied
}
- source, err := h.fs.SafePath(request.Filepath)
- if err != nil {
- return sftp.ErrSSHFxNoSuchFile
- }
- target, err := h.fs.SafePath(request.Target)
- if err != nil {
- return sftp.ErrSSHFxNoSuchFile
- }
- if err := os.Symlink(source, target); err != nil {
- l.WithField("target", target).WithField("error", err).Error("failed to create symlink")
+ if err := h.fs.Symlink(request.Filepath, request.Target); err != nil {
+ l.WithField("target", request.Target).WithField("error", err).Error("failed to create symlink")
return sftp.ErrSSHFxFailure
}
break
@@ -274,16 +265,12 @@ func (h *Handler) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
switch request.Method {
case "List":
- p, err := h.fs.SafePath(request.Filepath)
- if err != nil {
- return nil, sftp.ErrSSHFxNoSuchFile
- }
- files, err := ioutil.ReadDir(p)
+ entries, err := h.fs.ReadDirStat(request.Filepath)
if err != nil {
h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
return nil, sftp.ErrSSHFxFailure
}
- return ListerAt(files), nil
+ return ListerAt(entries), nil
case "Stat":
st, err := h.fs.Stat(request.Filepath)
if err != nil {
diff --git a/system/system.go b/system/system.go
index 0d3f3cd..d268a35 100644
--- a/system/system.go
+++ b/system/system.go
@@ -6,6 +6,7 @@ import (
"github.com/acobaugh/osrelease"
"github.com/docker/docker/api/types"
+ "github.com/docker/docker/api/types/system"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/parsers/kernel"
)
@@ -121,22 +122,22 @@ func GetSystemInformation() (*Information, error) {
}, nil
}
-func GetDockerInfo(ctx context.Context) (types.Version, types.Info, error) {
+func GetDockerInfo(ctx context.Context) (types.Version, system.Info, error) {
// TODO: find a way to re-use the client from the docker environment.
c, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
- return types.Version{}, types.Info{}, err
+ return types.Version{}, system.Info{}, err
}
defer c.Close()
dockerVersion, err := c.ServerVersion(ctx)
if err != nil {
- return types.Version{}, types.Info{}, err
+ return types.Version{}, system.Info{}, err
}
dockerInfo, err := c.Info(ctx)
if err != nil {
- return types.Version{}, types.Info{}, err
+ return types.Version{}, system.Info{}, err
}
return dockerVersion, dockerInfo, nil