Merge branch 'develop' into schrej/refactor
This commit is contained in: ab86fb703a

.gitignore (vendored, 1 line changed)
@@ -22,6 +22,7 @@

 # ignore configuration file
 /config.yml
+/config*.yml

 # Ignore Vagrant stuff
 /.vagrant
@@ -1,5 +1,12 @@
 # Changelog

+## v1.2.3
+### Fixed
+* **[Security]** Fixes a remaining security vulnerability in the code handling remote file downloads for servers relating to redirect validation.
+
+### Added
+* Adds a configuration key at `api.disable_remote_download` that can be set to `true` to completely disable the remote download system.
+
 ## v1.2.2
 ### Fixed
 * Reverts changes to logic handling blocking until a server process is done running when polling stats. This change exposed a bug in the underlying Docker system causing servers to enter a state in which Wings was unable to terminate the process and Docker commands would hang if executed against the container.
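The `api.disable_remote_download` key above maps to the `DisableRemoteDownload` field added to `ApiConfiguration` further down in this commit. As a rough illustration of how a request handler could honor it, here is a sketch; the handler name and its wiring are hypothetical, only `config.Get()` and the field itself come from this diff:

package router

import (
	"net/http"

	"github.com/pterodactyl/wings/config"
)

// handlePullRemoteFile is a hypothetical handler sketch: when the operator has
// disabled remote downloads for the whole node, refuse the request before doing
// any URL or redirect validation at all.
func handlePullRemoteFile(w http.ResponseWriter, r *http.Request) {
	if config.Get().Api.DisableRemoteDownload {
		http.Error(w, "remote file downloads are disabled on this instance", http.StatusNotImplemented)
		return
	}
	// ... validate the requested URL (including any redirects) and stream the file ...
}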
							
								
								
									
Dockerfile (11 lines changed)
@@ -2,32 +2,21 @@
 FROM golang:1.15-alpine3.12 AS builder

 ARG VERSION
-
 RUN apk add --update --no-cache git=2.26.2-r0 make=4.3-r0 upx=3.96-r0
-
 WORKDIR /app/
-
 COPY go.mod go.sum /app/
 RUN go mod download
-
 COPY . /app/
-
 RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
     -ldflags="-s -w -X github.com/pterodactyl/wings/system.Version=$VERSION" \
     -v \
     -trimpath \
     -o wings \
     wings.go
-
 RUN upx wings

-# --------------------------------------- #
-
 # Stage 2 (Final)
 FROM busybox:1.33.0
-
 RUN echo "ID=\"busybox\"" > /etc/os-release
-
 COPY --from=builder /app/wings /usr/bin/
-
 CMD [ "wings", "--config", "/etc/pterodactyl/config.yml" ]
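The `-X github.com/pterodactyl/wings/system.Version=$VERSION` linker flag in the builder stage only works because `Version` is an ordinary package-level string variable in the `system` package. A minimal sketch of that pattern follows; the default value here is an assumption for illustration, since the real file is not part of this diff:

package system

// Version is stamped in at build time, e.g.:
//   go build -ldflags "-X github.com/pterodactyl/wings/system.Version=v1.2.3"
// The placeholder default below is only what an untagged local build would report.
var Version = "develop"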
							
								
								
									
Makefile (8 lines changed)
@@ -4,7 +4,13 @@ build:

 debug:
 	go build -race
-	./wings --debug --ignore-certificate-errors --config config.yml
+	sudo ./wings --debug --ignore-certificate-errors --config config.yml
+
+# Runs a remotely debuggable session for Wings allowing an IDE to connect and target
+# different breakpoints.
+rmdebug:
+	go build -gcflags "all=-N -l" -race
+	sudo dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec ./wings -- --debug --ignore-certificate-errors --config config.yml

 compress:
 	upx --brute build/wings_*
@@ -62,12 +62,6 @@ func (r *Request) ValidateSftpCredentials(request SftpAuthRequest) (*SftpAuthRes
 	e := resp.Error()
 	if e != nil {
 		if resp.StatusCode >= 400 && resp.StatusCode < 500 {
-			log.WithFields(log.Fields{
-				"subsystem": "sftp",
-				"username":  request.User,
-				"ip":        request.IP,
-			}).Warn(e.Error())
-
 			return nil, &sftpInvalidCredentialsError{}
 		}

@@ -1,61 +0,0 @@
-package cmd
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/pterodactyl/wings/config"
-)
-
-// We've gone through a couple of iterations of where the configuration is stored. This
-// helpful little function will look through the three areas it might have ended up, and
-// return it.
-//
-// We only run this if the configuration flag for the instance is not actually passed in
-// via the command line. Once found, the configuration is moved into the expected default
-// location. Only errors are returned from this function, you can safely assume that after
-// running this the configuration can be found in the correct default location.
-func RelocateConfiguration() error {
-	var match string
-	check := []string{
-		config.DefaultLocation,
-		"/var/lib/pterodactyl/config.yml",
-		"/etc/wings/config.yml",
-	}
-
-	// Loop over all of the configuration paths, and return which one we found, if
-	// any.
-	for _, p := range check {
-		if s, err := os.Stat(p); err != nil {
-			if !os.IsNotExist(err) {
-				return err
-			}
-		} else if !s.IsDir() {
-			match = p
-			break
-		}
-	}
-
-	// Just return a generic not exist error at this point if we didn't have a match, this
-	// will allow the caller to handle displaying a more friendly error to the user. If we
-	// did match in the default location, go ahead and return successfully.
-	if match == "" {
-		return os.ErrNotExist
-	} else if match == config.DefaultLocation {
-		return nil
-	}
-
-	// The rest of this function simply creates the new default location and moves the
-	// old configuration file over to the new location, then sets the permissions on the
-	// file correctly so that only the user running this process can read it.
-	p, _ := filepath.Split(config.DefaultLocation)
-	if err := os.MkdirAll(p, 0755); err != nil {
-		return err
-	}
-
-	if err := os.Rename(match, config.DefaultLocation); err != nil {
-		return err
-	}
-
-	return os.Chmod(config.DefaultLocation, 0600)
-}
@@ -147,7 +147,7 @@ func configureCmdRun(cmd *cobra.Command, args []string) {

 	b, err := ioutil.ReadAll(res.Body)

-	cfg, err := config.NewFromPath(configPath)
+	cfg, err := config.NewAtPath(configPath)
 	if err != nil {
 		panic(err)
 	}
@@ -156,7 +156,7 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
 		panic(err)
 	}

-	if err = cfg.WriteToDisk(); err != nil {
+	if err = config.WriteToDisk(cfg); err != nil {
 		panic(err)
 	}

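The switch from the method `cfg.WriteToDisk()` to the package function `config.WriteToDisk(cfg)` matches the move to package-level state in `config/config.go` below, where the per-struct `writeLock` becomes a package-wide `_writeLock`. The function body itself is not included in this diff; a plausible sketch, reusing only identifiers that do appear in the diff (`_writeLock`, the `path` field, and the `yaml`/`ioutil` imports of package config):

// WriteToDisk is a sketch of what a package-level writer could look like:
// marshal the given configuration to YAML and persist it to the path it was
// loaded from, holding the package write lock so concurrent writers cannot
// interleave. (Hypothetical body; not taken from this commit.)
func WriteToDisk(c *Configuration) error {
	_writeLock.Lock()
	defer _writeLock.Unlock()

	b, err := yaml.Marshal(c)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(c.path, b, 0600)
}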
@@ -15,14 +15,15 @@ import (
 	"strings"
 	"time"

-	"github.com/pterodactyl/wings/environment"
-
 	"github.com/AlecAivazis/survey/v2"
 	"github.com/AlecAivazis/survey/v2/terminal"
+	"github.com/apex/log"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/parsers/operatingsystem"
 	"github.com/pterodactyl/wings/config"
+	"github.com/pterodactyl/wings/environment"
+	"github.com/pterodactyl/wings/loggers/cli"
 	"github.com/pterodactyl/wings/system"
 	"github.com/spf13/cobra"
 )
@@ -40,15 +41,21 @@ var (
 	}
 )

-var diagnosticsCmd = &cobra.Command{
-	Use:   "diagnostics",
-	Short: "Collect diagnostics information.",
-	Run:   diagnosticsCmdRun,
-}
+func newDiagnosticsCommand() *cobra.Command {
+	command := &cobra.Command{
+		Use:   "diagnostics",
+		Short: "Collect and report information about this Wings instance to assist in debugging.",
+		PreRun: func(cmd *cobra.Command, args []string) {
+			initConfig()
+			log.SetHandler(cli.Default)
+		},
+		Run: diagnosticsCmdRun,
+	}

-func init() {
-	diagnosticsCmd.PersistentFlags().StringVar(&diagnosticsArgs.HastebinURL, "hastebin-url", DefaultHastebinUrl, "The url of the hastebin instance to use.")
-	diagnosticsCmd.PersistentFlags().IntVar(&diagnosticsArgs.LogLines, "log-lines", DefaultLogLines, "The number of log lines to include in the report")
+	command.Flags().StringVar(&diagnosticsArgs.HastebinURL, "hastebin-url", DefaultHastebinUrl, "the url of the hastebin instance to use")
+	command.Flags().IntVar(&diagnosticsArgs.LogLines, "log-lines", DefaultLogLines, "the number of log lines to include in the report")
+
+	return command
 }

 // diagnosticsCmdRun collects diagnostics about wings, it's configuration and the node.
@@ -85,7 +92,6 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 	}

 	dockerVersion, dockerInfo, dockerErr := getDockerInfo()
-	_ = dockerInfo

 	output := &strings.Builder{}
 	fmt.Fprintln(output, "Pterodactyl Wings - Diagnostics Report")
@@ -102,8 +108,10 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 	}

 	printHeader(output, "Wings Configuration")
-	cfg, err := config.ReadConfiguration(config.DefaultLocation)
-	if cfg != nil {
+	if err := config.FromFile(config.DefaultLocation); err != nil {
+
+	}
+	cfg := config.Get()
 	fmt.Fprintln(output, "    Panel Location:", redact(cfg.PanelLocation))
 	fmt.Fprintln(output, "")
 	fmt.Fprintln(output, " Internal Webserver:", redact(cfg.Api.Host), ":", cfg.Api.Port)
@@ -123,11 +131,9 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 	fmt.Fprintln(output, "           Username:", cfg.System.Username)
 	fmt.Fprintln(output, "        Server Time:", time.Now().Format(time.RFC1123Z))
 	fmt.Fprintln(output, "         Debug Mode:", cfg.Debug)
-	} else {
-		fmt.Println("Failed to load configuration.", err)
-	}

 	printHeader(output, "Docker: Info")
+	if dockerErr == nil {
 		fmt.Fprintln(output, "Server Version:", dockerInfo.ServerVersion)
 		fmt.Fprintln(output, "Storage Driver:", dockerInfo.Driver)
 		if dockerInfo.DriverStatus != nil {
@@ -147,6 +153,9 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 				fmt.Fprintln(output, w)
 			}
 		}
+	} else {
+		fmt.Fprintln(output, dockerErr.Error())
+	}

 	printHeader(output, "Docker: Running Containers")
 	c := exec.Command("docker", "ps")
@@ -180,23 +189,23 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 		survey.AskOne(&survey.Confirm{Message: "Upload to " + diagnosticsArgs.HastebinURL + "?", Default: false}, &upload)
 	}
 	if upload {
-		url, err := uploadToHastebin(diagnosticsArgs.HastebinURL, output.String())
+		u, err := uploadToHastebin(diagnosticsArgs.HastebinURL, output.String())
 		if err == nil {
-			fmt.Println("Your report is available here: ", url)
+			fmt.Println("Your report is available here: ", u)
 		}
 	}
 }

 func getDockerInfo() (types.Version, types.Info, error) {
-	cli, err := environment.DockerClient()
+	client, err := environment.Docker()
 	if err != nil {
 		return types.Version{}, types.Info{}, err
 	}
-	dockerVersion, err := cli.ServerVersion(context.Background())
+	dockerVersion, err := client.ServerVersion(context.Background())
 	if err != nil {
 		return types.Version{}, types.Info{}, err
 	}
-	dockerInfo, err := cli.Info(context.Background())
+	dockerInfo, err := client.Info(context.Background())
 	if err != nil {
 		return types.Version{}, types.Info{}, err
 	}
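The new `PreRun` hook on the diagnostics command is what guarantees `initConfig()` has populated the configuration singleton before `diagnosticsCmdRun` executes: Cobra always invokes a command's `PreRun` immediately before its `Run`. A generic, self-contained illustration of that ordering (not code from this commit):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		// PreRun fires right before Run for the matched command, which makes it
		// a convenient place for per-command setup such as loading configuration.
		PreRun: func(cmd *cobra.Command, args []string) { fmt.Println("setup") },
		Run:    func(cmd *cobra.Command, args []string) { fmt.Println("run") },
	}
	_ = cmd.Execute()
}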
							
								
								
									
cmd/root.go (218 lines changed)
@@ -9,6 +9,7 @@ import (
 	"os"
 	"path"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"time"

@@ -41,6 +42,8 @@ var rootCommand = &cobra.Command{
 	Use:   "wings",
 	Short: "Runs the API server allowing programatic control of game servers for Pterodactyl Panel.",
 	PreRun: func(cmd *cobra.Command, args []string) {
+		initConfig()
+		initLogging()
 		if tls, _ := cmd.Flags().GetBool("auto-tls"); tls {
 			if host, _ := cmd.Flags().GetString("tls-hostname"); host == "" {
 				fmt.Println("A TLS hostname must be provided when running wings with automatic TLS, e.g.:\n\n    ./wings --auto-tls --tls-hostname my.example.com")
@@ -77,28 +80,7 @@ func init() {

 	rootCommand.AddCommand(versionCommand)
 	rootCommand.AddCommand(configureCmd)
-	rootCommand.AddCommand(diagnosticsCmd)
-}
-
-// Get the configuration path based on the arguments provided.
-func readConfiguration() (*config.Configuration, error) {
-	p := configPath
-	if !strings.HasPrefix(p, "/") {
-		d, err := os.Getwd()
-		if err != nil {
-			return nil, err
-		}
-
-		p = path.Clean(path.Join(d, configPath))
-	}
-
-	if s, err := os.Stat(p); err != nil {
-		return nil, err
-	} else if s.IsDir() {
-		return nil, errors.New("cannot use directory as configuration file path")
-	}
-
-	return config.ReadConfiguration(p)
+	rootCommand.AddCommand(newDiagnosticsCommand())
 }

| func rootCmdRun(cmd *cobra.Command, _ []string) { | func rootCmdRun(cmd *cobra.Command, _ []string) { | ||||||
|  | @ -121,35 +103,9 @@ func rootCmdRun(cmd *cobra.Command, _ []string) { | ||||||
| 		defer profile.Start(profile.BlockProfile).Stop() | 		defer profile.Start(profile.BlockProfile).Stop() | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Only attempt configuration file relocation if a custom location has not
 |  | ||||||
| 	// been specified in the command startup.
 |  | ||||||
| 	if configPath == config.DefaultLocation { |  | ||||||
| 		if err := RelocateConfiguration(); err != nil { |  | ||||||
| 			if errors.Is(err, os.ErrNotExist) { |  | ||||||
| 				exitWithConfigurationNotice() |  | ||||||
| 			} |  | ||||||
| 			panic(err) |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	c, err := readConfiguration() |  | ||||||
| 	if err != nil { |  | ||||||
| 		panic(err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if debug { |  | ||||||
| 		c.Debug = true |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	printLogo() | 	printLogo() | ||||||
| 	if err := configureLogging(c.System.LogDirectory, c.Debug); err != nil { |  | ||||||
| 		panic(err) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	log.WithField("path", c.GetPath()).Info("loading configuration from path") |  | ||||||
| 	if c.Debug { |  | ||||||
| 	log.Debug("running in debug mode") | 	log.Debug("running in debug mode") | ||||||
| 	} | 	log.WithField("config_file", configPath).Info("loading configuration from file") | ||||||
| 
 | 
 | ||||||
| 	if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok { | 	if ok, _ := cmd.Flags().GetBool("ignore-certificate-errors"); ok { | ||||||
| 		log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified") | 		log.Warn("running with --ignore-certificate-errors: TLS certificate host chains and name will not be verified") | ||||||
@@ -158,37 +114,28 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 		}
 	}

-	config.Set(c)
-	config.SetDebugViaFlag(debug)
-
-	if err := c.System.ConfigureTimezone(); err != nil {
+	if err := config.ConfigureTimezone(); err != nil {
 		log.WithField("error", err).Fatal("failed to detect system timezone or use supplied configuration value")
-		return
 	}
-
-	log.WithField("timezone", c.System.Timezone).Info("configured wings with system timezone")
-
-	if err := c.System.ConfigureDirectories(); err != nil {
+	log.WithField("timezone", config.Get().System.Timezone).Info("configured wings with system timezone")
+	if err := config.ConfigureDirectories(); err != nil {
 		log.WithField("error", err).Fatal("failed to configure system directories for pterodactyl")
 		return
 	}
-
-	if err := c.System.EnableLogRotation(); err != nil {
+	if err := config.EnableLogRotation(); err != nil {
 		log.WithField("error", err).Fatal("failed to configure log rotation on the system")
 		return
 	}

-	log.WithField("username", c.System.Username).Info("checking for pterodactyl system user")
-	if su, err := c.EnsurePterodactylUser(); err != nil {
+	log.WithField("username", config.Get().System.User).Info("checking for pterodactyl system user")
+	if err := config.EnsurePterodactylUser(); err != nil {
 		log.WithField("error", err).Fatal("failed to create pterodactyl system user")
-		return
-	} else {
-		log.WithFields(log.Fields{
-			"username": su.Username,
-			"uid":      su.Uid,
-			"gid":      su.Gid,
-		}).Info("configured system user successfully")
 	}
+	log.WithFields(log.Fields{
+		"username": config.Get().System.Username,
+		"uid":      config.Get().System.User.Uid,
+		"gid":      config.Get().System.User.Gid,
+	}).Info("configured system user successfully")

 	panelClient := remote.CreateClient(
 		config.Get().PanelLocation,
@@ -202,16 +149,14 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {

 	if err := serverManager.Initialize(int(c.RemoteQuery.BootServersPerPage)); err != nil {
 		log.WithField("error", err).Fatal("failed to load server configurations")
-		return
 	}

-	if err := environment.ConfigureDocker(&c.Docker); err != nil {
+	if err := environment.ConfigureDocker(cmd.Context()); err != nil {
 		log.WithField("error", err).Fatal("failed to configure docker environment")
-		return
 	}

-	if err := c.WriteToDisk(); err != nil {
-		log.WithField("error", err).Error("failed to save configuration to disk")
+	if err := config.WriteToDisk(config.Get()); err != nil {
+		log.WithField("error", err).Fatal("failed to write configuration to disk")
 	}

 	// Just for some nice log output.
@@ -228,10 +173,15 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 	// on Wings. This allows us to ensure the environment exists, write configurations,
 	// and reboot processes without causing a slow-down due to sequential booting.
 	pool := workerpool.New(4)
-
 	for _, serv := range serverManager.GetAll() {
 		s := serv

+		// For each server we encounter make sure the root data directory exists.
+		if err := s.EnsureDataDirectoryExists(); err != nil {
+			s.Log().Error("could not create root data directory for server: not loading server...")
+			continue
+		}
+
 		pool.Submit(func() {
 			s.Log().Info("configuring server environment and restoring to previous state")

@@ -283,20 +233,30 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {

 	// Wait until all of the servers are ready to go before we fire up the SFTP and HTTP servers.
 	pool.StopWait()
+	defer func() {
+		// Cancel the context on all of the running servers at this point, even though the
+		// program is just shutting down.
+		for _, s := range server.GetServers().All() {
+			s.CtxCancel()
+		}
+	}()

-	// Initialize the SFTP server.
-	if err := sftp.Initialize(c.System); err != nil {
-		log.WithError(err).Fatal("failed to initialize the sftp server")
-		return
-	}
+	go func() {
+		// Run the SFTP server.
+		if err := sftp.New().Run(); err != nil {
+			log.WithError(err).Fatal("failed to initialize the sftp server")
+			return
+		}
+	}()

+	sys := config.Get().System
 	// Ensure the archive directory exists.
-	if err := os.MkdirAll(c.System.ArchiveDirectory, 0755); err != nil {
+	if err := os.MkdirAll(sys.ArchiveDirectory, 0755); err != nil {
 		log.WithField("error", err).Error("failed to create archive directory")
 	}

 	// Ensure the backup directory exists.
-	if err := os.MkdirAll(c.System.BackupDirectory, 0755); err != nil {
+	if err := os.MkdirAll(sys.BackupDirectory, 0755); err != nil {
 		log.WithField("error", err).Error("failed to create backup directory")
 	}

@@ -306,47 +266,31 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 		autotls = false
 	}

+	api := config.Get().Api
 	log.WithFields(log.Fields{
-		"use_ssl":      c.Api.Ssl.Enabled,
+		"use_ssl":      api.Ssl.Enabled,
 		"use_auto_tls": autotls,
-		"host_address": c.Api.Host,
-		"host_port":    c.Api.Port,
+		"host_address": api.Host,
+		"host_port":    api.Port,
 	}).Info("configuring internal webserver")

-	// Configure the router.
-	r := router.Configure(serverManager)
-
+	// Create a new HTTP server instance to handle inbound requests from the Panel
+	// and external clients.
 	s := &http.Server{
-		Addr:    fmt.Sprintf("%s:%d", c.Api.Host, c.Api.Port),
-		Handler: r,
-		TLSConfig: &tls.Config{
-			NextProtos: []string{"h2", "http/1.1"},
-			// @see https://blog.cloudflare.com/exposing-go-on-the-internet
-			CipherSuites: []uint16{
-				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-				tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
-				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
-				tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
-				tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
-			},
-			PreferServerCipherSuites: true,
-			MinVersion:               tls.VersionTLS12,
-			MaxVersion:               tls.VersionTLS13,
-			CurvePreferences:         []tls.CurveID{tls.X25519, tls.CurveP256},
-		},
+		Addr:      api.Host + ":" + strconv.Itoa(api.Port),
+		Handler:   router.Configure(serverManager),
+		TLSConfig: config.DefaultTLSConfig,
 	}

 	// Check if the server should run with TLS but using autocert.
 	if autotls {
 		m := autocert.Manager{
 			Prompt:     autocert.AcceptTOS,
-			Cache:      autocert.DirCache(path.Join(c.System.RootDirectory, "/.tls-cache")),
+			Cache:      autocert.DirCache(path.Join(sys.RootDirectory, "/.tls-cache")),
 			HostPolicy: autocert.HostWhitelist(tlshostname),
 		}

-		log.WithField("hostname", tlshostname).
-			Info("webserver is now listening with auto-TLS enabled; certificates will be automatically generated by Let's Encrypt")
+		log.WithField("hostname", tlshostname).Info("webserver is now listening with auto-TLS enabled; certificates will be automatically generated by Let's Encrypt")

 		// Hook autocert into the main http server.
 		s.TLSConfig.GetCertificate = m.GetCertificate
@@ -358,29 +302,26 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 				log.WithError(err).Error("failed to serve autocert http server")
 			}
 		}()
-
 		// Start the main http server with TLS using autocert.
 		if err := s.ListenAndServeTLS("", ""); err != nil {
-			log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlshostname, "error": err}).
-				Fatal("failed to configure HTTP server using auto-tls")
+			log.WithFields(log.Fields{"auto_tls": true, "tls_hostname": tlshostname, "error": err}).Fatal("failed to configure HTTP server using auto-tls")
 		}
-
 		return
 	}

-	// Check if main http server should run with TLS.
-	if c.Api.Ssl.Enabled {
-		if err := s.ListenAndServeTLS(strings.ToLower(c.Api.Ssl.CertificateFile), strings.ToLower(c.Api.Ssl.KeyFile)); err != nil {
+	// Check if main http server should run with TLS. Otherwise reset the TLS
+	// config on the server and then serve it over normal HTTP.
+	if api.Ssl.Enabled {
+		if err := s.ListenAndServeTLS(strings.ToLower(api.Ssl.CertificateFile), strings.ToLower(api.Ssl.KeyFile)); err != nil {
 			log.WithFields(log.Fields{"auto_tls": false, "error": err}).Fatal("failed to configure HTTPS server")
 		}
 		return
 	}
-
-	// Run the main http server without TLS.
 	s.TLSConfig = nil
 	if err := s.ListenAndServe(); err != nil {
 		log.WithField("error", err).Fatal("failed to configure HTTP server")
 	}
+}

-	// Cancel the context on all of the running servers at this point, even though the
-	// program is just shutting down.
-	for _, s := range server.GetServers().All() {
@@ -389,28 +330,46 @@
 	}
 }

+// Reads the configuration from the disk and then sets up the global singleton
+// with all of the configuration values.
+func initConfig() {
+	if !strings.HasPrefix(configPath, "/") {
+		d, err := os.Getwd()
+		if err != nil {
+			log2.Fatalf("cmd/root: could not determine directory: %s", err)
+		}
+		configPath = path.Clean(path.Join(d, configPath))
+	}
+	err := config.FromFile(configPath)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			exitWithConfigurationNotice()
+		}
+		log2.Fatalf("cmd/root: error while reading configuration file: %s", err)
+	}
+	if debug && !config.Get().Debug {
+		config.SetDebugViaFlag(debug)
+	}
+}
+
 // Configures the global logger for Zap so that we can call it from any location
 // in the code without having to pass around a logger instance.
-func configureLogging(logDir string, debug bool) error {
-	if err := os.MkdirAll(path.Join(logDir, "/install"), 0700); err != nil {
-		return err
+func initLogging() {
+	dir := config.Get().System.LogDirectory
+	if err := os.MkdirAll(path.Join(dir, "/install"), 0700); err != nil {
+		log2.Fatalf("cmd/root: failed to create install directory path: %s", err)
 	}
-
-	p := filepath.Join(logDir, "/wings.log")
+	p := filepath.Join(dir, "/wings.log")
 	w, err := logrotate.NewFile(p)
 	if err != nil {
-		return err
+		log2.Fatalf("cmd/root: failed to create wings log: %s", err)
 	}
-
 	log.SetLevel(log.InfoLevel)
-	if debug {
+	if config.Get().Debug {
 		log.SetLevel(log.DebugLevel)
 	}
-
 	log.SetHandler(multi.New(cli.Default, cli.New(w.File, false)))
 	log.WithField("path", p).Info("writing log files to disk")
-
-	return nil
 }

 // Prints the wings logo, nothing special here!
@@ -439,11 +398,8 @@ func exitWithConfigurationNotice() {
 [_red_][white][bold]Error: Configuration File Not Found[reset]

 Wings was not able to locate your configuration file, and therefore is not
-able to complete its boot process.
-
-Please ensure you have copied your instance configuration file into
-the default location, or have provided the --config flag to use a
-custom location.
+able to complete its boot process. Please ensure you have copied your instance
+configuration file into the default location below.

 Default Location: /etc/pterodactyl/config.yml

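cmd/root.go now goes through the package-level accessors in `config` (`config.FromFile`, `config.Get`, `config.WriteToDisk`) instead of threading a `*config.Configuration` value around. The diff of config/config.go below shows `Set` and the guarded globals (`mu`, `_config`); the matching `Get` accessor is not part of this excerpt, but under the same locking scheme it would plausibly look like this sketch (an assumption for illustration only):

// Get returns the global configuration instance. Taking the package read lock
// keeps readers consistent with writers that go through Set().
// (Sketch only; the real accessor is not shown in this diff.)
func Get() *Configuration {
	mu.RLock()
	defer mu.RUnlock()
	return _config
}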
							
								
								
									
config/config.go (682 lines changed)
@@ -1,33 +1,247 @@
 package config

 import (
+	"context"
+	"crypto/tls"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"os/exec"
 	"os/user"
-	"strconv"
+	"path"
+	"path/filepath"
+	"regexp"
 	"strings"
 	"sync"
+	"text/template"
+	"time"

 	"emperror.dev/errors"
+	"github.com/apex/log"
 	"github.com/cobaugh/osrelease"
 	"github.com/creasty/defaults"
 	"github.com/gbrlsnchs/jwt/v3"
+	"github.com/pterodactyl/wings/system"
 	"gopkg.in/yaml.v2"
 )

 const DefaultLocation = "/etc/pterodactyl/config.yml"

-type Configuration struct {
-	sync.RWMutex `json:"-" yaml:"-"`
+// DefaultTLSConfig sets sane defaults to use when configuring the internal
+// webserver to listen for public connections.
+//
+// @see https://blog.cloudflare.com/exposing-go-on-the-internet
+var DefaultTLSConfig = &tls.Config{
+	NextProtos: []string{"h2", "http/1.1"},
+	CipherSuites: []uint16{
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+	},
+	PreferServerCipherSuites: true,
+	MinVersion:               tls.VersionTLS12,
+	MaxVersion:               tls.VersionTLS13,
+	CurvePreferences:         []tls.CurveID{tls.X25519, tls.CurveP256},
+}

-	// The location from which this configuration instance was instantiated.
-	path string
+var mu sync.RWMutex
+var _config *Configuration
+var _jwtAlgo *jwt.HMACSHA
+var _debugViaFlag bool

 // Locker specific to writing the configuration to the disk, this happens
 // in areas that might already be locked so we don't want to crash the process.
-	writeLock sync.Mutex
+var _writeLock sync.Mutex
+
+// SftpConfiguration defines the configuration of the internal SFTP server.
+type SftpConfiguration struct {
+	// The bind address of the SFTP server.
+	Address string `default:"0.0.0.0" json:"bind_address" yaml:"bind_address"`
+	// The bind port of the SFTP server.
+	Port int `default:"2022" json:"bind_port" yaml:"bind_port"`
+	// If set to true, no write actions will be allowed on the SFTP server.
+	ReadOnly bool `default:"false" yaml:"read_only"`
+}
+
+// ApiConfiguration defines the configuration for the internal API that is
+// exposed by the Wings webserver.
+type ApiConfiguration struct {
+	// The interface that the internal webserver should bind to.
+	Host string `default:"0.0.0.0" yaml:"host"`
+
+	// The port that the internal webserver should bind to.
+	Port int `default:"8080" yaml:"port"`
+
+	// SSL configuration for the daemon.
+	Ssl struct {
+		Enabled         bool   `json:"enabled" yaml:"enabled"`
+		CertificateFile string `json:"cert" yaml:"cert"`
+		KeyFile         string `json:"key" yaml:"key"`
+	}
+
+	// Determines if functionality for allowing remote download of files into server directories
+	// is enabled on this instance. If set to "true" remote downloads will not be possible for
+	// servers.
+	DisableRemoteDownload bool `json:"disable_remote_download" yaml:"disable_remote_download"`
+
+	// The maximum size for files uploaded through the Panel in bytes.
+	UploadLimit int `default:"100" json:"upload_limit" yaml:"upload_limit"`
+}
+
+// RemoteQueryConfiguration defines the configuration settings for remote requests
+// from Wings to the Panel.
+type RemoteQueryConfiguration struct {
+	// The amount of time in seconds that Wings should allow for a request to the Panel API
+	// to complete. If this time passes the request will be marked as failed. If your requests
+	// are taking longer than 30 seconds to complete it is likely a performance issue that
+	// should be resolved on the Panel, and not something that should be resolved by upping this
+	// number.
+	Timeout uint `default:"30" yaml:"timeout"`
+
+	// The number of servers to load in a single request to the Panel API when booting the
+	// Wings instance. A single request is initially made to the Panel to get this number
+	// of servers, and then the pagination status is checked and additional requests are
+	// fired off in parallel to request the remaining pages.
+	//
+	// It is not recommended to change this from the default as you will likely encounter
+	// memory limits on your Panel instance. In the grand scheme of things 4 requests for
+	// 50 servers is likely just as quick as two for 100 or one for 400, and will certainly
+	// be less likely to cause performance issues on the Panel.
+	BootServersPerPage uint `default:"50" yaml:"boot_servers_per_page"`
+}
+
+// SystemConfiguration defines basic system configuration settings.
+type SystemConfiguration struct {
+	// The root directory where all of the pterodactyl data is stored at.
+	RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"`
+
+	// Directory where logs for server installations and other wings events are logged.
+	LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"`
+
+	// Directory where the server data is stored at.
+	Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"`
+
+	// Directory where server archives for transferring will be stored.
+	ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"`
+
+	// Directory where local backups will be stored on the machine.
+	BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"`
+
+	// The user that should own all of the server files, and be used for containers.
+	Username string `default:"pterodactyl" yaml:"username"`
+
+	// The timezone for this Wings instance. This is detected by Wings automatically if possible,
+	// and falls back to UTC if not able to be detected. If you need to set this manually, that
+	// can also be done.
+	//
+	// This timezone value is passed into all containers created by Wings.
+	Timezone string `yaml:"timezone"`
+
+	// Definitions for the user that gets created to ensure that we can quickly access
+	// this information without constantly having to do a system lookup.
+	User struct {
+		Uid int
+		Gid int
+	}
+
+	// The amount of time in seconds that can elapse before a server's disk space calculation is
+	// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
+	// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
+	// process.
+	//
+	// Set to 0 to disable disk checking entirely. This will always return 0 for the disk space used
+	// by a server and should only be set in extreme scenarios where performance is critical and
+	// disk usage is not a concern.
+	DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"`
+
+	// If set to true, file permissions for a server will be checked when the process is
+	// booted. This can cause boot delays if the server has a large amount of files. In most
+	// cases disabling this should not have any major impact unless external processes are
+	// frequently modifying a servers' files.
+	CheckPermissionsOnBoot bool `default:"true" yaml:"check_permissions_on_boot"`
+
+	// If set to false Wings will not attempt to write a log rotate configuration to the disk
+	// when it boots and one is not detected.
+	EnableLogRotate bool `default:"true" yaml:"enable_log_rotate"`
+
+	// The number of lines to send when a server connects to the websocket.
+	WebsocketLogCount int `default:"150" yaml:"websocket_log_count"`
+
+	Sftp SftpConfiguration `yaml:"sftp"`
+
+	CrashDetection CrashDetection `yaml:"crash_detection"`
+
+	Backups Backups `yaml:"backups"`
+
+	Transfers Transfers `yaml:"transfers"`
+}
+
+type CrashDetection struct {
+	// Determines if Wings should detect a server that stops with a normal exit code of
+	// "0" as being crashed if the process stopped without any Wings interaction. E.g.
+	// the user did not press the stop button, but the process stopped cleanly.
+	DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"`
+
+	// Timeout specifies the timeout between crashes that will not cause the server
+	// to be automatically restarted, this value is used to prevent servers from
+	// becoming stuck in a boot-loop after multiple consecutive crashes.
+	Timeout int `default:"60" json:"timeout"`
+}
+
+type Backups struct {
+	// WriteLimit imposes a Disk I/O write limit on backups to the disk, this affects all
+	// backup drivers as the archiver must first write the file to the disk in order to
+	// upload it to any external storage provider.
+	//
+	// If the value is less than 1, the write speed is unlimited,
+	// if the value is greater than 0, the write speed is the value in MiB/s.
+	//
+	// Defaults to 0 (unlimited)
+	WriteLimit int `default:"0" yaml:"write_limit"`
+}
+
+type Transfers struct {
+	// DownloadLimit imposes a Network I/O read limit when downloading a transfer archive.
+	//
+	// If the value is less than 1, the write speed is unlimited,
+	// if the value is greater than 0, the write speed is the value in MiB/s.
+	//
+	// Defaults to 0 (unlimited)
+	DownloadLimit int `default:"0" yaml:"download_limit"`
+}
+
+type ConsoleThrottles struct {
+	// Whether or not the throttler is enabled for this instance.
+	Enabled bool `json:"enabled" yaml:"enabled" default:"true"`
+
+	// The total number of lines that can be output in a given LineResetInterval period before
+	// a warning is triggered and counted against the server.
+	Lines uint64 `json:"lines" yaml:"lines" default:"2000"`
+
+	// The total number of throttle activations that can accumulate before a server is considered
+	// to be breaching and will be stopped. This value is decremented by one every DecayInterval.
+	MaximumTriggerCount uint64 `json:"maximum_trigger_count" yaml:"maximum_trigger_count" default:"5"`
+
+	// The amount of time after which the number of lines processed is reset to 0. This runs in
+	// a constant loop and is not affected by the current console output volumes. By default, this
+	// will reset the processed line count back to 0 every 100ms.
+	LineResetInterval uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"`
+
+	// The amount of time in milliseconds that must pass without an output warning being triggered
+	// before a throttle activation is decremented.
+	DecayInterval uint64 `json:"decay_interval" yaml:"decay_interval" default:"10000"`
+
+	// The amount of time that a server is allowed to be stopping for before it is terminated
+	// forcefully if it triggers output throttles.
+	StopGracePeriod uint `json:"stop_grace_period" yaml:"stop_grace_period" default:"15"`
+}
+
+type Configuration struct {
+	// The location from which this configuration instance was instantiated.
+	path string

 	// Determines if wings should be running in debug mode. This value is ignored
 	// if the debug flag is passed through the command line arguments.
|  | @ -67,288 +281,336 @@ type Configuration struct { | ||||||
| 	AllowedOrigins []string `json:"allowed_origins" yaml:"allowed_origins"` | 	AllowedOrigins []string `json:"allowed_origins" yaml:"allowed_origins"` | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Defines the configuration of the internal SFTP server.
 | // NewAtPath creates a new struct and set the path where it should be stored.
 | ||||||
| type SftpConfiguration struct { | // This function does not modify the currently stored global configuration.
 | ||||||
| 	// The bind address of the SFTP server.
 | func NewAtPath(path string) (*Configuration, error) { | ||||||
| 	Address string `default:"0.0.0.0" json:"bind_address" yaml:"bind_address"` | 	var c Configuration | ||||||
| 	// The bind port of the SFTP server.
 |  | ||||||
| 	Port int `default:"2022" json:"bind_port" yaml:"bind_port"` |  | ||||||
| 	// If set to true, no write actions will be allowed on the SFTP server.
 |  | ||||||
| 	ReadOnly bool `default:"false" yaml:"read_only"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Defines the configuration for the internal API that is exposed by the
 |  | ||||||
| // daemon webserver.
 |  | ||||||
| type ApiConfiguration struct { |  | ||||||
| 	// The interface that the internal webserver should bind to.
 |  | ||||||
| 	Host string `default:"0.0.0.0" yaml:"host"` |  | ||||||
| 
 |  | ||||||
| 	// The port that the internal webserver should bind to.
 |  | ||||||
| 	Port int `default:"8080" yaml:"port"` |  | ||||||
| 
 |  | ||||||
| 	// SSL configuration for the daemon.
 |  | ||||||
| 	Ssl struct { |  | ||||||
| 		Enabled         bool   `default:"false"` |  | ||||||
| 		CertificateFile string `json:"cert" yaml:"cert"` |  | ||||||
| 		KeyFile         string `json:"key" yaml:"key"` |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// The maximum size for files uploaded through the Panel in bytes.
 |  | ||||||
| 	UploadLimit int `default:"100" json:"upload_limit" yaml:"upload_limit"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Defines the configuration settings for remote requests from Wings to the Panel.
 |  | ||||||
| type RemoteQueryConfiguration struct { |  | ||||||
| 	// The amount of time in seconds that Wings should allow for a request to the Panel API
 |  | ||||||
| 	// to complete. If this time passes the request will be marked as failed. If your requests
 |  | ||||||
| 	// are taking longer than 30 seconds to complete it is likely a performance issue that
 |  | ||||||
| 	// should be resolved on the Panel, and not something that should be resolved by upping this
 |  | ||||||
| 	// number.
 |  | ||||||
| 	Timeout uint `default:"30" yaml:"timeout"` |  | ||||||
| 
 |  | ||||||
| 	// The number of servers to load in a single request to the Panel API when booting the
 |  | ||||||
| 	// Wings instance. A single request is initially made to the Panel to get this number
 |  | ||||||
| 	// of servers, and then the pagination status is checked and additional requests are
 |  | ||||||
| 	// fired off in parallel to request the remaining pages.
 |  | ||||||
| 	//
 |  | ||||||
| 	// It is not recommended to change this from the default as you will likely encounter
 |  | ||||||
| 	// memory limits on your Panel instance. In the grand scheme of things 4 requests for
 |  | ||||||
| 	// 50 servers is likely just as quick as two for 100 or one for 400, and will certainly
 |  | ||||||
| 	// be less likely to cause performance issues on the Panel.
 |  | ||||||
| 	BootServersPerPage uint `default:"50" yaml:"boot_servers_per_page"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Reads the configuration from the provided file and returns the configuration
 |  | ||||||
| // object that can then be used.
 |  | ||||||
| func ReadConfiguration(path string) (*Configuration, error) { |  | ||||||
| 	b, err := ioutil.ReadFile(path) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	c := new(Configuration) |  | ||||||
| 	// Configures the default values for many of the configuration options present
 | 	// Configures the default values for many of the configuration options present
 | ||||||
| 	// in the structs. Values set in the configuration file take priority over the
 | 	// in the structs. Values set in the configuration file take priority over the
 | ||||||
| 	// default values.
 | 	// default values.
 | ||||||
| 	if err := defaults.Set(c); err != nil { | 	if err := defaults.Set(&c); err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	// Track the location where we created this configuration.
 | 	// Track the location where we created this configuration.
 | ||||||
| 	c.unsafeSetPath(path) | 	c.path = path | ||||||
| 
 | 	return &c, nil | ||||||
| 	// Replace environment variables within the configuration file with their
 |  | ||||||
| 	// values from the host system.
 |  | ||||||
| 	b = []byte(os.ExpandEnv(string(b))) |  | ||||||
| 
 |  | ||||||
| 	if err := yaml.Unmarshal(b, c); err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| 	return c, nil |  | ||||||
| } |  | ||||||
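Both the removed ReadConfiguration and the new NewAtPath lean on the defaults package to seed struct-tag defaults before the YAML values are layered on top. A compact sketch of that two-step behaviour, assuming the creasty/defaults and gopkg.in/yaml.v2 packages that Wings appears to use; the struct here is illustrative, not the real Configuration:

```go
package main

import (
	"fmt"

	"github.com/creasty/defaults"
	"gopkg.in/yaml.v2"
)

type example struct {
	Host string `default:"0.0.0.0" yaml:"host"`
	Port int    `default:"8080" yaml:"port"`
}

func main() {
	var c example
	// Step 1: apply the `default:"..."` struct tags.
	if err := defaults.Set(&c); err != nil {
		panic(err)
	}
	// Step 2: values present in the YAML file override the defaults.
	if err := yaml.Unmarshal([]byte("port: 9000"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Host, c.Port) // 0.0.0.0 9000
}
```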
| 
 |  | ||||||
| var mu sync.RWMutex |  | ||||||
| 
 |  | ||||||
| var _config *Configuration |  | ||||||
| var _jwtAlgo *jwt.HMACSHA |  | ||||||
| var _debugViaFlag bool |  | ||||||
| 
 |  | ||||||
 // Set the global configuration instance. This is a blocking operation such that
 // anything trying to set a different configuration value, or read the configuration
 // will be paused until it is complete.
 func Set(c *Configuration) {
 	mu.Lock()
-
 	if _config == nil || _config.AuthenticationToken != c.AuthenticationToken {
 		_jwtAlgo = jwt.NewHS256([]byte(c.AuthenticationToken))
 	}
-
 	_config = c
 	mu.Unlock()
 }
 
+// SetDebugViaFlag tracks if the application is running in debug mode because of
+// a command line flag argument. If so we do not want to store that configuration
+// change to the disk.
 func SetDebugViaFlag(d bool) {
+	mu.Lock()
+	_config.Debug = d
 	_debugViaFlag = d
+	mu.Unlock()
 }
 
-// Get the global configuration instance. This is a read-safe operation that will block
-// if the configuration is presently being modified.
+// Get returns the global configuration instance. This is a thread-safe operation
+// that will block if the configuration is presently being modified.
+//
+// Be aware that you CANNOT make modifications to the currently stored configuration
+// by modifying the struct returned by this function. The only way to make
+// modifications is by using the Update() function and passing data through in
+// the callback.
 func Get() *Configuration {
 	mu.RLock()
-	defer mu.RUnlock()
-
-	return _config
+	// Create a copy of the struct so that all modifications made beyond this
+	// point are immutable.
+	//goland:noinspection GoVetCopyLock
+	c := *_config
+	mu.RUnlock()
+	return &c
 }
 
-// Returns the in-memory JWT algorithm.
+// Update performs an in-situ update of the global configuration object using
+// a thread-safe mutex lock. This is the correct way to make modifications to
+// the global configuration.
+func Update(callback func(c *Configuration)) {
+	mu.Lock()
+	callback(_config)
+	mu.Unlock()
+}
+
+// GetJwtAlgorithm returns the in-memory JWT algorithm.
 func GetJwtAlgorithm() *jwt.HMACSHA {
 	mu.RLock()
 	defer mu.RUnlock()
-
 	return _jwtAlgo
 }
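To make the copy-on-read semantics concrete, here is a minimal sketch of how calling code is expected to interact with this API, assuming the import path used elsewhere in this diff (github.com/pterodactyl/wings/config):

```go
package main

import "github.com/pterodactyl/wings/config"

func main() {
	// Reads return a copy; mutating it does not touch the global state.
	c := config.Get()
	c.Debug = true // affects only this local copy

	// Writes go through Update so the shared instance stays mutex-guarded.
	config.Update(func(c *config.Configuration) {
		c.Debug = true
	})
}
```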
| 
 | 
 | ||||||
| // Create a new struct and set the path where it should be stored.
 | // WriteToDisk writes the configuration to the disk. This is a thread safe operation
 | ||||||
| func NewFromPath(path string) (*Configuration, error) { | // and will only allow one write at a time. Additional calls while writing are
 | ||||||
| 	c := new(Configuration) | // queued up.
 | ||||||
| 	if err := defaults.Set(c); err != nil { | func WriteToDisk(c *Configuration) error { | ||||||
| 		return c, err | 	_writeLock.Lock() | ||||||
|  | 	defer _writeLock.Unlock() | ||||||
|  | 
 | ||||||
|  | 	//goland:noinspection GoVetCopyLock
 | ||||||
|  | 	ccopy := *c | ||||||
|  | 	// If debugging is set with the flag, don't save that to the configuration file,
 | ||||||
|  | 	// otherwise you'll always end up in debug mode.
 | ||||||
|  | 	if _debugViaFlag { | ||||||
|  | 		ccopy.Debug = false | ||||||
|  | 	} | ||||||
|  | 	if c.path == "" { | ||||||
|  | 		return errors.New("cannot write configuration, no path defined in struct") | ||||||
|  | 	} | ||||||
|  | 	b, err := yaml.Marshal(&ccopy) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	if err := ioutil.WriteFile(c.path, b, 0600); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| 	c.unsafeSetPath(path) | // EnsurePterodactylUser ensures that the Pterodactyl core user exists on the
 | ||||||
| 
 | // system. This user will be the owner of all data in the root data directory
 | ||||||
| 	return c, nil | // and is used as the user within containers. If files are not owned by this
 | ||||||
| } | // user there will be issues with permissions on Docker mount points.
 | ||||||
| 
 |  | ||||||
| // Sets the path where the configuration file is located on the server. This function should
 |  | ||||||
| // not be called except by processes that are generating the configuration such as the configuration
 |  | ||||||
| // command shipped with this software.
 |  | ||||||
| func (c *Configuration) unsafeSetPath(path string) { |  | ||||||
| 	c.Lock() |  | ||||||
| 	c.path = path |  | ||||||
| 	c.Unlock() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Returns the path for this configuration file.
 |  | ||||||
| func (c *Configuration) GetPath() string { |  | ||||||
| 	c.RLock() |  | ||||||
| 	defer c.RUnlock() |  | ||||||
| 
 |  | ||||||
| 	return c.path |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Ensures that the Pterodactyl core user exists on the system. This user will be the
 |  | ||||||
| // owner of all data in the root data directory and is used as the user within containers.
 |  | ||||||
| //
 | //
 | ||||||
| // If files are not owned by this user there will be issues with permissions on Docker
 | // This function IS NOT thread safe and should only be called in the main thread
 | ||||||
| // mount points.
 | // when the application is booting.
 | ||||||
| func (c *Configuration) EnsurePterodactylUser() (*user.User, error) { | func EnsurePterodactylUser() error { | ||||||
| 	sysName, err := getSystemName() | 	sysName, err := getSystemName() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Our way of detecting if wings is running inside of Docker.
 | 	// Our way of detecting if wings is running inside of Docker.
 | ||||||
| 	if sysName == "busybox" { | 	if sysName == "busybox" { | ||||||
| 		uid := os.Getenv("WINGS_UID") | 		_config.System.Username = system.FirstNotEmpty(os.Getenv("WINGS_USERNAME"), "pterodactyl") | ||||||
| 		if uid == "" { | 		_config.System.User.Uid = system.MustInt(system.FirstNotEmpty(os.Getenv("WINGS_UID"), "988")) | ||||||
| 			uid = "988" | 		_config.System.User.Gid = system.MustInt(system.FirstNotEmpty(os.Getenv("WINGS_UID"), "988")) | ||||||
|  | 		return nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 		gid := os.Getenv("WINGS_GID") | 	u, err := user.Lookup(_config.System.Username) | ||||||
| 		if gid == "" { |  | ||||||
| 			gid = "988" |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		username := os.Getenv("WINGS_USERNAME") |  | ||||||
| 		if username == "" { |  | ||||||
| 			username = "pterodactyl" |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		u := &user.User{ |  | ||||||
| 			Uid:      uid, |  | ||||||
| 			Gid:      gid, |  | ||||||
| 			Username: username, |  | ||||||
| 		} |  | ||||||
| 		return u, c.setSystemUser(u) |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	u, err := user.Lookup(c.System.Username) |  | ||||||
| 
 |  | ||||||
| 	// If an error is returned but it isn't the unknown user error just abort
 | 	// If an error is returned but it isn't the unknown user error just abort
 | ||||||
| 	// the process entirely. If we did find a user, return it immediately.
 | 	// the process entirely. If we did find a user, return it immediately.
 | ||||||
| 	if err == nil { | 	if err != nil { | ||||||
| 		return u, c.setSystemUser(u) | 		if _, ok := err.(user.UnknownUserError); !ok { | ||||||
| 	} else if _, ok := err.(user.UnknownUserError); !ok { | 			return err | ||||||
| 		return nil, err | 		} | ||||||
|  | 	} else { | ||||||
|  | 		_config.System.User.Uid = system.MustInt(u.Uid) | ||||||
|  | 		_config.System.User.Gid = system.MustInt(u.Gid) | ||||||
|  | 		return nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	command := fmt.Sprintf("useradd --system --no-create-home --shell /usr/sbin/nologin %s", c.System.Username) | 	command := fmt.Sprintf("useradd --system --no-create-home --shell /usr/sbin/nologin %s", _config.System.Username) | ||||||
| 
 | 	// Alpine Linux is the only OS we currently support that doesn't work with the useradd
 | ||||||
| 	// Alpine Linux is the only OS we currently support that doesn't work with the useradd command, so
 | 	// command, so in those cases we just modify the command a bit to work as expected.
 | ||||||
| 	// in those cases we just modify the command a bit to work as expected.
 |  | ||||||
| 	if strings.HasPrefix(sysName, "alpine") { | 	if strings.HasPrefix(sysName, "alpine") { | ||||||
| 		command = fmt.Sprintf("adduser -S -D -H -G %[1]s -s /sbin/nologin %[1]s", c.System.Username) | 		command = fmt.Sprintf("adduser -S -D -H -G %[1]s -s /sbin/nologin %[1]s", _config.System.Username) | ||||||
| 
 |  | ||||||
| 		// We have to create the group first on Alpine, so do that here before continuing on
 | 		// We have to create the group first on Alpine, so do that here before continuing on
 | ||||||
| 		// to the user creation process.
 | 		// to the user creation process.
 | ||||||
| 		if _, err := exec.Command("addgroup", "-S", c.System.Username).Output(); err != nil { | 		if _, err := exec.Command("addgroup", "-S", _config.System.Username).Output(); err != nil { | ||||||
| 			return nil, err | 			return err | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	split := strings.Split(command, " ") | 	split := strings.Split(command, " ") | ||||||
| 	if _, err := exec.Command(split[0], split[1:]...).Output(); err != nil { | 	if _, err := exec.Command(split[0], split[1:]...).Output(); err != nil { | ||||||
| 		return nil, err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 	u, err = user.Lookup(_config.System.Username) | ||||||
| 	if u, err := user.Lookup(c.System.Username); err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| 	} else { |  | ||||||
| 		return u, c.setSystemUser(u) |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Set the system user into the configuration and then write it to the disk so that
 |  | ||||||
| // it is persisted on boot.
 |  | ||||||
| func (c *Configuration) setSystemUser(u *user.User) error { |  | ||||||
| 	uid, err := strconv.Atoi(u.Uid) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  | 	_config.System.User.Uid = system.MustInt(u.Uid) | ||||||
|  | 	_config.System.User.Gid = system.MustInt(u.Gid) | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
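The container branch above relies on two small helpers referenced as system.FirstNotEmpty and system.MustInt; their implementations are not part of this hunk. A plausible sketch of what they do, written here as an assumption rather than the real code in github.com/pterodactyl/wings/system:

```go
package system

import "strconv"

// FirstNotEmpty returns the first argument that is not an empty string.
func FirstNotEmpty(v ...string) string {
	for _, s := range v {
		if s != "" {
			return s
		}
	}
	return ""
}

// MustInt converts a string to an int and panics if the value is malformed.
func MustInt(s string) int {
	i, err := strconv.Atoi(s)
	if err != nil {
		panic(err)
	}
	return i
}
```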
| 
 | 
 | ||||||
| 	gid, err := strconv.Atoi(u.Gid) | // FromFile reads the configuration from the provided file and stores it in the
 | ||||||
|  | // global singleton for this instance.
 | ||||||
|  | func FromFile(path string) error { | ||||||
|  | 	b, err := ioutil.ReadFile(path) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 	c, err := NewAtPath(path) | ||||||
| 	c.Lock() |  | ||||||
| 	c.System.Username = u.Username |  | ||||||
| 	c.System.User.Uid = uid |  | ||||||
| 	c.System.User.Gid = gid |  | ||||||
| 	c.Unlock() |  | ||||||
| 
 |  | ||||||
| 	return c.WriteToDisk() |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Writes the configuration to the disk as a blocking operation by obtaining an exclusive
 |  | ||||||
| // lock on the file. This prevents something else from writing at the exact same time and
 |  | ||||||
| // leading to bad data conditions.
 |  | ||||||
| func (c *Configuration) WriteToDisk() error { |  | ||||||
| 	// Obtain an exclusive write against the configuration file.
 |  | ||||||
| 	c.writeLock.Lock() |  | ||||||
| 	defer c.writeLock.Unlock() |  | ||||||
| 
 |  | ||||||
| 	ccopy := *c |  | ||||||
| 	// If debugging is set with the flag, don't save that to the configuration file, otherwise
 |  | ||||||
| 	// you'll always end up in debug mode.
 |  | ||||||
| 	if _debugViaFlag { |  | ||||||
| 		ccopy.Debug = false |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if c.path == "" { |  | ||||||
| 		return errors.New("cannot write configuration, no path defined in struct") |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	b, err := yaml.Marshal(&ccopy) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  | 	// Replace environment variables within the configuration file with their
 | ||||||
|  | 	// values from the host system.
 | ||||||
|  | 	b = []byte(os.ExpandEnv(string(b))) | ||||||
|  | 	if err := yaml.Unmarshal(b, c); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	// Store this configuration in the global state.
 | ||||||
|  | 	Set(c) | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
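Taken together, the new package-level functions replace the old method-based flow. A minimal sketch of how a caller might boot from a file under the new API; the function names are the ones introduced in this diff, while the config path and error handling are illustrative:

```go
package main

import (
	"log"

	"github.com/pterodactyl/wings/config"
)

func main() {
	// Parse the file, apply defaults and env expansion, and store the result
	// in the package-level singleton. The path is a placeholder.
	if err := config.FromFile("/etc/pterodactyl/config.yml"); err != nil {
		log.Fatal(err)
	}
	// These helpers operate on the global instance and are intended to run
	// once from the main thread during boot.
	if err := config.ConfigureDirectories(); err != nil {
		log.Fatal(err)
	}
	if err := config.EnsurePterodactylUser(); err != nil {
		log.Fatal(err)
	}
}
```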
| 
 | 
 | ||||||
-	if err := ioutil.WriteFile(c.GetPath(), b, 0644); err != nil {
+// ConfigureDirectories ensures that all of the system directories exist on the
+// system. These directories are created so that only the owner can read the data,
+// and no other users.
+//
+// This function IS NOT thread-safe.
+func ConfigureDirectories() error {
+	root := _config.System.RootDirectory
+	log.WithField("path", root).Debug("ensuring root data directory exists")
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return err
+	}
+
+	// There are a non-trivial number of users out there whose data directories are actually a
+	// symlink to another location on the disk. If we do not resolve that final destination at this
+	// point things will appear to work, but endless errors will be encountered when we try to
+	// verify accessed paths since they will all end up resolving outside the expected data directory.
+	//
+	// For the sake of automating away as much of this as possible, see if the data directory is a
+	// symlink, and if so resolve to its final real path, and then update the configuration to use
+	// that.
+	if d, err := filepath.EvalSymlinks(_config.System.Data); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	} else if d != _config.System.Data {
+		_config.System.Data = d
+	}
+
+	log.WithField("path", _config.System.Data).Debug("ensuring server data directory exists")
+	if err := os.MkdirAll(_config.System.Data, 0700); err != nil {
+		return err
+	}
+
+	log.WithField("path", _config.System.ArchiveDirectory).Debug("ensuring archive data directory exists")
+	if err := os.MkdirAll(_config.System.ArchiveDirectory, 0700); err != nil {
+		return err
+	}
+
+	log.WithField("path", _config.System.BackupDirectory).Debug("ensuring backup data directory exists")
+	if err := os.MkdirAll(_config.System.BackupDirectory, 0700); err != nil {
 		return err
 	}
 
 	return nil
 }
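A small illustration of why the symlink resolution above matters. The paths are hypothetical, assuming the data directory is a symlink such as /var/lib/pterodactyl/volumes -> /mnt/data/volumes:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Resolves the symlink to its final real path; server paths are then
	// validated against this resolved prefix rather than the symlink itself.
	resolved, err := filepath.EvalSymlinks("/var/lib/pterodactyl/volumes")
	if err != nil {
		fmt.Println("path does not exist yet:", err)
		return
	}
	fmt.Println(filepath.Join(resolved, "some-server-uuid"))
}
```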
| 
 | 
 | ||||||
|  | // EnableLogRotation writes a logrotate file for wings to the system logrotate
 | ||||||
|  | // configuration directory if one exists and a logrotate file is not found. This
 | ||||||
|  | // allows us to basically automate away the log rotation for most installs, but
 | ||||||
|  | // also enable users to make modifications on their own.
 | ||||||
|  | //
 | ||||||
|  | // This function IS NOT thread-safe.
 | ||||||
|  | func EnableLogRotation() error { | ||||||
|  | 	if !_config.System.EnableLogRotate { | ||||||
|  | 		log.Info("skipping log rotate configuration, disabled in wings config file") | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if st, err := os.Stat("/etc/logrotate.d"); err != nil && !os.IsNotExist(err) { | ||||||
|  | 		return err | ||||||
|  | 	} else if (err != nil && os.IsNotExist(err)) || !st.IsDir() { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	if _, err := os.Stat("/etc/logrotate.d/wings"); err == nil || !os.IsNotExist(err) { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	log.Info("no log rotation configuration found: adding file now") | ||||||
|  | 	// If we've gotten to this point it means the logrotate directory exists on the system
 | ||||||
|  | 	// but there is not a file for wings already. In that case, let us write a new file to
 | ||||||
|  | 	// it so files can be rotated easily.
 | ||||||
|  | 	f, err := os.Create("/etc/logrotate.d/wings") | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	defer f.Close() | ||||||
|  | 
 | ||||||
|  | 	t, err := template.New("logrotate").Parse(` | ||||||
|  | {{.LogDirectory}}/wings.log { | ||||||
|  |     size 10M | ||||||
|  |     compress | ||||||
|  |     delaycompress | ||||||
|  |     dateext | ||||||
|  |     maxage 7 | ||||||
|  |     missingok | ||||||
|  |     notifempty | ||||||
|  |     create 0640 {{.User.Uid}} {{.User.Gid}} | ||||||
|  |     postrotate | ||||||
|  |         killall -SIGHUP wings | ||||||
|  |     endscript | ||||||
|  | }`) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return errors.Wrap(t.Execute(f, _config.System), "config: failed to write logrotate to disk") | ||||||
|  | } | ||||||
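For reference, with the default log directory of /var/log/pterodactyl and a user created with UID/GID 988 (the defaults shown elsewhere in this diff), the template above would render roughly as follows; actual paths and IDs depend on the local configuration:

```
/var/log/pterodactyl/wings.log {
    size 10M
    compress
    delaycompress
    dateext
    maxage 7
    missingok
    notifempty
    create 0640 988 988
    postrotate
        killall -SIGHUP wings
    endscript
}
```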
|  | 
 | ||||||
|  | // GetStatesPath returns the location of the JSON file that tracks server states.
 | ||||||
|  | func (sc *SystemConfiguration) GetStatesPath() string { | ||||||
|  | 	return path.Join(sc.RootDirectory, "/states.json") | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ConfigureTimezone sets the timezone data for the configuration if it is
 | ||||||
|  | // currently missing. If a value has been set, this functionality will only run
 | ||||||
|  | // to validate that the timezone being used is valid.
 | ||||||
|  | //
 | ||||||
|  | // This function IS NOT thread-safe.
 | ||||||
|  | func ConfigureTimezone() error { | ||||||
|  | 	if _config.System.Timezone == "" { | ||||||
|  | 		b, err := ioutil.ReadFile("/etc/timezone") | ||||||
|  | 		if err != nil { | ||||||
|  | 			if !os.IsNotExist(err) { | ||||||
|  | 				return errors.WithMessage(err, "config: failed to open timezone file") | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			_config.System.Timezone = "UTC" | ||||||
|  | 			ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) | ||||||
|  | 			defer cancel() | ||||||
|  | 			// Okay, file isn't found on this OS, we will try using timedatectl to handle this. If this
 | ||||||
|  | 			// command fails, exit, but if it returns a value use that. If no value is returned we will
 | ||||||
|  | 			// fall through to UTC to get Wings booted at least.
 | ||||||
|  | 			out, err := exec.CommandContext(ctx, "timedatectl").Output() | ||||||
|  | 			if err != nil { | ||||||
|  | 				log.WithField("error", err).Warn("failed to execute \"timedatectl\" to determine system timezone, falling back to UTC") | ||||||
|  | 				return nil | ||||||
|  | 			} | ||||||
|  | 
 | ||||||
|  | 			r := regexp.MustCompile(`Time zone: ([\w/]+)`) | ||||||
|  | 			matches := r.FindSubmatch(out) | ||||||
|  | 			if len(matches) != 2 || string(matches[1]) == "" { | ||||||
|  | 				log.Warn("failed to parse timezone from \"timedatectl\" output, falling back to UTC") | ||||||
|  | 				return nil | ||||||
|  | 			} | ||||||
|  | 			_config.System.Timezone = string(matches[1]) | ||||||
|  | 		} else { | ||||||
|  | 			_config.System.Timezone = string(b) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	_config.System.Timezone = regexp.MustCompile(`(?i)[^a-z_/]+`).ReplaceAllString(_config.System.Timezone, "") | ||||||
|  | 	_, err := time.LoadLocation(_config.System.Timezone) | ||||||
|  | 
 | ||||||
|  | 	return errors.WithMessage(err, fmt.Sprintf("the supplied timezone %s is invalid", _config.System.Timezone)) | ||||||
|  | } | ||||||
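The regular expression above expects the "Time zone:" line that timedatectl prints. A short, self-contained sketch of that parsing against sample output; the sample text is illustrative and real output varies by system:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Illustrative snippet of `timedatectl` output.
	out := []byte("               Local time: Wed 2021-01-20 10:00:00 UTC\n" +
		"                Time zone: Europe/Berlin (CET, +0100)\n")

	r := regexp.MustCompile(`Time zone: ([\w/]+)`)
	matches := r.FindSubmatch(out)
	if len(matches) != 2 || string(matches[1]) == "" {
		fmt.Println("falling back to UTC")
		return
	}
	fmt.Println(string(matches[1])) // Europe/Berlin
}
```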
|  | 
 | ||||||
 // Gets the system release name.
 func getSystemName() (string, error) {
 	// use osrelease to get release version and ID
-	if release, err := osrelease.Read(); err != nil {
-		return "", err
-	} else {
-		return release["ID"], nil
-	}
-}
+	release, err := osrelease.Read()
+	if err != nil {
+		return "", err
+	}
+	return release["ID"], nil
+}
|  |  | ||||||
@@ -12,7 +12,6 @@ type dockerNetworkInterfaces struct {
 		Subnet  string `default:"172.18.0.0/16"`
 		Gateway string `default:"172.18.0.1"`
 	}
-
 	V6 struct {
 		Subnet  string `default:"fdba:17c8:6c94::/64"`
 		Gateway string `default:"fdba:17c8:6c94::1011"`
@@ -39,8 +38,8 @@ type DockerNetworkConfiguration struct {
 	Interfaces dockerNetworkInterfaces `yaml:"interfaces"`
 }
 
-// Defines the docker configuration used by the daemon when interacting with
-// containers and networks on the system.
+// DockerConfiguration defines the docker configuration used by the daemon when
+// interacting with containers and networks on the system.
 type DockerConfiguration struct {
 	// Network configuration that should be used when creating a new network
 	// for containers run through the daemon.
@@ -58,23 +57,22 @@ type DockerConfiguration struct {
 	TmpfsSize uint `default:"100" json:"tmpfs_size" yaml:"tmpfs_size"`
 }
 
-// RegistryConfiguration .
+// RegistryConfiguration defines the authentication credentials for a given
+// Docker registry.
 type RegistryConfiguration struct {
 	Username string `yaml:"username"`
 	Password string `yaml:"password"`
 }
 
-// Base64 .
+// Base64 returns the authentication for a given registry as a base64 encoded
+// string value.
 func (c RegistryConfiguration) Base64() (string, error) {
-	authConfig := types.AuthConfig{
+	b, err := json.Marshal(types.AuthConfig{
 		Username: c.Username,
 		Password: c.Password,
-	}
-
-	b, err := json.Marshal(authConfig)
+	})
 	if err != nil {
 		return "", err
 	}
-
 	return base64.URLEncoding.EncodeToString(b), nil
 }
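Where that encoded string ends up is not shown in this hunk. A hedged sketch of the typical consumer, passing the value as the registry auth header on an image pull with the Docker SDK; the function and variable names here are illustrative:

```go
package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// pull streams an image pull using the base64 auth string produced by
// RegistryConfiguration.Base64().
func pull(ctx context.Context, cli *client.Client, image, auth string) error {
	out, err := cli.ImagePull(ctx, image, types.ImagePullOptions{RegistryAuth: auth})
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(os.Stdout, out) // stream pull progress to stdout
	return err
}
```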
@@ -1,268 +0,0 @@
| package config |  | ||||||
| 
 |  | ||||||
| import ( |  | ||||||
| 	"context" |  | ||||||
| 	"fmt" |  | ||||||
| 	"html/template" |  | ||||||
| 	"io/ioutil" |  | ||||||
| 	"os" |  | ||||||
| 	"os/exec" |  | ||||||
| 	"path" |  | ||||||
| 	"path/filepath" |  | ||||||
| 	"regexp" |  | ||||||
| 	"time" |  | ||||||
| 
 |  | ||||||
| 	"emperror.dev/errors" |  | ||||||
| 	"github.com/apex/log" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| // Defines basic system configuration settings.
 |  | ||||||
| type SystemConfiguration struct { |  | ||||||
| 	// The root directory where all of the pterodactyl data is stored at.
 |  | ||||||
| 	RootDirectory string `default:"/var/lib/pterodactyl" yaml:"root_directory"` |  | ||||||
| 
 |  | ||||||
| 	// Directory where logs for server installations and other wings events are logged.
 |  | ||||||
| 	LogDirectory string `default:"/var/log/pterodactyl" yaml:"log_directory"` |  | ||||||
| 
 |  | ||||||
| 	// Directory where the server data is stored at.
 |  | ||||||
| 	Data string `default:"/var/lib/pterodactyl/volumes" yaml:"data"` |  | ||||||
| 
 |  | ||||||
| 	// Directory where server archives for transferring will be stored.
 |  | ||||||
| 	ArchiveDirectory string `default:"/var/lib/pterodactyl/archives" yaml:"archive_directory"` |  | ||||||
| 
 |  | ||||||
| 	// Directory where local backups will be stored on the machine.
 |  | ||||||
| 	BackupDirectory string `default:"/var/lib/pterodactyl/backups" yaml:"backup_directory"` |  | ||||||
| 
 |  | ||||||
| 	// The user that should own all of the server files, and be used for containers.
 |  | ||||||
| 	Username string `default:"pterodactyl" yaml:"username"` |  | ||||||
| 
 |  | ||||||
| 	// The timezone for this Wings instance. This is detected by Wings automatically if possible,
 |  | ||||||
| 	// and falls back to UTC if not able to be detected. If you need to set this manually, that
 |  | ||||||
| 	// can also be done.
 |  | ||||||
| 	//
 |  | ||||||
| 	// This timezone value is passed into all containers created by Wings.
 |  | ||||||
| 	Timezone string `yaml:"timezone"` |  | ||||||
| 
 |  | ||||||
| 	// Definitions for the user that gets created to ensure that we can quickly access
 |  | ||||||
| 	// this information without constantly having to do a system lookup.
 |  | ||||||
| 	User struct { |  | ||||||
| 		Uid int |  | ||||||
| 		Gid int |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// The amount of time in seconds that can elapse before a server's disk space calculation is
 |  | ||||||
| 	// considered stale and a re-check should occur. DANGER: setting this value too low can seriously
 |  | ||||||
| 	// impact system performance and cause massive I/O bottlenecks and high CPU usage for the Wings
 |  | ||||||
| 	// process.
 |  | ||||||
| 	//
 |  | ||||||
| 	// Set to 0 to disable disk checking entirely. This will always return 0 for the disk space used
 |  | ||||||
| 	// by a server and should only be set in extreme scenarios where performance is critical and
 |  | ||||||
| 	// disk usage is not a concern.
 |  | ||||||
| 	DiskCheckInterval int64 `default:"150" yaml:"disk_check_interval"` |  | ||||||
| 
 |  | ||||||
| 	// If set to true, file permissions for a server will be checked when the process is
 |  | ||||||
| 	// booted. This can cause boot delays if the server has a large amount of files. In most
 |  | ||||||
| 	// cases disabling this should not have any major impact unless external processes are
 |  | ||||||
| 	// frequently modifying a servers' files.
 |  | ||||||
| 	CheckPermissionsOnBoot bool `default:"true" yaml:"check_permissions_on_boot"` |  | ||||||
| 
 |  | ||||||
| 	// If set to false Wings will not attempt to write a log rotate configuration to the disk
 |  | ||||||
| 	// when it boots and one is not detected.
 |  | ||||||
| 	EnableLogRotate bool `default:"true" yaml:"enable_log_rotate"` |  | ||||||
| 
 |  | ||||||
| 	// The number of lines to send when a server connects to the websocket.
 |  | ||||||
| 	WebsocketLogCount int `default:"150" yaml:"websocket_log_count"` |  | ||||||
| 
 |  | ||||||
| 	Sftp SftpConfiguration `yaml:"sftp"` |  | ||||||
| 
 |  | ||||||
| 	CrashDetection CrashDetection `yaml:"crash_detection"` |  | ||||||
| 
 |  | ||||||
| 	Backups Backups `yaml:"backups"` |  | ||||||
| 
 |  | ||||||
| 	Transfers Transfers `yaml:"transfers"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type CrashDetection struct { |  | ||||||
| 	// Determines if Wings should detect a server that stops with a normal exit code of
 |  | ||||||
| 	// "0" as being crashed if the process stopped without any Wings interaction. E.g.
 |  | ||||||
| 	// the user did not press the stop button, but the process stopped cleanly.
 |  | ||||||
| 	DetectCleanExitAsCrash bool `default:"true" yaml:"detect_clean_exit_as_crash"` |  | ||||||
| 
 |  | ||||||
| 	// Timeout specifies the timeout between crashes that will not cause the server
 |  | ||||||
| 	// to be automatically restarted, this value is used to prevent servers from
 |  | ||||||
| 	// becoming stuck in a boot-loop after multiple consecutive crashes.
 |  | ||||||
| 	Timeout int `default:"60" json:"timeout"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type Backups struct { |  | ||||||
| 	// WriteLimit imposes a Disk I/O write limit on backups to the disk, this affects all
 |  | ||||||
| 	// backup drivers as the archiver must first write the file to the disk in order to
 |  | ||||||
| 	// upload it to any external storage provider.
 |  | ||||||
| 	//
 |  | ||||||
| 	// If the value is less than 1, the write speed is unlimited,
 |  | ||||||
| 	// if the value is greater than 0, the write speed is the value in MiB/s.
 |  | ||||||
| 	//
 |  | ||||||
| 	// Defaults to 0 (unlimited)
 |  | ||||||
| 	WriteLimit int `default:"0" yaml:"write_limit"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| type Transfers struct { |  | ||||||
| 	// DownloadLimit imposes a Network I/O read limit when downloading a transfer archive.
 |  | ||||||
| 	//
 |  | ||||||
| 	// If the value is less than 1, the write speed is unlimited,
 |  | ||||||
| 	// if the value is greater than 0, the write speed is the value in MiB/s.
 |  | ||||||
| 	//
 |  | ||||||
| 	// Defaults to 0 (unlimited)
 |  | ||||||
| 	DownloadLimit int `default:"0" yaml:"download_limit"` |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Ensures that all of the system directories exist on the system. These directories are
 |  | ||||||
| // created so that only the owner can read the data, and no other users.
 |  | ||||||
| func (sc *SystemConfiguration) ConfigureDirectories() error { |  | ||||||
| 	log.WithField("path", sc.RootDirectory).Debug("ensuring root data directory exists") |  | ||||||
| 	if err := os.MkdirAll(sc.RootDirectory, 0700); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// There are a non-trivial number of users out there whose data directories are actually a
 |  | ||||||
| 	// symlink to another location on the disk. If we do not resolve that final destination at this
 |  | ||||||
| 	// point things will appear to work, but endless errors will be encountered when we try to
 |  | ||||||
| 	// verify accessed paths since they will all end up resolving outside the expected data directory.
 |  | ||||||
| 	//
 |  | ||||||
| 	// For the sake of automating away as much of this as possible, see if the data directory is a
 |  | ||||||
| 	// symlink, and if so resolve to its final real path, and then update the configuration to use
 |  | ||||||
| 	// that.
 |  | ||||||
| 	if d, err := filepath.EvalSymlinks(sc.Data); err != nil { |  | ||||||
| 		if !os.IsNotExist(err) { |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 	} else if d != sc.Data { |  | ||||||
| 		sc.Data = d |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	log.WithField("path", sc.Data).Debug("ensuring server data directory exists") |  | ||||||
| 	if err := os.MkdirAll(sc.Data, 0700); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	log.WithField("path", sc.ArchiveDirectory).Debug("ensuring archive data directory exists") |  | ||||||
| 	if err := os.MkdirAll(sc.ArchiveDirectory, 0700); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	log.WithField("path", sc.BackupDirectory).Debug("ensuring backup data directory exists") |  | ||||||
| 	if err := os.MkdirAll(sc.BackupDirectory, 0700); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Writes a logrotate file for wings to the system logrotate configuration directory if one
 |  | ||||||
| // exists and a logrotate file is not found. This allows us to basically automate away the log
 |  | ||||||
| // rotation for most installs, but also enable users to make modifications on their own.
 |  | ||||||
| func (sc *SystemConfiguration) EnableLogRotation() error { |  | ||||||
| 	// Do nothing if not enabled.
 |  | ||||||
| 	if sc.EnableLogRotate == false { |  | ||||||
| 		log.Info("skipping log rotate configuration, disabled in wings config file") |  | ||||||
| 
 |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if st, err := os.Stat("/etc/logrotate.d"); err != nil && !os.IsNotExist(err) { |  | ||||||
| 		return err |  | ||||||
| 	} else if (err != nil && os.IsNotExist(err)) || !st.IsDir() { |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if _, err := os.Stat("/etc/logrotate.d/wings"); err != nil && !os.IsNotExist(err) { |  | ||||||
| 		return err |  | ||||||
| 	} else if err == nil { |  | ||||||
| 		return nil |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	log.Info("no log rotation configuration found, system is configured to support it, adding file now") |  | ||||||
| 	// If we've gotten to this point it means the logrotate directory exists on the system
 |  | ||||||
| 	// but there is not a file for wings already. In that case, let us write a new file to
 |  | ||||||
| 	// it so files can be rotated easily.
 |  | ||||||
| 	f, err := os.Create("/etc/logrotate.d/wings") |  | ||||||
| 	if err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 	defer f.Close() |  | ||||||
| 
 |  | ||||||
| 	t, err := template.New("logrotate").Parse(` |  | ||||||
| {{.LogDirectory}}/wings.log { |  | ||||||
|     size 10M |  | ||||||
|     compress |  | ||||||
|     delaycompress |  | ||||||
|     dateext |  | ||||||
|     maxage 7 |  | ||||||
|     missingok |  | ||||||
|     notifempty |  | ||||||
|     create 0640 {{.User.Uid}} {{.User.Gid}} |  | ||||||
|     postrotate |  | ||||||
|         killall -SIGHUP wings |  | ||||||
|     endscript |  | ||||||
| }`) |  | ||||||
| 
 |  | ||||||
| 	if err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return errors.WithMessage(t.Execute(f, sc), "failed to write logrotate file to disk") |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Returns the location of the JSON file that tracks server states.
 |  | ||||||
| func (sc *SystemConfiguration) GetStatesPath() string { |  | ||||||
| 	return path.Join(sc.RootDirectory, "states.json") |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Returns the location of the JSON file that tracks server states.
 |  | ||||||
| func (sc *SystemConfiguration) GetInstallLogPath() string { |  | ||||||
| 	return path.Join(sc.LogDirectory, "install/") |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Configures the timezone data for the configuration if it is currently missing. If
 |  | ||||||
| // a value has been set, this functionality will only run to validate that the timezone
 |  | ||||||
| // being used is valid.
 |  | ||||||
| func (sc *SystemConfiguration) ConfigureTimezone() error { |  | ||||||
| 	if sc.Timezone == "" { |  | ||||||
| 		if b, err := ioutil.ReadFile("/etc/timezone"); err != nil { |  | ||||||
| 			if !os.IsNotExist(err) { |  | ||||||
| 				return errors.WithMessage(err, "failed to open /etc/timezone for automatic server timezone calibration") |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			ctx, _ := context.WithTimeout(context.Background(), time.Second*5) |  | ||||||
| 			// Okay, file isn't found on this OS, we will try using timedatectl to handle this. If this
 |  | ||||||
| 			// command fails, exit, but if it returns a value use that. If no value is returned we will
 |  | ||||||
| 			// fall through to UTC to get Wings booted at least.
 |  | ||||||
| 			out, err := exec.CommandContext(ctx, "timedatectl").Output() |  | ||||||
| 			if err != nil { |  | ||||||
| 				log.WithField("error", err).Warn("failed to execute \"timedatectl\" to determine system timezone, falling back to UTC") |  | ||||||
| 
 |  | ||||||
| 				sc.Timezone = "UTC" |  | ||||||
| 				return nil |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			r := regexp.MustCompile(`Time zone: ([\w/]+)`) |  | ||||||
| 			matches := r.FindSubmatch(out) |  | ||||||
| 			if len(matches) != 2 || string(matches[1]) == "" { |  | ||||||
| 				log.Warn("failed to parse timezone from \"timedatectl\" output, falling back to UTC") |  | ||||||
| 
 |  | ||||||
| 				sc.Timezone = "UTC" |  | ||||||
| 				return nil |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			sc.Timezone = string(matches[1]) |  | ||||||
| 		} else { |  | ||||||
| 			sc.Timezone = string(b) |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	sc.Timezone = regexp.MustCompile(`(?i)[^a-z_/]+`).ReplaceAllString(sc.Timezone, "") |  | ||||||
| 
 |  | ||||||
| 	_, err := time.LoadLocation(sc.Timezone) |  | ||||||
| 
 |  | ||||||
| 	return errors.WithMessage(err, fmt.Sprintf("the supplied timezone %s is invalid", sc.Timezone)) |  | ||||||
| } |  | ||||||
@@ -1,27 +0,0 @@
| package config |  | ||||||
| 
 |  | ||||||
| type ConsoleThrottles struct { |  | ||||||
| 	// Whether or not the throttler is enabled for this instance.
 |  | ||||||
| 	Enabled bool `json:"enabled" yaml:"enabled" default:"true"` |  | ||||||
| 
 |  | ||||||
| 	// The total number of lines that can be output in a given LineResetInterval period before
 |  | ||||||
| 	// a warning is triggered and counted against the server.
 |  | ||||||
| 	Lines uint64 `json:"lines" yaml:"lines" default:"2000"` |  | ||||||
| 
 |  | ||||||
| 	// The total number of throttle activations that can accumulate before a server is considered
 |  | ||||||
| 	// to be breaching and will be stopped. This value is decremented by one every DecayInterval.
 |  | ||||||
| 	MaximumTriggerCount uint64 `json:"maximum_trigger_count" yaml:"maximum_trigger_count" default:"5"` |  | ||||||
| 
 |  | ||||||
| 	// The amount of time after which the number of lines processed is reset to 0. This runs in
 |  | ||||||
| 	// a constant loop and is not affected by the current console output volumes. By default, this
 |  | ||||||
| 	// will reset the processed line count back to 0 every 100ms.
 |  | ||||||
| 	LineResetInterval uint64 `json:"line_reset_interval" yaml:"line_reset_interval" default:"100"` |  | ||||||
| 
 |  | ||||||
| 	// The amount of time in milliseconds that must pass without an output warning being triggered
 |  | ||||||
| 	// before a throttle activation is decremented.
 |  | ||||||
| 	DecayInterval uint64 `json:"decay_interval" yaml:"decay_interval" default:"10000"` |  | ||||||
| 
 |  | ||||||
| 	// The amount of time that a server is allowed to be stopping for before it is terminated
 |  | ||||||
| 	// forcefully if it triggers output throttles.
 |  | ||||||
| 	StopGracePeriod uint `json:"stop_grace_period" yaml:"stop_grace_period" default:"15"` |  | ||||||
| } |  | ||||||
@@ -22,6 +22,7 @@ services:
       - "/var/lib/pterodactyl/:/var/lib/pterodactyl/"
       - "/var/log/pterodactyl/:/var/log/pterodactyl/"
       - "/tmp/pterodactyl/:/tmp/pterodactyl/"
+      - "/etc/ssl/certs:/etc/ssl/certs:ro"
       # you may need /srv/daemon-data if you are upgrading from an old daemon
       #- "/srv/daemon-data/:/srv/daemon-data/"
       # Required for ssl if you use let's encrypt. uncomment to use.
@@ -6,112 +6,98 @@ import (
 	"sync"
 
 	"github.com/apex/log"
-
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/client"
 	"github.com/pterodactyl/wings/config"
 )
 
-var _cmu sync.Mutex
+var _conce sync.Once
 var _client *client.Client
 
-// Return a Docker client to be used throughout the codebase. Once a client has been created it
-// will be returned for all subsequent calls to this function.
-func DockerClient() (*client.Client, error) {
-	_cmu.Lock()
-	defer _cmu.Unlock()
-
-	if _client != nil {
-		return _client, nil
-	}
-
-	_client, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
-
+// Docker returns a docker client to be used throughout the codebase. Once a
+// client has been created it will be returned for all subsequent calls to this
+// function.
+func Docker() (*client.Client, error) {
+	var err error
+	_conce.Do(func() {
+		_client, err = client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
+	})
 	return _client, err
 }
 
-// Configures the required network for the docker environment.
-func ConfigureDocker(c *config.DockerConfiguration) error {
+// ConfigureDocker configures the required network for the docker environment.
+func ConfigureDocker(ctx context.Context) error {
 	// Ensure the required docker network exists on the system.
-	cli, err := DockerClient()
+	cli, err := Docker()
 	if err != nil {
 		return err
 	}
 
-	resource, err := cli.NetworkInspect(context.Background(), c.Network.Name, types.NetworkInspectOptions{})
-	if err != nil && client.IsErrNotFound(err) {
-		log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
-		return createDockerNetwork(cli, c)
-	} else if err != nil {
-		log.WithField("error", err).Fatal("failed to create required docker network for containers")
+	nw := config.Get().Docker.Network
+	resource, err := cli.NetworkInspect(ctx, nw.Name, types.NetworkInspectOptions{})
+	if err != nil {
+		if client.IsErrNotFound(err) {
+			log.Info("creating missing pterodactyl0 interface, this could take a few seconds...")
+			if err := createDockerNetwork(ctx, cli); err != nil {
+				return err
+			}
+		} else {
+			return err
+		}
 	}
 
-	switch resource.Driver {
-	case "host":
-		c.Network.Interface = "127.0.0.1"
-		c.Network.ISPN = false
-		return nil
-	case "overlay":
-	case "weavemesh":
-		c.Network.Interface = ""
-		c.Network.ISPN = true
-		return nil
-	default:
-		c.Network.ISPN = false
-	}
-
+	config.Update(func(c *config.Configuration) {
+		c.Docker.Network.Driver = resource.Driver
+		switch c.Docker.Network.Driver {
+		case "host":
+			c.Docker.Network.Interface = "127.0.0.1"
+			c.Docker.Network.ISPN = false
+		case "overlay":
+			fallthrough
+		case "weavemesh":
+			c.Docker.Network.Interface = ""
+			c.Docker.Network.ISPN = true
+		default:
+			c.Docker.Network.ISPN = false
+		}
+	})
 	return nil
 }
 
 // Creates a new network on the machine if one does not exist already.
-func createDockerNetwork(cli *client.Client, c *config.DockerConfiguration) error {
-	_, err := cli.NetworkCreate(context.Background(), c.Network.Name, types.NetworkCreate{
-		Driver:     c.Network.Driver,
+func createDockerNetwork(ctx context.Context, cli *client.Client) error {
+	nw := config.Get().Docker.Network
+	_, err := cli.NetworkCreate(ctx, nw.Name, types.NetworkCreate{
+		Driver:     nw.Driver,
 		EnableIPv6: true,
-		Internal:   c.Network.IsInternal,
+		Internal:   nw.IsInternal,
 		IPAM: &network.IPAM{
-			Config: []network.IPAMConfig{
-				{
-					Subnet:  c.Network.Interfaces.V4.Subnet,
-					Gateway: c.Network.Interfaces.V4.Gateway,
-				},
-				{
-					Subnet:  c.Network.Interfaces.V6.Subnet,
-					Gateway: c.Network.Interfaces.V6.Gateway,
-				},
-			},
+			Config: []network.IPAMConfig{{
+				Subnet:  nw.Interfaces.V4.Subnet,
+				Gateway: nw.Interfaces.V4.Gateway,
+			}, {
+				Subnet:  nw.Interfaces.V6.Subnet,
+				Gateway: nw.Interfaces.V6.Gateway,
+			}},
 		},
 		Options: map[string]string{
 			"encryption": "false",
 			"com.docker.network.bridge.default_bridge":       "false",
-			"com.docker.network.bridge.enable_icc":           strconv.FormatBool(c.Network.EnableICC),
+			"com.docker.network.bridge.enable_icc":           strconv.FormatBool(nw.EnableICC),
 			"com.docker.network.bridge.enable_ip_masquerade": "true",
 			"com.docker.network.bridge.host_binding_ipv4":    "0.0.0.0",
 			"com.docker.network.bridge.name":                 "pterodactyl0",
 			"com.docker.network.driver.mtu":                  "1500",
 		},
 	})
-
 	if err != nil {
 		return err
 	}
-
-	switch c.Network.Driver {
-	case "host":
-		c.Network.Interface = "127.0.0.1"
-		c.Network.ISPN = false
-		break
-	case "overlay":
-	case "weavemesh":
-		c.Network.Interface = ""
-		c.Network.ISPN = true
-		break
-	default:
-		c.Network.Interface = c.Network.Interfaces.V4.Gateway
-		c.Network.ISPN = false
-		break
-	}
-
+	if nw.Driver != "host" && nw.Driver != "overlay" && nw.Driver != "weavemesh" {
+		config.Update(func(c *config.Configuration) {
+			c.Docker.Network.Interface = c.Docker.Network.Interfaces.V4.Gateway
+		})
+	}
 	return nil
 }
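A short sketch of how the refactored entry points fit together at boot, assuming the global configuration has already been loaded; the context timeout is illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/pterodactyl/wings/environment"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// The client is created once behind sync.Once; later calls return the same instance.
	if _, err := environment.Docker(); err != nil {
		log.Fatal(err)
	}
	// Inspects (and if needed creates) the pterodactyl0 network, then writes the
	// detected driver/interface back into the global config via config.Update.
	if err := environment.ConfigureDocker(ctx); err != nil {
		log.Fatal(err)
	}
}
```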
@@ -55,7 +55,7 @@ type Environment struct {
 // reference the container from here on out. This should be unique per-server (we use the UUID
 // by default). The container does not need to exist at this point.
 func New(id string, m *Metadata, c *environment.Configuration) (*Environment, error) {
-	cli, err := environment.DockerClient()
+	cli, err := environment.Docker()
 	if err != nil {
 		return nil, err
 	}
							
								
								
									
go.sum
@@ -285,6 +285,7 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
| github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= | github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= | ||||||
| github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= | ||||||
| github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= | github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= | ||||||
|  | github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= | ||||||
| github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= | ||||||
| github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= | github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= | ||||||
| github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= | github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= | ||||||
|  | @ -401,6 +402,7 @@ github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eI | ||||||
| github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= | github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= | ||||||
| github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= | github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= | ||||||
| github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | ||||||
|  | github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= | ||||||
| github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= | ||||||
| github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= | github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= | ||||||
| github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= | github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= | ||||||
|  | @ -454,6 +456,7 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI | ||||||
| github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= | github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= | ||||||
| github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= | github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= | ||||||
| github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= | github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= | ||||||
|  | github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= | ||||||
| github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= | github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= | ||||||
| github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= | github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= | ||||||
| github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= | github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= | ||||||
|  | @ -550,17 +553,21 @@ github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4S | ||||||
| github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= | github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= | ||||||
| github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= | github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= | ||||||
| github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= | ||||||
|  | github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= | ||||||
| github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= | github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= | ||||||
|  | github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= | ||||||
| github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= | github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= | ||||||
| github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= | github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= | ||||||
| github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= | github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= | ||||||
| github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= | github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= | ||||||
|  | github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= | ||||||
| github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= | github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= | ||||||
| github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= | github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= | ||||||
| github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= | github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= | ||||||
| github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= | ||||||
| github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= | ||||||
| github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= | ||||||
|  | github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM= | ||||||
| github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= | github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= | ||||||
| github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= | github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= | ||||||
| github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= | github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= | ||||||
|  | @ -575,6 +582,7 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P | ||||||
| github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= | ||||||
| github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= | github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= | ||||||
| github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||||
|  | github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= | ||||||
| github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= | github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= | ||||||
| github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA= | github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA= | ||||||
| github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= | github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= | ||||||
|  |  | ||||||
|  | @ -18,7 +18,22 @@ import ( | ||||||
| 	"time" | 	"time" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| var client = &http.Client{Timeout: time.Hour * 12} | var client = &http.Client{ | ||||||
|  | 	Timeout: time.Hour * 12, | ||||||
|  | 	// Disallow any redirect on an HTTP call. This is a security requirement: do not modify
 | ||||||
|  | 	// this logic without first ensuring that the new target location IS NOT within the current
 | ||||||
|  | 	// instance's local network.
 | ||||||
|  | 	//
 | ||||||
|  | 	// This specific error response just causes the client to not follow the redirect and
 | ||||||
|  | 	// returns the actual redirect response to the caller. Not perfect, but simple and most
 | ||||||
|  | 	// people hopefully won't be using URLs that redirect anyway.
 | ||||||
|  | 	//
 | ||||||
|  | 	// We'll re-evaluate this down the road if needed.
 | ||||||
|  | 	CheckRedirect: func(req *http.Request, via []*http.Request) error { | ||||||
|  | 		return http.ErrUseLastResponse | ||||||
|  | 	}, | ||||||
|  | } | ||||||
|  | 
 | ||||||
| var instance = &Downloader{ | var instance = &Downloader{ | ||||||
| 	// Tracks all of the active downloads.
 | 	// Tracks all of the active downloads.
 | ||||||
| 	downloadCache: make(map[string]*Download), | 	downloadCache: make(map[string]*Download), | ||||||
|  |  | ||||||
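A note on the CheckRedirect change above: returning http.ErrUseLastResponse makes the standard library stop at the redirect and hand the raw 3xx response back to the caller, so the calling code must treat that response as a failure itself. The sketch below is illustrative only (the helper name and URL are not part of this commit) and shows that behaviour in isolation:

package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// fetchWithoutRedirects builds a client the same way the downloader above does and
// refuses to follow any redirect, surfacing the Location target in the error.
func fetchWithoutRedirects(url string) (*http.Response, error) {
	c := &http.Client{
		Timeout: time.Minute,
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	res, err := c.Get(url)
	if err != nil {
		return nil, err
	}
	if res.StatusCode >= 300 && res.StatusCode < 400 {
		res.Body.Close()
		return nil, errors.New("refusing to follow redirect to " + res.Header.Get("Location"))
	}
	return res, nil
}

func main() {
	if _, err := fetchWithoutRedirects("https://example.com/some-file.tar.gz"); err != nil {
		fmt.Println(err)
	}
}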
|  | @ -77,7 +77,6 @@ func (e *RequestError) AbortWithStatus(status int, c *gin.Context) { | ||||||
| 	// If this error is because the resource does not exist, we likely do not need to log
 | 	// If this error is because the resource does not exist, we likely do not need to log
 | ||||||
| 	// the error anywhere, just return a 404 and move on with our lives.
 | 	// the error anywhere, just return a 404 and move on with our lives.
 | ||||||
| 	if errors.Is(e.err, os.ErrNotExist) { | 	if errors.Is(e.err, os.ErrNotExist) { | ||||||
| 		e.logger().Debug("encountered os.IsNotExist error while handling request") |  | ||||||
| 		c.AbortWithStatusJSON(http.StatusNotFound, gin.H{ | 		c.AbortWithStatusJSON(http.StatusNotFound, gin.H{ | ||||||
| 			"error": "The requested resource was not found on the system.", | 			"error": "The requested resource was not found on the system.", | ||||||
| 		}) | 		}) | ||||||
|  | @ -122,20 +121,25 @@ func (e *RequestError) Abort(c *gin.Context) { | ||||||
| // Looks at the given RequestError and determines if it is a specific filesystem error that
 | // Looks at the given RequestError and determines if it is a specific filesystem error that
 | ||||||
| // we can process and return differently for the user.
 | // we can process and return differently for the user.
 | ||||||
| func (e *RequestError) getAsFilesystemError() (int, string) { | func (e *RequestError) getAsFilesystemError() (int, string) { | ||||||
| 	err := errors.Unwrap(e.err) | 	// Some external things end up calling fmt.Errorf() on our filesystem errors
 | ||||||
| 	if err == nil { | 	// which ends up just unleashing chaos on the system. For the sake of this
 | ||||||
| 		return 0, "" | 	// fall back to using text checks...
 | ||||||
|  | 	if filesystem.IsErrorCode(e.err, filesystem.ErrCodeDenylistFile) || strings.Contains(e.err.Error(), "filesystem: file access prohibited") { | ||||||
|  | 		return http.StatusForbidden, "This file cannot be modified: present in egg denylist." | ||||||
| 	} | 	} | ||||||
| 	if errors.Is(err, os.ErrNotExist) || filesystem.IsErrorCode(err, filesystem.ErrCodePathResolution) { | 	if filesystem.IsErrorCode(e.err, filesystem.ErrCodePathResolution) || strings.Contains(e.err.Error(), "resolves to a location outside the server root") { | ||||||
| 		return http.StatusNotFound, "The requested resource was not found on the system." | 		return http.StatusNotFound, "The requested resource was not found on the system." | ||||||
| 	} | 	} | ||||||
| 	if filesystem.IsErrorCode(err, filesystem.ErrCodeDiskSpace) { | 	if filesystem.IsErrorCode(e.err, filesystem.ErrCodeIsDirectory) || strings.Contains(e.err.Error(), "filesystem: is a directory") { | ||||||
| 		return http.StatusConflict, "There is not enough disk space available to perform that action." | 		return http.StatusBadRequest, "Cannot perform that action: file is a directory." | ||||||
| 	} | 	} | ||||||
| 	if strings.HasSuffix(err.Error(), "file name too long") { | 	if filesystem.IsErrorCode(e.err, filesystem.ErrCodeDiskSpace) || strings.Contains(e.err.Error(), "filesystem: not enough disk space") { | ||||||
|  | 		return http.StatusBadRequest, "There is not enough disk space available to perform that action." | ||||||
|  | 	} | ||||||
|  | 	if strings.HasSuffix(e.err.Error(), "file name too long") { | ||||||
| 		return http.StatusBadRequest, "Cannot perform that action: file name is too long." | 		return http.StatusBadRequest, "Cannot perform that action: file name is too long." | ||||||
| 	} | 	} | ||||||
| 	if e, ok := err.(*os.SyscallError); ok && e.Syscall == "readdirent" { | 	if e, ok := e.err.(*os.SyscallError); ok && e.Syscall == "readdirent" { | ||||||
| 		return http.StatusNotFound, "The requested directory does not exist." | 		return http.StatusNotFound, "The requested directory does not exist." | ||||||
| 	} | 	} | ||||||
| 	return 0, "" | 	return 0, "" | ||||||
|  |  | ||||||
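The string-matching fallbacks above exist because a filesystem error that gets re-wrapped with fmt.Errorf and a %s or %v verb loses its error chain, so the typed checks in filesystem.IsErrorCode stop matching. A small, self-contained illustration of that difference (the error value here is a stand-in, not the real filesystem error type):

package main

import (
	"errors"
	"fmt"
)

var errDenied = errors.New("filesystem: file access prohibited")

func main() {
	// Wrapping with %w preserves the chain, so errors.Is (and code built on it,
	// such as typed error-code checks) still sees the original error.
	good := fmt.Errorf("failed to process file: %w", errDenied)
	// Wrapping with %s flattens the error into plain text, which is exactly the
	// situation the strings.Contains fallbacks above are guarding against.
	bad := fmt.Errorf("failed to process file: %s", errDenied)

	fmt.Println(errors.Is(good, errDenied)) // true
	fmt.Println(errors.Is(bad, errDenied))  // false
}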
							
								
								
									
315 router/middleware/middleware.go Normal file
|  | @ -0,0 +1,315 @@ | ||||||
|  | package middleware | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"context" | ||||||
|  | 	"crypto/subtle" | ||||||
|  | 	"io" | ||||||
|  | 	"net/http" | ||||||
|  | 	"os" | ||||||
|  | 	"strings" | ||||||
|  | 
 | ||||||
|  | 	"emperror.dev/errors" | ||||||
|  | 	"github.com/apex/log" | ||||||
|  | 	"github.com/gin-gonic/gin" | ||||||
|  | 	"github.com/google/uuid" | ||||||
|  | 	"github.com/pterodactyl/wings/config" | ||||||
|  | 	"github.com/pterodactyl/wings/server" | ||||||
|  | 	"github.com/pterodactyl/wings/server/filesystem" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // RequestError is a custom error type returned when something goes wrong with
 | ||||||
|  | // any of the HTTP endpoints.
 | ||||||
|  | type RequestError struct { | ||||||
|  | 	err    error | ||||||
|  | 	status int | ||||||
|  | 	msg    string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewError returns a new RequestError for the provided error.
 | ||||||
|  | func NewError(err error) *RequestError { | ||||||
|  | 	return &RequestError{ | ||||||
|  | 		// Attach a stacktrace to the error if it is missing at this point and mark it
 | ||||||
|  | 		// as originating from the location where NewError was called, rather than this
 | ||||||
|  | 		// specific point in the code.
 | ||||||
|  | 		err: errors.WithStackDepthIf(err, 1), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetMessage allows for a custom error message to be set on an existing
 | ||||||
|  | // RequestError instance.
 | ||||||
|  | func (re *RequestError) SetMessage(m string) { | ||||||
|  | 	re.msg = m | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetStatus sets the HTTP status code for the error response. By default this
 | ||||||
|  | // is an HTTP 500 error.
 | ||||||
|  | func (re *RequestError) SetStatus(s int) { | ||||||
|  | 	re.status = s | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Abort aborts the given HTTP request with the specified status code and then
 | ||||||
|  | // logs the event into the logs. The error that is output will include the unique
 | ||||||
|  | // request ID if it is present.
 | ||||||
|  | func (re *RequestError) Abort(c *gin.Context, status int) { | ||||||
|  | 	reqId := c.Writer.Header().Get("X-Request-Id") | ||||||
|  | 
 | ||||||
|  | 	// Generate the base logger instance, attaching the unique request ID and
 | ||||||
|  | 	// the URL that was requested.
 | ||||||
|  | 	event := log.WithField("request_id", reqId).WithField("url", c.Request.URL.String()) | ||||||
|  | 	// If there is a server present in the gin.Context stack go ahead and pull it
 | ||||||
|  | 	// and attach that server UUID to the logs as well so that we can see what specific
 | ||||||
|  | 	// server triggered this error.
 | ||||||
|  | 	if s, ok := c.Get("server"); ok { | ||||||
|  | 		if s, ok := s.(*server.Server); ok { | ||||||
|  | 			event = event.WithField("server_id", s.Id()) | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	if c.Writer.Status() == 200 { | ||||||
|  | 		// Handle context deadlines being exceeded a little differently since we want
 | ||||||
|  | 		// to report a more user-friendly error and a proper error code. The "context
 | ||||||
|  | 		// canceled" error is generally when a request is terminated before all of the
 | ||||||
|  | 		// logic is finished running.
 | ||||||
|  | 		if errors.Is(re.err, context.DeadlineExceeded) { | ||||||
|  | 			re.SetStatus(http.StatusGatewayTimeout) | ||||||
|  | 			re.SetMessage("The server could not process this request in time, please try again.") | ||||||
|  | 		} else if strings.Contains(re.Cause().Error(), "context canceled") { | ||||||
|  | 			re.SetStatus(http.StatusBadRequest) | ||||||
|  | 			re.SetMessage("Request aborted by client.") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// c.Writer.Status() will be a non-200 value if the headers have already been sent
 | ||||||
|  | 	// to the requester but an error is encountered. This can happen if there is an issue
 | ||||||
|  | 	// marshaling a struct placed into a c.JSON() call (or c.AbortWithJSON() call).
 | ||||||
|  | 	if status >= 500 || c.Writer.Status() != 200 { | ||||||
|  | 		event.WithField("status", status).WithField("error", re.err).Error("error while handling HTTP request") | ||||||
|  | 	} else { | ||||||
|  | 		event.WithField("status", status).WithField("error", re.err).Debug("error handling HTTP request (not a server error)") | ||||||
|  | 	} | ||||||
|  | 	if re.msg == "" { | ||||||
|  | 		re.msg = "An unexpected error was encountered while processing this request" | ||||||
|  | 	} | ||||||
|  | 	// Now abort the request with the error message and include the unique request
 | ||||||
|  | 	// ID that was present to make things super easy on people who don't know how
 | ||||||
|  | 	// or cannot view the response headers (where X-Request-Id would be present).
 | ||||||
|  | 	c.AbortWithStatusJSON(status, gin.H{"error": re.msg, "request_id": reqId}) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Cause returns the underlying error.
 | ||||||
|  | func (re *RequestError) Cause() error { | ||||||
|  | 	return re.err | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Error returns the underlying error message for this request.
 | ||||||
|  | func (re *RequestError) Error() string { | ||||||
|  | 	return re.err.Error() | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Looks at the given RequestError and determines if it is a specific filesystem
 | ||||||
|  | // error that we can process and return differently for the user.
 | ||||||
|  | //
 | ||||||
|  | // Some external things end up calling fmt.Errorf() on our filesystem errors
 | ||||||
|  | // which ends up just unleashing chaos on the system. For the sake of this,
 | ||||||
|  | // fall back to using text checks.
 | ||||||
|  | //
 | ||||||
|  | // If the error passed into this call is nil, or does not match any case below, empty values will
 | ||||||
|  | // be returned to the caller.
 | ||||||
|  | func (re *RequestError) asFilesystemError() (int, string) { | ||||||
|  | 	err := re.Cause() | ||||||
|  | 	if err == nil { | ||||||
|  | 		return 0, "" | ||||||
|  | 	} | ||||||
|  | 	if filesystem.IsErrorCode(err, filesystem.ErrCodeDenylistFile) || strings.Contains(err.Error(), "filesystem: file access prohibited") { | ||||||
|  | 		return http.StatusForbidden, "This file cannot be modified: present in egg denylist." | ||||||
|  | 	} | ||||||
|  | 	if filesystem.IsErrorCode(err, filesystem.ErrCodePathResolution) || strings.Contains(err.Error(), "resolves to a location outside the server root") { | ||||||
|  | 		return http.StatusNotFound, "The requested resource was not found on the system." | ||||||
|  | 	} | ||||||
|  | 	if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) || strings.Contains(err.Error(), "filesystem: is a directory") { | ||||||
|  | 		return http.StatusBadRequest, "Cannot perform that action: file is a directory." | ||||||
|  | 	} | ||||||
|  | 	if filesystem.IsErrorCode(err, filesystem.ErrCodeDiskSpace) || strings.Contains(err.Error(), "filesystem: not enough disk space") { | ||||||
|  | 		return http.StatusBadRequest, "There is not enough disk space available to perform that action." | ||||||
|  | 	} | ||||||
|  | 	if strings.HasSuffix(err.Error(), "file name too long") { | ||||||
|  | 		return http.StatusBadRequest, "Cannot perform that action: file name is too long." | ||||||
|  | 	} | ||||||
|  | 	if e, ok := err.(*os.SyscallError); ok && e.Syscall == "readdirent" { | ||||||
|  | 		return http.StatusNotFound, "The requested directory does not exist." | ||||||
|  | 	} | ||||||
|  | 	return 0, "" | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // AttachRequestID attaches a unique ID to the incoming HTTP request so that any
 | ||||||
|  | // errors that are generated or returned to the client will include this reference
 | ||||||
|  | // allowing for an easier time identifying the specific request that failed for
 | ||||||
|  | // the user.
 | ||||||
|  | //
 | ||||||
|  | // If you are using a tool such as Sentry or Bugsnag for error reporting this is
 | ||||||
|  | // a great location to also attach this request ID to your error handling logic
 | ||||||
|  | // so that you can easily cross-reference the errors.
 | ||||||
|  | func AttachRequestID() gin.HandlerFunc { | ||||||
|  | 	return func(c *gin.Context) { | ||||||
|  | 		id := uuid.New().String() | ||||||
|  | 		c.Set("request_id", id) | ||||||
|  | 		c.Set("logger", log.WithField("request_id", id)) | ||||||
|  | 		c.Header("X-Request-Id", id) | ||||||
|  | 		c.Next() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CaptureAndAbort aborts the request and attaches the provided error to the gin
 | ||||||
|  | // context so it can be reported properly. If the error is missing a stacktrace
 | ||||||
|  | // at the time it is called the stack will be attached.
 | ||||||
|  | func CaptureAndAbort(c *gin.Context, err error) { | ||||||
|  | 	c.Abort() | ||||||
|  | 	c.Error(errors.WithStackDepthIf(err, 1)) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // CaptureErrors is a custom handler function allowing for errors bubbled up by
 | ||||||
|  | // c.Error() to be returned in a standardized format with tracking UUIDs on them
 | ||||||
|  | // for easier log searching.
 | ||||||
|  | func CaptureErrors() gin.HandlerFunc { | ||||||
|  | 	return func(c *gin.Context) { | ||||||
|  | 		c.Next() | ||||||
|  | 		err := c.Errors.Last() | ||||||
|  | 		if err == nil || err.Err == nil { | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		status := http.StatusInternalServerError | ||||||
|  | 		if c.Writer.Status() != 200 { | ||||||
|  | 			status = c.Writer.Status() | ||||||
|  | 		} | ||||||
|  | 		if err.Error() == io.EOF.Error() { | ||||||
|  | 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The data passed in the request was not in a parsable format. Please try again."}) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		captured := NewError(err.Err) | ||||||
|  | 		if status, msg := captured.asFilesystemError(); msg != "" { | ||||||
|  | 			c.AbortWithStatusJSON(status, gin.H{"error": msg, "request_id": c.Writer.Header().Get("X-Request-Id")}) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		captured.Abort(c, status) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // SetAccessControlHeaders sets the access request control headers on all of
 | ||||||
|  | // the requests.
 | ||||||
|  | func SetAccessControlHeaders() gin.HandlerFunc { | ||||||
|  | 	origins := config.Get().AllowedOrigins | ||||||
|  | 	location := config.Get().PanelLocation | ||||||
|  | 
 | ||||||
|  | 	return func(c *gin.Context) { | ||||||
|  | 		c.Header("Access-Control-Allow-Credentials", "true") | ||||||
|  | 		c.Header("Access-Control-Allow-Methods", "GET, POST, PATCH, PUT, DELETE, OPTIONS") | ||||||
|  | 		// Maximum age allowable under Chromium v76 is 2 hours, so just use that since
 | ||||||
|  | 		// anything higher will be ignored (even if other browsers do allow higher values).
 | ||||||
|  | 		//
 | ||||||
|  | 		// @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#Directives
 | ||||||
|  | 		c.Header("Access-Control-Max-Age", "7200") | ||||||
|  | 		c.Header("Access-Control-Allow-Origin", location) | ||||||
|  | 		c.Header("Access-Control-Allow-Headers", "Accept, Accept-Encoding, Authorization, Cache-Control, Content-Type, Content-Length, Origin, X-Real-IP, X-CSRF-Token") | ||||||
|  | 		// Validate that the request origin is coming from an allowed origin. Because you
 | ||||||
|  | 		// cannot set multiple values here we need to see if the origin is one of the ones
 | ||||||
|  | 		// that we allow, and if so return it explicitly. Otherwise, just return the default
 | ||||||
|  | 		// origin which is the same URL that the Panel is located at.
 | ||||||
|  | 		origin := c.GetHeader("Origin") | ||||||
|  | 		if origin != location { | ||||||
|  | 			for _, o := range origins { | ||||||
|  | 				if o != "*" && o != origin { | ||||||
|  | 					continue | ||||||
|  | 				} | ||||||
|  | 				c.Header("Access-Control-Allow-Origin", o) | ||||||
|  | 				break | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 		if c.Request.Method == http.MethodOptions { | ||||||
|  | 			c.AbortWithStatus(http.StatusNoContent) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		c.Next() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ServerExists will ensure that the requested server exists in this setup.
 | ||||||
|  | // Returns a 404 if we cannot locate it. If the server is found it is set into
 | ||||||
|  | // the request context, and the logger for the context is also updated to include
 | ||||||
|  | // the server ID in the fields list.
 | ||||||
|  | func ServerExists() gin.HandlerFunc { | ||||||
|  | 	return func(c *gin.Context) { | ||||||
|  | 		s := server.GetServers().Find(func(s *server.Server) bool { | ||||||
|  | 			return c.Param("server") == s.Id() | ||||||
|  | 		}) | ||||||
|  | 		if s == nil { | ||||||
|  | 			c.AbortWithStatusJSON(http.StatusNotFound, gin.H{"error": "The requested resource does not exist on this instance."}) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		c.Set("logger", ExtractLogger(c).WithField("server_id", s.Id())) | ||||||
|  | 		c.Set("server", s) | ||||||
|  | 		c.Next() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RequireAuthorization authenticates the request against the node's configured
 | ||||||
|  | // authentication token using a constant-time comparison. Requests without a
 | ||||||
|  | // matching Bearer token are rejected before they ever reach a handler; routes
 | ||||||
|  | // that use signed JWTs instead are mounted outside this middleware.
 | ||||||
|  | func RequireAuthorization() gin.HandlerFunc { | ||||||
|  | 	return func(c *gin.Context) { | ||||||
|  | 		// We don't put this value outside this function since the node's authentication
 | ||||||
|  | 		// token can be changed on the fly and the config.Get() call returns a copy, so
 | ||||||
|  | 		// if it is rotated this value will never properly get updated.
 | ||||||
|  | 		token := config.Get().AuthenticationToken | ||||||
|  | 		auth := strings.SplitN(c.GetHeader("Authorization"), " ", 2) | ||||||
|  | 		if len(auth) != 2 || auth[0] != "Bearer" { | ||||||
|  | 			c.Header("WWW-Authenticate", "Bearer") | ||||||
|  | 			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "The required authorization headers were not present in the request."}) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// All requests to Wings must be authorized with the authentication token present in
 | ||||||
|  | 		// the Wings configuration file. Remember, all requests to Wings come from the Panel
 | ||||||
|  | 		// backend, or using a signed JWT for temporary authentication.
 | ||||||
|  | 		if subtle.ConstantTimeCompare([]byte(auth[1]), []byte(token)) != 1 { | ||||||
|  | 			c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "You are not authorized to access this endpoint."}) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		c.Next() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // RemoteDownloadEnabled checks if remote downloads are enabled for this instance
 | ||||||
|  | // and if not aborts the request.
 | ||||||
|  | func RemoteDownloadEnabled() gin.HandlerFunc { | ||||||
|  | 	disabled := config.Get().Api.DisableRemoteDownload | ||||||
|  | 	return func(c *gin.Context) { | ||||||
|  | 		if disabled { | ||||||
|  | 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "This functionality is not currently enabled on this instance."}) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		c.Next() | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ExtractLogger pulls the logger out of the request context and returns it. By
 | ||||||
|  | // default this will include the request ID, but may also include the server ID
 | ||||||
|  | // if that middleware has been used in the chain by the time it is called.
 | ||||||
|  | func ExtractLogger(c *gin.Context) *log.Entry { | ||||||
|  | 	v, ok := c.Get("logger") | ||||||
|  | 	if !ok { | ||||||
|  | 		panic("middleware/middleware: cannot extract logger: not present in request context") | ||||||
|  | 	} | ||||||
|  | 	return v.(*log.Entry) | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // ExtractServer will return the server from the gin.Context or panic if it is
 | ||||||
|  | // not present.
 | ||||||
|  | func ExtractServer(c *gin.Context) *server.Server { | ||||||
|  | 	v, ok := c.Get("server") | ||||||
|  | 	if !ok { | ||||||
|  | 		panic("middleware/middleware: cannot extract server: not present in request context") | ||||||
|  | 	} | ||||||
|  | 	return v.(*server.Server) | ||||||
|  | } | ||||||
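To show how the helpers in this new file are meant to be consumed by the route handlers later in this commit, here is a hypothetical handler (not part of the commit; the route and JSON shape are made up) that pulls the injected server and logger and reports failures through CaptureAndAbort so CaptureErrors can translate and log them:

package router

import (
	"net/http"
	"strings"

	"github.com/gin-gonic/gin"
	"github.com/pterodactyl/wings/router/middleware"
)

// getServerFileInfo returns only the metadata for a file rather than its contents.
// It is illustrative: the interesting part is the middleware.Extract* and
// middleware.CaptureAndAbort pattern, mirroring the real handlers in this commit.
func getServerFileInfo(c *gin.Context) {
	s := middleware.ExtractServer(c)
	lg := middleware.ExtractLogger(c)

	p := "/" + strings.TrimLeft(c.Query("file"), "/")
	f, st, err := s.Filesystem().File(p)
	if err != nil {
		middleware.CaptureAndAbort(c, err)
		return
	}
	defer f.Close()

	lg.WithField("path", p).Debug("resolved file for metadata request")
	c.JSON(http.StatusOK, gin.H{"name": st.Name(), "size": st.Size(), "mime": st.Mimetype})
}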
|  | @ -3,37 +3,33 @@ package router | ||||||
| import ( | import ( | ||||||
| 	"github.com/apex/log" | 	"github.com/apex/log" | ||||||
| 	"github.com/gin-gonic/gin" | 	"github.com/gin-gonic/gin" | ||||||
| 	"github.com/pterodactyl/wings/server" | 	"github.com/pterodactyl/wings/router/middleware" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // Configures the routing infrastructure for this daemon instance.
 | // Configure configures the routing infrastructure for this daemon instance.
 | ||||||
| func Configure(serverManager server.Manager) *gin.Engine { | func Configure() *gin.Engine { | ||||||
| 	gin.SetMode("release") | 	gin.SetMode("release") | ||||||
| 
 | 
 | ||||||
| 	m := Middleware{ |  | ||||||
| 		serverManager, |  | ||||||
| 	} |  | ||||||
| 	router := gin.New() | 	router := gin.New() | ||||||
| 	router.Use(gin.Recovery(), m.ErrorHandler(), m.SetAccessControlHeaders(), m.WithServerManager()) | 	router.Use(gin.Recovery()) | ||||||
|  | 	router.Use(middleware.AttachRequestID(), middleware.CaptureErrors(), middleware.SetAccessControlHeaders()) | ||||||
| 	// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
 | 	// @todo log this into a different file so you can setup IP blocking for abusive requests and such.
 | ||||||
| 	// This should still dump requests in debug mode since it does help with understanding the request
 | 	// This should still dump requests in debug mode since it does help with understanding the request
 | ||||||
| 	// lifecycle and quickly seeing what was called leading to the logs. However, it isn't feasible to mix
 | 	// lifecycle and quickly seeing what was called leading to the logs. However, it isn't feasible to mix
 | ||||||
| 	// this output in production and still get meaningful logs from it since they'll likely just be a huge
 | 	// this output in production and still get meaningful logs from it since they'll likely just be a huge
 | ||||||
| 	// spamfest.
 | 	// spamfest.
 | ||||||
|  | 	router.Use() | ||||||
| 	router.Use(gin.LoggerWithFormatter(func(params gin.LogFormatterParams) string { | 	router.Use(gin.LoggerWithFormatter(func(params gin.LogFormatterParams) string { | ||||||
| 		log.WithFields(log.Fields{ | 		log.WithFields(log.Fields{ | ||||||
| 			"client_ip":  params.ClientIP, | 			"client_ip":  params.ClientIP, | ||||||
| 			"status":     params.StatusCode, | 			"status":     params.StatusCode, | ||||||
| 			"latency":    params.Latency, | 			"latency":    params.Latency, | ||||||
|  | 			"request_id": params.Keys["request_id"], | ||||||
| 		}).Debugf("%s %s", params.MethodColor()+params.Method+params.ResetColor(), params.Path) | 		}).Debugf("%s %s", params.MethodColor()+params.Method+params.ResetColor(), params.Path) | ||||||
| 
 | 
 | ||||||
| 		return "" | 		return "" | ||||||
| 	})) | 	})) | ||||||
| 
 | 
 | ||||||
| 	router.OPTIONS("/api/system", func(c *gin.Context) { |  | ||||||
| 		c.Status(200) |  | ||||||
| 	}) |  | ||||||
| 
 |  | ||||||
| 	// These routes use signed URLs to validate access to the resource being requested.
 | 	// These routes use signed URLs to validate access to the resource being requested.
 | ||||||
| 	router.GET("/download/backup", getDownloadBackup) | 	router.GET("/download/backup", getDownloadBackup) | ||||||
| 	router.GET("/download/file", getDownloadFile) | 	router.GET("/download/file", getDownloadFile) | ||||||
|  | @ -42,16 +38,16 @@ func Configure(serverManager server.Manager) *gin.Engine { | ||||||
| 	// This route is special it sits above all of the other requests because we are
 | 	// This route is special it sits above all of the other requests because we are
 | ||||||
| 	// using a JWT to authorize access to it, therefore it needs to be publicly
 | 	// using a JWT to authorize access to it, therefore it needs to be publicly
 | ||||||
| 	// accessible.
 | 	// accessible.
 | ||||||
| 	router.GET("/api/servers/:server/ws", m.ServerExists(), getServerWebsocket) | 	router.GET("/api/servers/:server/ws", middleware.ServerExists(), getServerWebsocket) | ||||||
| 
 | 
 | ||||||
| 	// This request is called by another daemon when a server is going to be transferred out.
 | 	// This request is called by another daemon when a server is going to be transferred out.
 | ||||||
| 	// This request does not need the AuthorizationMiddleware as the panel should never call it
 | 	// This request does not need the AuthorizationMiddleware as the panel should never call it
 | ||||||
| 	// and requests are authenticated through a JWT the panel issues to the other daemon.
 | 	// and requests are authenticated through a JWT the panel issues to the other daemon.
 | ||||||
| 	router.GET("/api/servers/:server/archive", m.ServerExists(), getServerArchive) | 	router.GET("/api/servers/:server/archive", middleware.ServerExists(), getServerArchive) | ||||||
| 
 | 
 | ||||||
| 	// All of the routes beyond this mount will use an authorization middleware
 | 	// All of the routes beyond this mount will use an authorization middleware
 | ||||||
| 	// and will not be accessible without the correct Authorization header provided.
 | 	// and will not be accessible without the correct Authorization header provided.
 | ||||||
| 	protected := router.Use(m.RequireAuthorization()) | 	protected := router.Use(middleware.RequireAuthorization()) | ||||||
| 	protected.POST("/api/update", postUpdateConfiguration) | 	protected.POST("/api/update", postUpdateConfiguration) | ||||||
| 	protected.GET("/api/system", getSystemInformation) | 	protected.GET("/api/system", getSystemInformation) | ||||||
| 	protected.GET("/api/servers", getAllServers) | 	protected.GET("/api/servers", getAllServers) | ||||||
|  | @ -61,7 +57,7 @@ func Configure(serverManager server.Manager) *gin.Engine { | ||||||
| 	// These are server specific routes, and require that the request be authorized, and
 | 	// These are server specific routes, and require that the request be authorized, and
 | ||||||
| 	// that the server exist on the Daemon.
 | 	// that the server exist on the Daemon.
 | ||||||
| 	server := router.Group("/api/servers/:server") | 	server := router.Group("/api/servers/:server") | ||||||
| 	server.Use(m.RequireAuthorization(), m.ServerExists()) | 	server.Use(middleware.RequireAuthorization(), middleware.ServerExists()) | ||||||
| 	{ | 	{ | ||||||
| 		server.GET("", getServer) | 		server.GET("", getServer) | ||||||
| 		server.PATCH("", patchServer) | 		server.PATCH("", patchServer) | ||||||
|  | @ -91,9 +87,9 @@ func Configure(serverManager server.Manager) *gin.Engine { | ||||||
| 			files.POST("/decompress", postServerDecompressFiles) | 			files.POST("/decompress", postServerDecompressFiles) | ||||||
| 			files.POST("/chmod", postServerChmodFile) | 			files.POST("/chmod", postServerChmodFile) | ||||||
| 
 | 
 | ||||||
| 			files.GET("/pull", getServerPullingFiles) | 			files.GET("/pull", middleware.RemoteDownloadEnabled(), getServerPullingFiles) | ||||||
| 			files.POST("/pull", postServerPullRemoteFile) | 			files.POST("/pull", middleware.RemoteDownloadEnabled(), postServerPullRemoteFile) | ||||||
| 			files.DELETE("/pull/:download", deleteServerPullRemoteFile) | 			files.DELETE("/pull/:download", middleware.RemoteDownloadEnabled(), deleteServerPullRemoteFile) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		backup := server.Group("/backup") | 		backup := server.Group("/backup") | ||||||
|  |  | ||||||
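For reference when exercising the protected routes registered above: every request must carry the node token from the Wings configuration as a Bearer token, and every response carries the X-Request-Id header set by AttachRequestID. The snippet below is illustrative; the base URL, port, and token value are placeholders rather than values taken from this commit:

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	const base = "http://localhost:8080"                 // placeholder address for a local Wings instance
	const nodeToken = "node-token-from-the-wings-config" // must match the configured token

	req, err := http.NewRequest(http.MethodGet, base+"/api/system", nil)
	if err != nil {
		log.Fatal(err)
	}
	// RequireAuthorization expects exactly this scheme: a missing or malformed
	// header is rejected with a 401, a wrong token with a 403.
	req.Header.Set("Authorization", "Bearer "+nodeToken)

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()

	// The request ID echoed here is the value to search for in the Wings logs.
	fmt.Println(res.Status, res.Header.Get("X-Request-Id"))
}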
|  | @ -1,6 +1,7 @@ | ||||||
| package router | package router | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  | 	"bufio" | ||||||
| 	"context" | 	"context" | ||||||
| 	"mime/multipart" | 	"mime/multipart" | ||||||
| 	"net/http" | 	"net/http" | ||||||
|  | @ -15,47 +16,41 @@ import ( | ||||||
| 	"github.com/apex/log" | 	"github.com/apex/log" | ||||||
| 	"github.com/gin-gonic/gin" | 	"github.com/gin-gonic/gin" | ||||||
| 	"github.com/pterodactyl/wings/router/downloader" | 	"github.com/pterodactyl/wings/router/downloader" | ||||||
|  | 	"github.com/pterodactyl/wings/router/middleware" | ||||||
| 	"github.com/pterodactyl/wings/router/tokens" | 	"github.com/pterodactyl/wings/router/tokens" | ||||||
| 	"github.com/pterodactyl/wings/server" | 	"github.com/pterodactyl/wings/server" | ||||||
| 	"github.com/pterodactyl/wings/server/filesystem" | 	"github.com/pterodactyl/wings/server/filesystem" | ||||||
| 	"golang.org/x/sync/errgroup" | 	"golang.org/x/sync/errgroup" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // Returns the contents of a file on the server.
 | // getServerFileContents returns the contents of a file on the server.
 | ||||||
| func getServerFileContents(c *gin.Context) { | func getServerFileContents(c *gin.Context) { | ||||||
| 	s := ExtractServer(c) | 	s := middleware.ExtractServer(c) | ||||||
| 	f := c.Query("file") | 	p := "/" + strings.TrimLeft(c.Query("file"), "/") | ||||||
| 	p := "/" + strings.TrimLeft(f, "/") | 	f, st, err := s.Filesystem().File(p) | ||||||
| 	st, err := s.Filesystem().Stat(p) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		WithError(c, err) | 		middleware.CaptureAndAbort(c, err) | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
|  | 	defer f.Close() | ||||||
| 
 | 
 | ||||||
| 	c.Header("X-Mime-Type", st.Mimetype) | 	c.Header("X-Mime-Type", st.Mimetype) | ||||||
| 	c.Header("Content-Length", strconv.Itoa(int(st.Info.Size()))) | 	c.Header("Content-Length", strconv.Itoa(int(st.Size()))) | ||||||
| 
 |  | ||||||
| 	// If a download parameter is included in the URL go ahead and attach the necessary headers
 | 	// If a download parameter is included in the URL go ahead and attach the necessary headers
 | ||||||
| 	// so that the file can be downloaded.
 | 	// so that the file can be downloaded.
 | ||||||
| 	if c.Query("download") != "" { | 	if c.Query("download") != "" { | ||||||
| 		c.Header("Content-Disposition", "attachment; filename="+st.Info.Name()) | 		c.Header("Content-Disposition", "attachment; filename="+st.Name()) | ||||||
| 		c.Header("Content-Type", "application/octet-stream") | 		c.Header("Content-Type", "application/octet-stream") | ||||||
| 	} | 	} | ||||||
| 
 | 	defer c.Writer.Flush() | ||||||
| 	// TODO(dane): should probably come up with a different approach here. If an error is encountered
 | 	_, err = bufio.NewReader(f).WriteTo(c.Writer) | ||||||
| 	//  by this Readfile call you'll end up causing a (recovered) panic in the program because so many
 | 	if err != nil { | ||||||
| 	//  headers have already been set. We should probably add a RawReadfile that just returns the file
| 		// Pretty sure this will unleash chaos on the response, but it's a risk we can
 | ||||||
| 	//  to be read and then we can stream from that safely without error.
 | 		// take since a panic will at least be recovered and this should be incredibly
 | ||||||
| 	//
 | 		// rare?
 | ||||||
| 	// Until that becomes a problem though I'm just going to leave this how it is. The panic is recovered
 | 		middleware.CaptureAndAbort(c, err) | ||||||
| 	// and a normal 500 error is returned to the client to my knowledge. It is also very unlikely to
 |  | ||||||
| 	// happen since we're doing so much before this point that would normally throw an error if there
 |  | ||||||
| 	// was a problem with the file.
 |  | ||||||
| 	if err := s.Filesystem().Readfile(p, c.Writer); err != nil { |  | ||||||
| 		WithError(c, err) |  | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 	c.Writer.Flush() |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Returns the contents of a directory for a server.
 | // Returns the contents of a directory for a server.
 | ||||||
|  | @ -94,8 +89,7 @@ func putServerRenameFiles(c *gin.Context) { | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	g, ctx := errgroup.WithContext(context.Background()) | 	g, ctx := errgroup.WithContext(c.Request.Context()) | ||||||
| 
 |  | ||||||
| 	// Loop over the array of files passed in and perform the move or rename action against each.
 | 	// Loop over the array of files passed in and perform the move or rename action against each.
 | ||||||
| 	for _, p := range data.Files { | 	for _, p := range data.Files { | ||||||
| 		pf := path.Join(data.Root, p.From) | 		pf := path.Join(data.Root, p.From) | ||||||
|  | @ -106,16 +100,20 @@ func putServerRenameFiles(c *gin.Context) { | ||||||
| 			case <-ctx.Done(): | 			case <-ctx.Done(): | ||||||
| 				return ctx.Err() | 				return ctx.Err() | ||||||
| 			default: | 			default: | ||||||
| 				if err := s.Filesystem().Rename(pf, pt); err != nil { | 				fs := s.Filesystem() | ||||||
|  | 				// Ignore renames on a file that is on the denylist (both as the rename from or
 | ||||||
|  | 				// the rename to value).
 | ||||||
|  | 				if err := fs.IsIgnored(pf, pt); err != nil { | ||||||
|  | 					return err | ||||||
|  | 				} | ||||||
|  | 				if err := fs.Rename(pf, pt); err != nil { | ||||||
| 					// Return nil if the error is an is not exists.
 | 					// Return nil if the error is an is not exists.
 | ||||||
| 					// NOTE: os.IsNotExist() does not work if the error is wrapped.
 | 					// NOTE: os.IsNotExist() does not work if the error is wrapped.
 | ||||||
| 					if errors.Is(err, os.ErrNotExist) { | 					if errors.Is(err, os.ErrNotExist) { | ||||||
| 						return nil | 						return nil | ||||||
| 					} | 					} | ||||||
| 
 |  | ||||||
| 					return err | 					return err | ||||||
| 				} | 				} | ||||||
| 
 |  | ||||||
| 				return nil | 				return nil | ||||||
| 			} | 			} | ||||||
| 		}) | 		}) | ||||||
|  | @ -148,6 +146,10 @@ func postServerCopyFile(c *gin.Context) { | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	if err := s.Filesystem().IsIgnored(data.Location); err != nil { | ||||||
|  | 		NewServerError(err, s).Abort(c) | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
| 	if err := s.Filesystem().Copy(data.Location); err != nil { | 	if err := s.Filesystem().Copy(data.Location); err != nil { | ||||||
| 		NewServerError(err, s).AbortFilesystemError(c) | 		NewServerError(err, s).AbortFilesystemError(c) | ||||||
| 		return | 		return | ||||||
|  | @ -208,6 +210,10 @@ func postServerWriteFile(c *gin.Context) { | ||||||
| 	f := c.Query("file") | 	f := c.Query("file") | ||||||
| 	f = "/" + strings.TrimLeft(f, "/") | 	f = "/" + strings.TrimLeft(f, "/") | ||||||
| 
 | 
 | ||||||
|  | 	if err := s.Filesystem().IsIgnored(f); err != nil { | ||||||
|  | 		NewServerError(err, s).Abort(c) | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
| 	if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil { | 	if err := s.Filesystem().Writefile(f, c.Request.Body); err != nil { | ||||||
| 		if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) { | 		if filesystem.IsErrorCode(err, filesystem.ErrCodeIsDirectory) { | ||||||
| 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{ | 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{ | ||||||
|  | @ -359,69 +365,53 @@ func postServerCompressFiles(c *gin.Context) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	c.JSON(http.StatusOK, &filesystem.Stat{ | 	c.JSON(http.StatusOK, &filesystem.Stat{ | ||||||
| 		Info:     f, | 		FileInfo: f, | ||||||
| 		Mimetype: "application/tar+gzip", | 		Mimetype: "application/tar+gzip", | ||||||
| 	}) | 	}) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // postServerDecompressFiles receives the HTTP request and starts the process
 | ||||||
|  | // of unpacking an archive that exists on the server into the provided RootPath
 | ||||||
|  | // for the server.
 | ||||||
| func postServerDecompressFiles(c *gin.Context) { | func postServerDecompressFiles(c *gin.Context) { | ||||||
| 	s := ExtractServer(c) | 	s := middleware.ExtractServer(c) | ||||||
| 
 | 	lg := middleware.ExtractLogger(c) | ||||||
| 	var data struct { | 	var data struct { | ||||||
| 		RootPath string `json:"root"` | 		RootPath string `json:"root"` | ||||||
| 		File     string `json:"file"` | 		File     string `json:"file"` | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	if err := c.BindJSON(&data); err != nil { | 	if err := c.BindJSON(&data); err != nil { | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	hasSpace, err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File) | 	lg = lg.WithFields(log.Fields{"root_path": data.RootPath, "file": data.File}) | ||||||
|  | 	lg.Debug("checking if space is available for file decompression") | ||||||
|  | 	err := s.Filesystem().SpaceAvailableForDecompression(data.RootPath, data.File) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		// Handle an unknown format error.
 |  | ||||||
| 		if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) { | 		if filesystem.IsErrorCode(err, filesystem.ErrCodeUnknownArchive) { | ||||||
| 			s.Log().WithField("error", err).Warn("failed to decompress file due to unknown format") | 			lg.WithField("error", err).Warn("failed to decompress file: unknown archive format") | ||||||
| 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{ | 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "The archive provided is in a format Wings does not understand."}) | ||||||
| 				"error": "unknown archive format", | 			return | ||||||
| 			}) | 		} | ||||||
| 			return | 		middleware.CaptureAndAbort(c, err) | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		NewServerError(err, s).Abort(c) |  | ||||||
| 		return |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	if !hasSpace { |  | ||||||
| 		c.AbortWithStatusJSON(http.StatusConflict, gin.H{ |  | ||||||
| 			"error": "This server does not have enough available disk space to decompress this archive.", |  | ||||||
| 		}) |  | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	lg.Info("starting file decompression") | ||||||
| 	if err := s.Filesystem().DecompressFile(data.RootPath, data.File); err != nil { | 	if err := s.Filesystem().DecompressFile(data.RootPath, data.File); err != nil { | ||||||
| 		if errors.Is(err, os.ErrNotExist) { |  | ||||||
| 			c.AbortWithStatusJSON(http.StatusNotFound, gin.H{ |  | ||||||
| 				"error": "The requested archive was not found.", |  | ||||||
| 			}) |  | ||||||
| 			return |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// If the file is busy for some reason just return a nicer error to the user since there is not
 | 		// If the file is busy for some reason just return a nicer error to the user since there is not
 | ||||||
| 		// much we specifically can do. They'll need to stop the running server process in order to overwrite
 | 		// much we specifically can do. They'll need to stop the running server process in order to overwrite
 | ||||||
| 		// a file like this.
 | 		// a file like this.
 | ||||||
| 		if strings.Contains(err.Error(), "text file busy") { | 		if strings.Contains(err.Error(), "text file busy") { | ||||||
| 			s.Log().WithField("error", err).Warn("failed to decompress file due to busy text file") | 			lg.WithField("error", err).Warn("failed to decompress file: text file busy") | ||||||
| 
 |  | ||||||
| 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{ | 			c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{ | ||||||
| 				"error": "One or more files this archive is attempting to overwrite are currently in use by another process. Please try again.", | 				"error": "One or more files this archive is attempting to overwrite are currently in use by another process. Please try again.", | ||||||
| 			}) | 			}) | ||||||
| 			return | 			return | ||||||
| 		} | 		} | ||||||
| 
 | 		middleware.CaptureAndAbort(c, err) | ||||||
| 		NewServerError(err, s).AbortFilesystemError(c) |  | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	c.Status(http.StatusNoContent) | 	c.Status(http.StatusNoContent) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -539,14 +529,14 @@ func postServerUploadFiles(c *gin.Context) { | ||||||
| 	for _, header := range headers { | 	for _, header := range headers { | ||||||
| 		p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename)) | 		p, err := s.Filesystem().SafePath(filepath.Join(directory, header.Filename)) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			NewServerError(err, s).AbortFilesystemError(c) | 			NewServerError(err, s).Abort(c) | ||||||
| 			return | 			return | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		// We run this in a different method so I can use defer without any of
 | 		// We run this in a different method so I can use defer without any of
 | ||||||
| 		// the consequences caused by calling it in a loop.
 | 		// the consequences caused by calling it in a loop.
 | ||||||
| 		if err := handleFileUpload(p, s, header); err != nil { | 		if err := handleFileUpload(p, s, header); err != nil { | ||||||
| 			NewServerError(err, s).AbortFilesystemError(c) | 			NewServerError(err, s).Abort(c) | ||||||
| 			return | 			return | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | @ -559,6 +549,9 @@ func handleFileUpload(p string, s *server.Server, header *multipart.FileHeader) | ||||||
| 	} | 	} | ||||||
| 	defer file.Close() | 	defer file.Close() | ||||||
| 
 | 
 | ||||||
|  | 	if err := s.Filesystem().IsIgnored(p); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
| 	if err := s.Filesystem().Writefile(p, file); err != nil { | 	if err := s.Filesystem().Writefile(p, file); err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -73,37 +73,29 @@ func postCreateServer(c *gin.Context) { | ||||||
| 	c.Status(http.StatusAccepted) | 	c.Status(http.StatusAccepted) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Updates the running configuration for this daemon instance.
 | // Updates the running configuration for this Wings instance.
 | ||||||
| func postUpdateConfiguration(c *gin.Context) { | func postUpdateConfiguration(c *gin.Context) { | ||||||
| 	// A backup of the configuration for error purposes.
 | 	cfg := config.Get() | ||||||
| 	ccopy := *config.Get() |  | ||||||
| 	// A copy of the configuration we're using to bind the data received into.
 |  | ||||||
| 	cfg := *config.Get() |  | ||||||
| 
 |  | ||||||
| 	// BindJSON sends 400 if the request fails, all we need to do is return
 |  | ||||||
| 	if err := c.BindJSON(&cfg); err != nil { | 	if err := c.BindJSON(&cfg); err != nil { | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	// Keep the SSL certificates the same since the Panel will send through Lets Encrypt
 | 	// Keep the SSL certificates the same since the Panel will send through Lets Encrypt
 | ||||||
| 	// default locations. However, if we picked a different location manually we don't
 | 	// default locations. However, if we picked a different location manually we don't
 | ||||||
| 	// want to override that.
 | 	// want to override that.
 | ||||||
| 	//
 | 	//
 | ||||||
| 	// If you pass through manual locations in the API call this logic will be skipped.
 | 	// If you pass through manual locations in the API call this logic will be skipped.
 | ||||||
| 	if strings.HasPrefix(cfg.Api.Ssl.KeyFile, "/etc/letsencrypt/live/") { | 	if strings.HasPrefix(cfg.Api.Ssl.KeyFile, "/etc/letsencrypt/live/") { | ||||||
| 		cfg.Api.Ssl.KeyFile = strings.ToLower(ccopy.Api.Ssl.KeyFile) | 		cfg.Api.Ssl.KeyFile = strings.ToLower(config.Get().Api.Ssl.KeyFile) | ||||||
| 		cfg.Api.Ssl.CertificateFile = strings.ToLower(ccopy.Api.Ssl.CertificateFile) | 		cfg.Api.Ssl.CertificateFile = strings.ToLower(config.Get().Api.Ssl.CertificateFile) | ||||||
| 	} | 	} | ||||||
| 
 | 	// Try to write this new configuration to the disk before updating our global
 | ||||||
| 	config.Set(&cfg) | 	// state with it.
 | ||||||
| 	if err := config.Get().WriteToDisk(); err != nil { | 	if err := config.WriteToDisk(cfg); err != nil { | ||||||
| 		// If there was an error writing to the disk, revert back to the configuration we had
 | 		WithError(c, err) | ||||||
| 		// before this code was run.
 |  | ||||||
| 		config.Set(&ccopy) |  | ||||||
| 
 |  | ||||||
| 		NewTrackedError(err).Abort(c) |  | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 | 	// Since we wrote it to the disk successfully now update the global configuration
 | ||||||
|  | 	// state to use this new configuration struct.
 | ||||||
|  | 	config.Set(cfg) | ||||||
| 	c.Status(http.StatusNoContent) | 	c.Status(http.StatusNoContent) | ||||||
| } | } | ||||||
|  |  | ||||||
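The ordering in the rewritten handler above is deliberate: the incoming configuration is persisted to disk first, and only once that write succeeds does it replace the in-memory global state, so a failed write can no longer leave memory and disk out of sync. A condensed sketch of that flow; the wrapper function and main are illustrative, the config calls are the ones used in the handler, and the *config.Configuration type name is assumed from the config package:

package main

import (
	"log"

	"github.com/pterodactyl/wings/config"
)

// applyConfiguration persists the configuration first and only then promotes it
// to the active in-memory state, mirroring postUpdateConfiguration above.
func applyConfiguration(cfg *config.Configuration) error {
	if err := config.WriteToDisk(cfg); err != nil {
		// The previously loaded configuration remains active; nothing to roll back.
		return err
	}
	config.Set(cfg)
	return nil
}

func main() {
	if err := applyConfiguration(config.Get()); err != nil {
		log.Fatal(err)
	}
}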
|  | @ -100,7 +100,7 @@ func getServerArchive(c *gin.Context) { | ||||||
| 
 | 
 | ||||||
| 	c.Header("X-Checksum", checksum) | 	c.Header("X-Checksum", checksum) | ||||||
| 	c.Header("X-Mime-Type", st.Mimetype) | 	c.Header("X-Mime-Type", st.Mimetype) | ||||||
| 	c.Header("Content-Length", strconv.Itoa(int(st.Info.Size()))) | 	c.Header("Content-Length", strconv.Itoa(int(st.Size()))) | ||||||
| 	c.Header("Content-Disposition", "attachment; filename="+s.Archiver.Name()) | 	c.Header("Content-Disposition", "attachment; filename="+s.Archiver.Name()) | ||||||
| 	c.Header("Content-Type", "application/octet-stream") | 	c.Header("Content-Type", "application/octet-stream") | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -46,7 +46,7 @@ func (a *Archiver) Stat() (*filesystem.Stat, error) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return &filesystem.Stat{ | 	return &filesystem.Stat{ | ||||||
| 		Info:     s, | 		FileInfo: s, | ||||||
| 		Mimetype: "application/tar+gzip", | 		Mimetype: "application/tar+gzip", | ||||||
| 	}, nil | 	}, nil | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -6,6 +6,16 @@ import ( | ||||||
| 	"github.com/pterodactyl/wings/environment" | 	"github.com/pterodactyl/wings/environment" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | type EggConfiguration struct { | ||||||
|  | 	// The internal UUID of the Egg on the Panel.
 | ||||||
|  | 	ID string | ||||||
|  | 
 | ||||||
|  | 	// Maintains a list of files that are blacklisted for opening/editing/downloading
 | ||||||
|  | 	// or basically any type of access on the server by any user. This is NOT the same
 | ||||||
|  | 	// as a per-user denylist; this is defined at the Egg level.
 | ||||||
|  | 	FileDenylist []string `json:"file_denylist"` | ||||||
|  | } | ||||||
|  | 
 | ||||||
| type Configuration struct { | type Configuration struct { | ||||||
| 	mu sync.RWMutex | 	mu sync.RWMutex | ||||||
| 
 | 
 | ||||||
|  | @ -34,6 +44,7 @@ type Configuration struct { | ||||||
| 	CrashDetectionEnabled bool                    `default:"true" json:"enabled" yaml:"enabled"` | 	CrashDetectionEnabled bool                    `default:"true" json:"enabled" yaml:"enabled"` | ||||||
| 	Mounts                []Mount                 `json:"mounts"` | 	Mounts                []Mount                 `json:"mounts"` | ||||||
| 	Resources             ResourceUsage           `json:"resources"` | 	Resources             ResourceUsage           `json:"resources"` | ||||||
|  | 	Egg                   EggConfiguration        `json:"egg,omitempty"` | ||||||
| 
 | 
 | ||||||
| 	Container struct { | 	Container struct { | ||||||
| 		// Defines the Docker image that will be used for this server
 | 		// Defines the Docker image that will be used for this server
 | ||||||
|  |  | ||||||
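The FileDenylist field added above feeds the IsIgnored checks that this commit wires into the file endpoints. Purely as an illustration of what an Egg-level denylist implies — the matching below uses filepath.Match and made-up patterns, and is not the actual Wings implementation:

package main

import (
	"fmt"
	"path/filepath"
)

// isDenied reports whether a server-relative path matches any denylist pattern.
func isDenied(denylist []string, rel string) bool {
	for _, pattern := range denylist {
		if ok, _ := filepath.Match(pattern, rel); ok {
			return true
		}
	}
	return false
}

func main() {
	denylist := []string{"server.properties", "configs/*.yml"}
	fmt.Println(isDenied(denylist, "configs/bukkit.yml")) // true
	fmt.Println(isDenied(denylist, "logs/latest.log"))    // false
}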
|  | @ -1,30 +0,0 @@ | ||||||
| package server |  | ||||||
| 
 |  | ||||||
| import ( |  | ||||||
| 	"os" |  | ||||||
| 
 |  | ||||||
| 	"github.com/pterodactyl/wings/server/filesystem" |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| func (s *Server) Filesystem() *filesystem.Filesystem { |  | ||||||
| 	return s.fs |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Ensures that the data directory for the server instance exists.
 |  | ||||||
| func (s *Server) EnsureDataDirectoryExists() error { |  | ||||||
| 	if _, err := os.Stat(s.fs.Path()); err != nil && !os.IsNotExist(err) { |  | ||||||
| 		return err |  | ||||||
| 	} else if err != nil { |  | ||||||
| 		// Create the server data directory because it does not currently exist
 |  | ||||||
| 		// on the system.
 |  | ||||||
| 		if err := os.MkdirAll(s.fs.Path(), 0700); err != nil { |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		if err := s.fs.Chown("/"); err != nil { |  | ||||||
| 			s.Log().WithField("error", err).Warn("failed to chown server data directory") |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return nil |  | ||||||
| } |  | ||||||
|  | @ -4,28 +4,29 @@ import ( | ||||||
| 	"archive/tar" | 	"archive/tar" | ||||||
| 	"archive/zip" | 	"archive/zip" | ||||||
| 	"compress/gzip" | 	"compress/gzip" | ||||||
| 	"emperror.dev/errors" |  | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"github.com/mholt/archiver/v3" |  | ||||||
| 	"os" | 	"os" | ||||||
| 	"path/filepath" | 	"path/filepath" | ||||||
| 	"reflect" | 	"reflect" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"sync/atomic" | 	"sync/atomic" | ||||||
|  | 
 | ||||||
|  | 	"emperror.dev/errors" | ||||||
|  | 	"github.com/mholt/archiver/v3" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // Look through a given archive and determine if decompressing it would put the server over
 | // SpaceAvailableForDecompression looks through a given archive and determines
 | ||||||
| // its allocated disk space limit.
 | // if decompressing it would put the server over its allocated disk space limit.
 | ||||||
| func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (bool, error) { | func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) error { | ||||||
| 	// Don't waste time trying to determine this if we know the server will have the space for
 | 	// Don't waste time trying to determine this if we know the server will have the space for
 | ||||||
| 	// it since there is no limit.
 | 	// it since there is no limit.
 | ||||||
| 	if fs.MaxDisk() <= 0 { | 	if fs.MaxDisk() <= 0 { | ||||||
| 		return true, nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	source, err := fs.SafePath(filepath.Join(dir, file)) | 	source, err := fs.SafePath(filepath.Join(dir, file)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return false, err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Get the cached size in a parallel process so that if it is not cached we are not
 | 	// Get the cached size in a parallel process so that if it is not cached we are not
 | ||||||
|  | @ -38,32 +39,28 @@ func (fs *Filesystem) SpaceAvailableForDecompression(dir string, file string) (b | ||||||
| 		if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() { | 		if atomic.AddInt64(&size, f.Size())+dirSize > fs.MaxDisk() { | ||||||
| 			return &Error{code: ErrCodeDiskSpace} | 			return &Error{code: ErrCodeDiskSpace} | ||||||
| 		} | 		} | ||||||
| 
 |  | ||||||
| 		return nil | 		return nil | ||||||
| 	}) | 	}) | ||||||
| 
 |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		if strings.HasPrefix(err.Error(), "format ") { | 		if strings.HasPrefix(err.Error(), "format ") { | ||||||
| 			return false, &Error{code: ErrCodeUnknownArchive} | 			return &Error{code: ErrCodeUnknownArchive} | ||||||
|  | 		} | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	return err | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| 		return false, err | // DecompressFile will decompress a file in a given directory by using the
 | ||||||
| 	} | // archiver tool to infer the file type and go from there. This will walk over
 | ||||||
| 
 | // all of the files within the given archive and ensure that there is not a
 | ||||||
| 	return true, err | // zip-slip attack being attempted by validating that the final path is within
 | ||||||
| } | // the server data directory.
 | ||||||
| 
 |  | ||||||
| // Decompress a file in a given directory by using the archiver tool to infer the file
 |  | ||||||
| // type and go from there. This will walk over all of the files within the given archive
 |  | ||||||
| // and ensure that there is not a zip-slip attack being attempted by validating that the
 |  | ||||||
| // final path is within the server data directory.
 |  | ||||||
| func (fs *Filesystem) DecompressFile(dir string, file string) error { | func (fs *Filesystem) DecompressFile(dir string, file string) error { | ||||||
| 	source, err := fs.SafePath(filepath.Join(dir, file)) | 	source, err := fs.SafePath(filepath.Join(dir, file)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 	// Ensure that the source archive actually exists on the system.
 | ||||||
| 	// Make sure the file exists basically.
 |  | ||||||
| 	if _, err := os.Stat(source); err != nil { | 	if _, err := os.Stat(source); err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  | @ -79,7 +76,6 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error { | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		var name string | 		var name string | ||||||
| 
 |  | ||||||
| 		switch s := f.Sys().(type) { | 		switch s := f.Sys().(type) { | ||||||
| 		case *tar.Header: | 		case *tar.Header: | ||||||
| 			name = s.Name | 			name = s.Name | ||||||
|  | @ -88,23 +84,28 @@ func (fs *Filesystem) DecompressFile(dir string, file string) error { | ||||||
| 		case *zip.FileHeader: | 		case *zip.FileHeader: | ||||||
| 			name = s.Name | 			name = s.Name | ||||||
| 		default: | 		default: | ||||||
| 			return errors.New(fmt.Sprintf("could not parse underlying data source with type %s", reflect.TypeOf(s).String())) | 			return &Error{ | ||||||
|  | 				code:     ErrCodeUnknownError, | ||||||
|  | 				resolved: filepath.Join(dir, f.Name()), | ||||||
|  | 				err:      errors.New(fmt.Sprintf("could not parse underlying data source with type: %s", reflect.TypeOf(s).String())), | ||||||
|  | 			} | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		p, err := fs.SafePath(filepath.Join(dir, name)) | 		p := filepath.Join(dir, name) | ||||||
| 		if err != nil { | 		// If it is ignored, just don't do anything with the file and skip over it.
 | ||||||
| 			return errors.WithMessage(err, "failed to generate a safe path to server file") | 		if err := fs.IsIgnored(p); err != nil { | ||||||
|  | 			return nil | ||||||
| 		} | 		} | ||||||
| 
 | 		if err := fs.Writefile(p, f); err != nil { | ||||||
| 		return errors.WithMessage(fs.Writefile(p, f), "could not extract file from archive") | 			return &Error{code: ErrCodeUnknownError, err: err, resolved: source} | ||||||
|  | 		} | ||||||
|  | 		return nil | ||||||
| 	}) | 	}) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		if strings.HasPrefix(err.Error(), "format ") { | 		if strings.HasPrefix(err.Error(), "format ") { | ||||||
| 			return &Error{code: ErrCodeUnknownArchive} | 			return &Error{code: ErrCodeUnknownArchive} | ||||||
| 		} | 		} | ||||||
| 
 |  | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  |  | ||||||
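Outside the filesystem package, callers now handle the disk-space check as a plain error rather than a boolean. A rough caller sketch under assumed names (the package, helper function, and archive path below are made up for illustration):

package router // hypothetical caller

import (
	"emperror.dev/errors"

	"github.com/pterodactyl/wings/server/filesystem"
)

func unpackBackup(fs *filesystem.Filesystem) error {
	// The space check now returns a filesystem *Error with ErrCodeDiskSpace
	// instead of the old (false, nil) pair.
	if err := fs.SpaceAvailableForDecompression("/", "backup.tar.gz"); err != nil {
		if filesystem.IsErrorCode(err, filesystem.ErrCodeDiskSpace) {
			return errors.New("cannot unpack archive: disk space limit exceeded")
		}
		return err
	}
	return fs.DecompressFile("/", "backup.tar.gz")
}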
|  | @ -1,11 +1,12 @@ | ||||||
| package filesystem | package filesystem | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"emperror.dev/errors" |  | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"github.com/apex/log" |  | ||||||
| 	"os" | 	"os" | ||||||
| 	"path/filepath" | 	"path/filepath" | ||||||
|  | 
 | ||||||
|  | 	"emperror.dev/errors" | ||||||
|  | 	"github.com/apex/log" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type ErrorCode string | type ErrorCode string | ||||||
|  | @ -15,61 +16,61 @@ const ( | ||||||
| 	ErrCodeDiskSpace      ErrorCode = "E_NODISK" | 	ErrCodeDiskSpace      ErrorCode = "E_NODISK" | ||||||
| 	ErrCodeUnknownArchive ErrorCode = "E_UNKNFMT" | 	ErrCodeUnknownArchive ErrorCode = "E_UNKNFMT" | ||||||
| 	ErrCodePathResolution ErrorCode = "E_BADPATH" | 	ErrCodePathResolution ErrorCode = "E_BADPATH" | ||||||
|  | 	ErrCodeDenylistFile   ErrorCode = "E_DENYLIST" | ||||||
|  | 	ErrCodeUnknownError   ErrorCode = "E_UNKNOWN" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type Error struct { | type Error struct { | ||||||
| 	code ErrorCode | 	code ErrorCode | ||||||
| 	path     string | 	// Contains the underlying error leading to this. This value may or may not be
 | ||||||
|  | 	// present; it is entirely dependent on how this error was triggered.
 | ||||||
|  | 	err error | ||||||
|  | 	// This contains the value of the final destination that triggered this specific
 | ||||||
|  | 	// error event.
 | ||||||
| 	resolved string | 	resolved string | ||||||
|  | 	// This value is generally only present on errors stemming from a path resolution
 | ||||||
|  | 	// error. For everything else you should be setting and reading the resolved path
 | ||||||
|  | 	// value which will be far more useful.
 | ||||||
|  | 	path string | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Code returns the ErrorCode for this specific error instance.
 | ||||||
|  | func (e *Error) Code() ErrorCode { | ||||||
|  | 	return e.code | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Returns a human-readable error string to identify the Error by.
 | // Returns a human-readable error string to identify the Error by.
 | ||||||
| func (e *Error) Error() string { | func (e *Error) Error() string { | ||||||
| 	switch e.code { | 	switch e.code { | ||||||
| 	case ErrCodeIsDirectory: | 	case ErrCodeIsDirectory: | ||||||
| 		return "filesystem: is a directory" | 		return fmt.Sprintf("filesystem: cannot perform action: [%s] is a directory", e.resolved) | ||||||
| 	case ErrCodeDiskSpace: | 	case ErrCodeDiskSpace: | ||||||
| 		return "filesystem: not enough disk space" | 		return "filesystem: not enough disk space" | ||||||
| 	case ErrCodeUnknownArchive: | 	case ErrCodeUnknownArchive: | ||||||
| 		return "filesystem: unknown archive format" | 		return "filesystem: unknown archive format" | ||||||
|  | 	case ErrCodeDenylistFile: | ||||||
|  | 		r := e.resolved | ||||||
|  | 		if r == "" { | ||||||
|  | 			r = "<empty>" | ||||||
|  | 		} | ||||||
|  | 		return fmt.Sprintf("filesystem: file access prohibited: [%s] is on the denylist", r) | ||||||
| 	case ErrCodePathResolution: | 	case ErrCodePathResolution: | ||||||
| 		r := e.resolved | 		r := e.resolved | ||||||
| 		if r == "" { | 		if r == "" { | ||||||
| 			r = "<empty>" | 			r = "<empty>" | ||||||
| 		} | 		} | ||||||
| 		return fmt.Sprintf("filesystem: server path [%s] resolves to a location outside the server root: %s", e.path, r) | 		return fmt.Sprintf("filesystem: server path [%s] resolves to a location outside the server root: %s", e.path, r) | ||||||
|  | 	case ErrCodeUnknownError: | ||||||
|  | 		fallthrough | ||||||
|  | 	default: | ||||||
|  | 		return fmt.Sprintf("filesystem: an error occurred: %s", e.Cause()) | ||||||
| 	} | 	} | ||||||
| 	return "filesystem: unhandled error type" |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Returns the ErrorCode for this specific error instance.
 | // Cause returns the underlying cause of this filesystem error. In some cases
 | ||||||
| func (e *Error) Code() ErrorCode { | // there may not be a cause present, in which case nil will be returned.
 | ||||||
| 	return e.code | func (e *Error) Cause() error { | ||||||
| } | 	return e.err | ||||||
| 
 |  | ||||||
| // Checks if the given error is one of the Filesystem errors.
 |  | ||||||
| func IsFilesystemError(err error) (*Error, bool) { |  | ||||||
| 	if e := errors.Unwrap(err); e != nil { |  | ||||||
| 		err = e |  | ||||||
| 	} |  | ||||||
| 	if fserr, ok := err.(*Error); ok { |  | ||||||
| 		return fserr, true |  | ||||||
| 	} |  | ||||||
| 	return nil, false |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Checks if "err" is a filesystem Error type. If so, it will then drop in and check
 |  | ||||||
| // that the error code is the same as the provided ErrorCode passed in "code".
 |  | ||||||
| func IsErrorCode(err error, code ErrorCode) bool { |  | ||||||
| 	if e, ok := IsFilesystemError(err); ok { |  | ||||||
| 		return e.code == code |  | ||||||
| 	} |  | ||||||
| 	return false |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // Returns a new BadPathResolution error.
 |  | ||||||
| func NewBadPathResolution(path string, resolved string) *Error { |  | ||||||
| 	return &Error{code: ErrCodePathResolution, path: path, resolved: resolved} |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Generates an error logger instance with some basic information.
 | // Generates an error logger instance with some basic information.
 | ||||||
|  | @ -86,10 +87,46 @@ func (fs *Filesystem) handleWalkerError(err error, f os.FileInfo) error { | ||||||
| 	if !IsErrorCode(err, ErrCodePathResolution) { | 	if !IsErrorCode(err, ErrCodePathResolution) { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	if f != nil && f.IsDir() { | 	if f != nil && f.IsDir() { | ||||||
| 		return filepath.SkipDir | 		return filepath.SkipDir | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | // IsFilesystemError checks if the given error is one of the Filesystem errors.
 | ||||||
|  | func IsFilesystemError(err error) bool { | ||||||
|  | 	var fserr *Error | ||||||
|  | 	if err != nil && errors.As(err, &fserr) { | ||||||
|  | 		return true | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // IsErrorCode checks if "err" is a filesystem Error type. If so, it will then
 | ||||||
|  | // drop in and check that the error code is the same as the provided ErrorCode
 | ||||||
|  | // passed in "code".
 | ||||||
|  | func IsErrorCode(err error, code ErrorCode) bool { | ||||||
|  | 	var fserr *Error | ||||||
|  | 	if err != nil && errors.As(err, &fserr) { | ||||||
|  | 		return fserr.code == code | ||||||
|  | 	} | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewBadPathResolution returns a new BadPathResolution error.
 | ||||||
|  | func NewBadPathResolution(path string, resolved string) *Error { | ||||||
|  | 	return &Error{code: ErrCodePathResolution, path: path, resolved: resolved} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // WrapError wraps the provided error as a Filesystem error and attaches the
 | ||||||
|  | // provided resolved source to it. If the error is already a Filesystem error
 | ||||||
|  | // no action is taken.
 | ||||||
|  | func WrapError(err error, resolved string) *Error { | ||||||
|  | 	if err == nil { | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 	if IsFilesystemError(err) { | ||||||
|  | 		return err.(*Error) | ||||||
|  | 	} | ||||||
|  | 	return &Error{code: ErrCodeUnknownError, err: err, resolved: resolved} | ||||||
|  | } | ||||||
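The exported Code, Cause, IsFilesystemError, and IsErrorCode helpers above let calling layers translate filesystem failures into client-facing responses. A hedged sketch of one possible mapping (the status codes and package are illustrative, not the project's actual router logic):

package router // hypothetical caller

import (
	"net/http"

	"github.com/pterodactyl/wings/server/filesystem"
)

func statusForFilesystemError(err error) int {
	if !filesystem.IsFilesystemError(err) {
		return http.StatusInternalServerError
	}
	switch {
	case filesystem.IsErrorCode(err, filesystem.ErrCodeDenylistFile):
		// Access to a denylisted file is a permissions problem, not a server fault.
		return http.StatusForbidden
	case filesystem.IsErrorCode(err, filesystem.ErrCodeDiskSpace):
		return http.StatusConflict
	case filesystem.IsErrorCode(err, filesystem.ErrCodePathResolution):
		return http.StatusNotFound
	default:
		return http.StatusInternalServerError
	}
}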
|  | @ -2,11 +2,6 @@ package filesystem | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"bufio" | 	"bufio" | ||||||
| 	"emperror.dev/errors" |  | ||||||
| 	"github.com/gabriel-vasile/mimetype" |  | ||||||
| 	"github.com/karrick/godirwalk" |  | ||||||
| 	"github.com/pterodactyl/wings/config" |  | ||||||
| 	"github.com/pterodactyl/wings/system" |  | ||||||
| 	"io" | 	"io" | ||||||
| 	"io/ioutil" | 	"io/ioutil" | ||||||
| 	"os" | 	"os" | ||||||
|  | @ -17,6 +12,13 @@ import ( | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"sync" | 	"sync" | ||||||
| 	"time" | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"emperror.dev/errors" | ||||||
|  | 	"github.com/gabriel-vasile/mimetype" | ||||||
|  | 	"github.com/karrick/godirwalk" | ||||||
|  | 	"github.com/pterodactyl/wings/config" | ||||||
|  | 	"github.com/pterodactyl/wings/system" | ||||||
|  | 	ignore "github.com/sabhiram/go-gitignore" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type Filesystem struct { | type Filesystem struct { | ||||||
|  | @ -25,6 +27,7 @@ type Filesystem struct { | ||||||
| 	lookupInProgress  *system.AtomicBool | 	lookupInProgress  *system.AtomicBool | ||||||
| 	diskUsed          int64 | 	diskUsed          int64 | ||||||
| 	diskCheckInterval time.Duration | 	diskCheckInterval time.Duration | ||||||
|  | 	denylist          *ignore.GitIgnore | ||||||
| 
 | 
 | ||||||
| 	// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
 | 	// The maximum amount of disk space (in bytes) that this Filesystem instance can use.
 | ||||||
| 	diskLimit int64 | 	diskLimit int64 | ||||||
|  | @ -35,42 +38,78 @@ type Filesystem struct { | ||||||
| 	isTest bool | 	isTest bool | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Creates a new Filesystem instance for a given server.
 | // New creates a new Filesystem instance for a given server.
 | ||||||
| func New(root string, size int64) *Filesystem { | func New(root string, size int64, denylist []string) *Filesystem { | ||||||
| 	return &Filesystem{ | 	return &Filesystem{ | ||||||
| 		root:              root, | 		root:              root, | ||||||
| 		diskLimit:         size, | 		diskLimit:         size, | ||||||
| 		diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval), | 		diskCheckInterval: time.Duration(config.Get().System.DiskCheckInterval), | ||||||
| 		lastLookupTime:    &usageLookupTime{}, | 		lastLookupTime:    &usageLookupTime{}, | ||||||
| 		lookupInProgress:  system.NewAtomicBool(false), | 		lookupInProgress:  system.NewAtomicBool(false), | ||||||
|  | 		denylist:          ignore.CompileIgnoreLines(denylist...), | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
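The new third argument to New is the Egg's file_denylist, compiled once into gitignore-style matchers. A minimal construction sketch (root, limit, and patterns are made up; it assumes an import of github.com/pterodactyl/wings/server/filesystem, and the test helper later in this diff does the same thing with an empty list):

func newServerFilesystem(root string, limit int64) *filesystem.Filesystem {
	// Denylist patterns behave like .gitignore entries.
	return filesystem.New(root, limit, []string{"*.jar", "config/ops.json"})
}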
| // Returns the root path for the Filesystem instance.
 | // Path returns the root path for the Filesystem instance.
 | ||||||
| func (fs *Filesystem) Path() string { | func (fs *Filesystem) Path() string { | ||||||
| 	return fs.root | 	return fs.root | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Returns a reader for a file instance.
 | // File returns a reader for a file instance as well as the stat information.
 | ||||||
| func (fs *Filesystem) File(p string) (*os.File, os.FileInfo, error) { | func (fs *Filesystem) File(p string) (*os.File, Stat, error) { | ||||||
| 	cleaned, err := fs.SafePath(p) | 	cleaned, err := fs.SafePath(p) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, nil, err | 		return nil, Stat{}, err | ||||||
| 	} | 	} | ||||||
| 	st, err := os.Stat(cleaned) | 	st, err := fs.Stat(cleaned) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, nil, err | 		return nil, Stat{}, err | ||||||
| 	} | 	} | ||||||
| 	if st.IsDir() { | 	if st.IsDir() { | ||||||
| 		return nil, nil, &Error{code: ErrCodeIsDirectory} | 		return nil, Stat{}, &Error{code: ErrCodeIsDirectory} | ||||||
| 	} | 	} | ||||||
| 	f, err := os.Open(cleaned) | 	f, err := os.Open(cleaned) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, nil, err | 		return nil, Stat{}, err | ||||||
| 	} | 	} | ||||||
| 	return f, st, nil | 	return f, st, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // Touch acts by creating the given file and path on the disk if it is not present already. If
 | ||||||
|  | // it is present, the file is opened using the defaults which will truncate the contents.
 | ||||||
|  | // The opened file is then returned to the caller.
 | ||||||
|  | func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) { | ||||||
|  | 	cleaned, err := fs.SafePath(p) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	f, err := os.OpenFile(cleaned, flag, 0644) | ||||||
|  | 	if err == nil { | ||||||
|  | 		return f, nil | ||||||
|  | 	} | ||||||
|  | 	// If the error is not because it doesn't exist then we just need to bail at this point.
 | ||||||
|  | 	if !errors.Is(err, os.ErrNotExist) { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	// Create the path leading up to the file we're trying to create, setting the final perms
 | ||||||
|  | 	// on it as we go.
 | ||||||
|  | 	if err := os.MkdirAll(filepath.Dir(cleaned), 0755); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	if err := fs.Chown(filepath.Dir(cleaned)); err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	o := &fileOpener{} | ||||||
|  | 	// Try to open the file now that we have created the pathing necessary for it, and then
 | ||||||
|  | 	// Chown that file so that the permissions don't mess with things.
 | ||||||
|  | 	f, err = o.open(cleaned, flag, 0644) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return nil, err | ||||||
|  | 	} | ||||||
|  | 	_ = fs.Chown(cleaned) | ||||||
|  | 	return f, nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
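Touch is the create-if-missing primitive that Writefile below and the SFTP Filewrite handler both build on. A rough usage sketch (the path and content are hypothetical; assumes the os and filesystem imports used in the earlier sketches):

func appendStartupMarker(fs *filesystem.Filesystem) error {
	// Creates logs/latest.log plus any missing parent directories on first use
	// and chowns them; later calls simply open the existing file for appending.
	f, err := fs.Touch("logs/latest.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.WriteString("server started\n")
	return err
}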
| // Reads a file on the system and returns it as a byte representation in a file
 | // Reads a file on the system and returns it as a byte representation in a file
 | ||||||
| // reader. This is not the most memory efficient usage since it will be reading the
 | // reader. This is not the most memory efficient usage since it will be reading the
 | ||||||
| // entirety of the file into memory.
 | // entirety of the file into memory.
 | ||||||
|  | @ -84,7 +123,8 @@ func (fs *Filesystem) Readfile(p string, w io.Writer) error { | ||||||
| 	return err | 	return err | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Writes a file to the system. If the file does not already exist one will be created.
 | // Writefile writes a file to the system. If the file does not already exist one
 | ||||||
|  | // will be created.
 | ||||||
| func (fs *Filesystem) Writefile(p string, r io.Reader) error { | func (fs *Filesystem) Writefile(p string, r io.Reader) error { | ||||||
| 	cleaned, err := fs.SafePath(p) | 	cleaned, err := fs.SafePath(p) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
|  | @ -99,7 +139,7 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error { | ||||||
| 		return err | 		return err | ||||||
| 	} else if err == nil { | 	} else if err == nil { | ||||||
| 		if stat.IsDir() { | 		if stat.IsDir() { | ||||||
| 			return &Error{code: ErrCodeIsDirectory} | 			return &Error{code: ErrCodeIsDirectory, resolved: cleaned} | ||||||
| 		} | 		} | ||||||
| 		currentSize = stat.Size() | 		currentSize = stat.Size() | ||||||
| 	} | 	} | ||||||
|  | @ -112,22 +152,9 @@ func (fs *Filesystem) Writefile(p string, r io.Reader) error { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// If we were unable to stat the location because it did not exist, go ahead and create
 | 	// Touch the file and return the handle to it at this point. This will create the file
 | ||||||
| 	// it now. We do this after checking the disk space so that we do not just create empty
 | 	// and any necessary directories as needed.
 | ||||||
| 	// directories at random.
 | 	file, err := fs.Touch(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC) | ||||||
| 	if err != nil { |  | ||||||
| 		if err := os.MkdirAll(filepath.Dir(cleaned), 0755); err != nil { |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 		if err := fs.Chown(filepath.Dir(cleaned)); err != nil { |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	o := &fileOpener{} |  | ||||||
| 	// This will either create the file if it does not already exist, or open and
 |  | ||||||
| 	// truncate the existing file.
 |  | ||||||
| 	file, err := o.open(cleaned, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
|  | @ -150,7 +177,6 @@ func (fs *Filesystem) CreateDirectory(name string, p string) error { | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	return os.MkdirAll(cleaned, 0755) | 	return os.MkdirAll(cleaned, 0755) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -411,9 +437,9 @@ func (fo *fileOpener) open(path string, flags int, perm os.FileMode) (*os.File, | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Lists the contents of a given directory and returns stat information about each
 | // ListDirectory lists the contents of a given directory and returns stat
 | ||||||
| // file and folder within it.
 | // information about each file and folder within it.
 | ||||||
| func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) { | func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) { | ||||||
| 	cleaned, err := fs.SafePath(p) | 	cleaned, err := fs.SafePath(p) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
|  | @ -429,7 +455,7 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) { | ||||||
| 	// You must initialize the output of this directory as a non-nil value otherwise
 | 	// You must initialize the output of this directory as a non-nil value otherwise
 | ||||||
| 	// when it is marshaled into a JSON object you'll just get 'null' back, which will
 | 	// when it is marshaled into a JSON object you'll just get 'null' back, which will
 | ||||||
| 	// break the panel badly.
 | 	// break the panel badly.
 | ||||||
| 	out := make([]*Stat, len(files)) | 	out := make([]Stat, len(files)) | ||||||
| 
 | 
 | ||||||
| 	// Iterate over all of the files and directories returned and perform an async process
 | 	// Iterate over all of the files and directories returned and perform an async process
 | ||||||
| 	// to get the mime-type for them all.
 | 	// to get the mime-type for them all.
 | ||||||
|  | @ -456,15 +482,10 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) { | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			st := &Stat{ | 			st := Stat{FileInfo: f, Mimetype: d} | ||||||
| 				Info:     f, |  | ||||||
| 				Mimetype: d, |  | ||||||
| 			} |  | ||||||
| 
 |  | ||||||
| 			if m != nil { | 			if m != nil { | ||||||
| 				st.Mimetype = m.String() | 				st.Mimetype = m.String() | ||||||
| 			} | 			} | ||||||
| 
 |  | ||||||
| 			out[idx] = st | 			out[idx] = st | ||||||
| 		}(i, file) | 		}(i, file) | ||||||
| 	} | 	} | ||||||
|  | @ -474,17 +495,16 @@ func (fs *Filesystem) ListDirectory(p string) ([]*Stat, error) { | ||||||
| 	// Sort the output alphabetically to begin with since we've run the output
 | 	// Sort the output alphabetically to begin with since we've run the output
 | ||||||
| 	// through an asynchronous process and the order is gonna be very random.
 | 	// through an asynchronous process and the order is gonna be very random.
 | ||||||
| 	sort.SliceStable(out, func(i, j int) bool { | 	sort.SliceStable(out, func(i, j int) bool { | ||||||
| 		if out[i].Info.Name() == out[j].Info.Name() || out[i].Info.Name() > out[j].Info.Name() { | 		if out[i].Name() == out[j].Name() || out[i].Name() > out[j].Name() { | ||||||
| 			return true | 			return true | ||||||
| 		} | 		} | ||||||
| 
 |  | ||||||
| 		return false | 		return false | ||||||
| 	}) | 	}) | ||||||
| 
 | 
 | ||||||
| 	// Then, sort it so that directories are listed first in the output. Everything
 | 	// Then, sort it so that directories are listed first in the output. Everything
 | ||||||
| 	// will continue to be alphabetized at this point.
 | 	// will continue to be alphabetized at this point.
 | ||||||
| 	sort.SliceStable(out, func(i, j int) bool { | 	sort.SliceStable(out, func(i, j int) bool { | ||||||
| 		return out[i].Info.IsDir() | 		return out[i].IsDir() | ||||||
| 	}) | 	}) | ||||||
| 
 | 
 | ||||||
| 	return out, nil | 	return out, nil | ||||||
|  |  | ||||||
|  | @ -3,8 +3,6 @@ package filesystem | ||||||
| import ( | import ( | ||||||
| 	"bytes" | 	"bytes" | ||||||
| 	"errors" | 	"errors" | ||||||
| 	. "github.com/franela/goblin" |  | ||||||
| 	"github.com/pterodactyl/wings/config" |  | ||||||
| 	"io/ioutil" | 	"io/ioutil" | ||||||
| 	"math/rand" | 	"math/rand" | ||||||
| 	"os" | 	"os" | ||||||
|  | @ -12,6 +10,9 @@ import ( | ||||||
| 	"sync/atomic" | 	"sync/atomic" | ||||||
| 	"testing" | 	"testing" | ||||||
| 	"unicode/utf8" | 	"unicode/utf8" | ||||||
|  | 
 | ||||||
|  | 	. "github.com/franela/goblin" | ||||||
|  | 	"github.com/pterodactyl/wings/config" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func NewFs() (*Filesystem, *rootFs) { | func NewFs() (*Filesystem, *rootFs) { | ||||||
|  | @ -33,7 +34,7 @@ func NewFs() (*Filesystem, *rootFs) { | ||||||
| 
 | 
 | ||||||
| 	rfs.reset() | 	rfs.reset() | ||||||
| 
 | 
 | ||||||
| 	fs := New(filepath.Join(tmpDir, "/server"), 0) | 	fs := New(filepath.Join(tmpDir, "/server"), 0, []string{}) | ||||||
| 	fs.isTest = true | 	fs.isTest = true | ||||||
| 
 | 
 | ||||||
| 	return fs, &rfs | 	return fs, &rfs | ||||||
|  |  | ||||||
|  | @ -2,13 +2,29 @@ package filesystem | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"golang.org/x/sync/errgroup" |  | ||||||
| 	"os" | 	"os" | ||||||
| 	"path/filepath" | 	"path/filepath" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"sync" | 	"sync" | ||||||
|  | 
 | ||||||
|  | 	"golang.org/x/sync/errgroup" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | // IsIgnored checks if the given file or path is in the server's file denylist. If so, an Error
 | ||||||
|  | // is returned, otherwise nil is returned.
 | ||||||
|  | func (fs *Filesystem) IsIgnored(paths ...string) error { | ||||||
|  | 	for _, p := range paths { | ||||||
|  | 		sp, err := fs.SafePath(p) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		if fs.denylist.MatchesPath(sp) { | ||||||
|  | 			return &Error{code: ErrCodeDenylistFile, path: p, resolved: sp} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
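IsIgnored is the same check DecompressFile (earlier in this diff) uses to skip denylisted archive entries; other callers can apply it before serving file contents. A hedged sketch (helper name and flow are hypothetical; assumes the os and filesystem imports used in the earlier sketches):

func openForDownload(fs *filesystem.Filesystem, p string) (*os.File, error) {
	// A denylisted path is returned as an *Error carrying ErrCodeDenylistFile.
	if err := fs.IsIgnored(p); err != nil {
		return nil, err
	}
	f, _, err := fs.File(p)
	return f, err
}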
| // Normalizes a directory being passed in to ensure the user is not able to escape
 | // Normalizes a directory being passed in to ensure the user is not able to escape
 | ||||||
| // from their data directory. After normalization if the directory is still within their home
 | // from their data directory. After normalization if the directory is still within their home
 | ||||||
| // path it is returned. If they managed to "escape" an error will be returned.
 | // path it is returned. If they managed to "escape" an error will be returned.
 | ||||||
|  |  | ||||||
|  | @ -2,14 +2,15 @@ package filesystem | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"encoding/json" | 	"encoding/json" | ||||||
| 	"github.com/gabriel-vasile/mimetype" |  | ||||||
| 	"os" | 	"os" | ||||||
| 	"strconv" | 	"strconv" | ||||||
| 	"time" | 	"time" | ||||||
|  | 
 | ||||||
|  | 	"github.com/gabriel-vasile/mimetype" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type Stat struct { | type Stat struct { | ||||||
| 	Info     os.FileInfo | 	os.FileInfo | ||||||
| 	Mimetype string | 	Mimetype string | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -26,50 +27,48 @@ func (s *Stat) MarshalJSON() ([]byte, error) { | ||||||
| 		Symlink   bool   `json:"symlink"` | 		Symlink   bool   `json:"symlink"` | ||||||
| 		Mime      string `json:"mime"` | 		Mime      string `json:"mime"` | ||||||
| 	}{ | 	}{ | ||||||
| 		Name:     s.Info.Name(), | 		Name:     s.Name(), | ||||||
| 		Created:  s.CTime().Format(time.RFC3339), | 		Created:  s.CTime().Format(time.RFC3339), | ||||||
| 		Modified: s.Info.ModTime().Format(time.RFC3339), | 		Modified: s.ModTime().Format(time.RFC3339), | ||||||
| 		Mode:     s.Info.Mode().String(), | 		Mode:     s.Mode().String(), | ||||||
| 		// Using `&os.ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
 | 		// Using `&os.ModePerm` on the file's mode will cause the mode to only have the permission values, and nothing else.
 | ||||||
| 		ModeBits:  strconv.FormatUint(uint64(s.Info.Mode()&os.ModePerm), 8), | 		ModeBits:  strconv.FormatUint(uint64(s.Mode()&os.ModePerm), 8), | ||||||
| 		Size:      s.Info.Size(), | 		Size:      s.Size(), | ||||||
| 		Directory: s.Info.IsDir(), | 		Directory: s.IsDir(), | ||||||
| 		File:      !s.Info.IsDir(), | 		File:      !s.IsDir(), | ||||||
| 		Symlink:   s.Info.Mode().Perm()&os.ModeSymlink != 0, | 		Symlink:   s.Mode().Perm()&os.ModeSymlink != 0, | ||||||
| 		Mime:      s.Mimetype, | 		Mime:      s.Mimetype, | ||||||
| 	}) | 	}) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Stats a file or folder and returns the base stat object from go along with the
 | // Stat stats a file or folder and returns the base stat object from go along
 | ||||||
| // MIME data that can be used for editing files.
 | // with the MIME data that can be used for editing files.
 | ||||||
| func (fs *Filesystem) Stat(p string) (*Stat, error) { | func (fs *Filesystem) Stat(p string) (Stat, error) { | ||||||
| 	cleaned, err := fs.SafePath(p) | 	cleaned, err := fs.SafePath(p) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return Stat{}, err | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	return fs.unsafeStat(cleaned) | 	return fs.unsafeStat(cleaned) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (fs *Filesystem) unsafeStat(p string) (*Stat, error) { | func (fs *Filesystem) unsafeStat(p string) (Stat, error) { | ||||||
| 	s, err := os.Stat(p) | 	s, err := os.Stat(p) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return Stat{}, err | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	var m *mimetype.MIME | 	var m *mimetype.MIME | ||||||
| 	if !s.IsDir() { | 	if !s.IsDir() { | ||||||
| 		m, err = mimetype.DetectFile(p) | 		m, err = mimetype.DetectFile(p) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return nil, err | 			return Stat{}, err | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	st := &Stat{ | 	st := Stat{ | ||||||
| 		Info:     s, | 		FileInfo: s, | ||||||
| 		Mimetype: "inode/directory", | 		Mimetype: "inode/directory", | ||||||
| 	} | 	} | ||||||
| 
 |  | ||||||
| 	if m != nil { | 	if m != nil { | ||||||
| 		st.Mimetype = m.String() | 		st.Mimetype = m.String() | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
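Since Stat now embeds os.FileInfo instead of carrying it in an Info field, the promoted methods are called directly and Mimetype is the only extra field. A short hypothetical caller (assumes the apex/log and filesystem imports used elsewhere in this diff):

func logFileInfo(fs *filesystem.Filesystem, p string) error {
	st, err := fs.Stat(p)
	if err != nil {
		return err
	}
	// Name, Size, and friends come straight from the embedded os.FileInfo.
	log.WithField("size", st.Size()).WithField("mime", st.Mimetype).Debug(st.Name())
	return nil
}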
|  | @ -5,9 +5,9 @@ import ( | ||||||
| 	"time" | 	"time" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // Returns the time that the file/folder was created.
 | // CTime returns the time that the file/folder was created.
 | ||||||
| func (s *Stat) CTime() time.Time { | func (s *Stat) CTime() time.Time { | ||||||
| 	st := s.Info.Sys().(*syscall.Stat_t) | 	st := s.Sys().(*syscall.Stat_t) | ||||||
| 
 | 
 | ||||||
| 	return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec) | 	return time.Unix(st.Ctimespec.Sec, st.Ctimespec.Nsec) | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -7,7 +7,7 @@ import ( | ||||||
| 
 | 
 | ||||||
| // Returns the time that the file/folder was created.
 | // Returns the time that the file/folder was created.
 | ||||||
| func (s *Stat) CTime() time.Time { | func (s *Stat) CTime() time.Time { | ||||||
| 	st := s.Info.Sys().(*syscall.Stat_t) | 	st := s.Sys().(*syscall.Stat_t) | ||||||
| 
 | 
 | ||||||
| 	// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
 | 	// Do not remove these "redundant" type-casts, they are required for 32-bit builds to work.
 | ||||||
| 	return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)) | 	return time.Unix(int64(st.Ctim.Sec), int64(st.Ctim.Nsec)) | ||||||
|  |  | ||||||
|  | @ -8,5 +8,5 @@ import ( | ||||||
| // However, I have no idea how to do this on windows, so we're skipping it
 | // However, I have no idea how to do this on windows, so we're skipping it
 | ||||||
| // for right now.
 | // for right now.
 | ||||||
| func (s *Stat) CTime() time.Time { | func (s *Stat) CTime() time.Time { | ||||||
| 	return s.Info.ModTime() | 	return s.ModTime() | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -127,7 +127,7 @@ func NewInstallationProcess(s *Server, script *api.InstallationScript) (*Install | ||||||
| 		Server: s, | 		Server: s, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	if c, err := environment.DockerClient(); err != nil { | 	if c, err := environment.Docker(); err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} else { | 	} else { | ||||||
| 		proc.client = c | 		proc.client = c | ||||||
|  | @ -326,7 +326,7 @@ func (ip *InstallationProcess) BeforeExecute() error { | ||||||
| 
 | 
 | ||||||
| // Returns the log path for the installation process.
 | // Returns the log path for the installation process.
 | ||||||
| func (ip *InstallationProcess) GetLogPath() string { | func (ip *InstallationProcess) GetLogPath() string { | ||||||
| 	return filepath.Join(config.Get().System.GetInstallLogPath(), ip.Server.Id()+".log") | 	return filepath.Join(config.Get().System.LogDirectory, "/install", ip.Server.Id()+".log") | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Cleans up after the execution of the installation process. This grabs the logs from the
 | // Cleans up after the execution of the installation process. This grabs the logs from the
 | ||||||
|  | @ -447,6 +447,14 @@ func (ip *InstallationProcess) Execute() (string, error) { | ||||||
| 		NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode), | 		NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode), | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | 	// Ensure the root directory for the server exists properly before attempting
 | ||||||
|  | 	// to trigger the reinstall of the server. It is possible the directory would
 | ||||||
|  | 	// not exist when this runs if Wings boots with a missing directory and a user
 | ||||||
|  | 	// triggers a reinstall before trying to start the server.
 | ||||||
|  | 	if err := ip.Server.EnsureDataDirectoryExists(); err != nil { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	ip.Server.Log().WithField("install_script", ip.tempDir()+"/install.sh").Info("creating install container for server process") | 	ip.Server.Log().WithField("install_script", ip.tempDir()+"/install.sh").Info("creating install container for server process") | ||||||
| 	// Remove the temporary directory when the installation process finishes for this server container.
 | 	// Remove the temporary directory when the installation process finishes for this server container.
 | ||||||
| 	defer func() { | 	defer func() { | ||||||
|  |  | ||||||
|  | @ -136,7 +136,7 @@ func (s *Server) StartEventListeners() { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	s.Log().Info("registering event listeners: console, state, resources...") | 	s.Log().Debug("registering event listeners: console, state, resources...") | ||||||
| 	s.Environment.Events().On(environment.ConsoleOutputEvent, &console) | 	s.Environment.Events().On(environment.ConsoleOutputEvent, &console) | ||||||
| 	s.Environment.Events().On(environment.StateChangeEvent, &state) | 	s.Environment.Events().On(environment.StateChangeEvent, &state) | ||||||
| 	s.Environment.Events().On(environment.ResourceEvent, &stats) | 	s.Environment.Events().On(environment.ResourceEvent, &stats) | ||||||
|  |  | ||||||
|  | @ -90,7 +90,7 @@ func FromConfiguration(data api.ServerConfigurationResponse) (*Server, error) { | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	s.Archiver = Archiver{Server: s} | 	s.Archiver = Archiver{Server: s} | ||||||
| 	s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace()) | 	s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace(), s.Config().Egg.FileDenylist) | ||||||
| 
 | 
 | ||||||
| 	// Right now we only support a Docker based environment, so I'm going to hard code
 | 	// Right now we only support a Docker based environment, so I'm going to hard code
 | ||||||
| 	// this logic in. When we're ready to support other environment we'll need to make
 | 	// this logic in. When we're ready to support other environment we'll need to make
 | ||||||
|  |  | ||||||
|  | @ -3,6 +3,7 @@ package server | ||||||
| import ( | import ( | ||||||
| 	"context" | 	"context" | ||||||
| 	"fmt" | 	"fmt" | ||||||
|  | 	"os" | ||||||
| 	"strings" | 	"strings" | ||||||
| 	"sync" | 	"sync" | ||||||
| 
 | 
 | ||||||
|  | @ -224,3 +225,27 @@ func (s *Server) ProcessConfiguration() *api.ProcessConfiguration { | ||||||
| 
 | 
 | ||||||
| 	return s.procConfig | 	return s.procConfig | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | // Filesystem returns an instance of the filesystem for this server.
 | ||||||
|  | func (s *Server) Filesystem() *filesystem.Filesystem { | ||||||
|  | 	return s.fs | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // EnsureDataDirectoryExists ensures that the data directory for the server
 | ||||||
|  | // instance exists.
 | ||||||
|  | func (s *Server) EnsureDataDirectoryExists() error { | ||||||
|  | 	if _, err := os.Lstat(s.fs.Path()); err != nil { | ||||||
|  | 		if os.IsNotExist(err) { | ||||||
|  | 			s.Log().Debug("server: creating root directory and setting permissions") | ||||||
|  | 			if err := os.MkdirAll(s.fs.Path(), 0700); err != nil { | ||||||
|  | 				return errors.WithStack(err) | ||||||
|  | 			} | ||||||
|  | 			if err := s.fs.Chown("/"); err != nil { | ||||||
|  | 				s.Log().WithField("error", err).Warn("server: failed to chown server data directory") | ||||||
|  | 			} | ||||||
|  | 		} else { | ||||||
|  | 			return errors.WrapIf(err, "server: failed to stat server root directory") | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | @ -1,19 +0,0 @@ | ||||||
| package sftp |  | ||||||
| 
 |  | ||||||
| type fxerr uint32 |  | ||||||
| 
 |  | ||||||
| const ( |  | ||||||
| 	// Extends the default SFTP server to return a quota exceeded error to the client.
 |  | ||||||
| 	//
 |  | ||||||
| 	// @see https://tools.ietf.org/id/draft-ietf-secsh-filexfer-13.txt
 |  | ||||||
| 	ErrSshQuotaExceeded = fxerr(15) |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| func (e fxerr) Error() string { |  | ||||||
| 	switch e { |  | ||||||
| 	case ErrSshQuotaExceeded: |  | ||||||
| 		return "Quota Exceeded" |  | ||||||
| 	default: |  | ||||||
| 		return "Failure" |  | ||||||
| 	} |  | ||||||
| } |  | ||||||
							
								
								
									
418  sftp/handler.go
							|  | @ -5,31 +5,17 @@ import ( | ||||||
| 	"io/ioutil" | 	"io/ioutil" | ||||||
| 	"os" | 	"os" | ||||||
| 	"path/filepath" | 	"path/filepath" | ||||||
|  | 	"strings" | ||||||
| 	"sync" | 	"sync" | ||||||
| 
 | 
 | ||||||
|  | 	"emperror.dev/errors" | ||||||
| 	"github.com/apex/log" | 	"github.com/apex/log" | ||||||
| 	"github.com/patrickmn/go-cache" |  | ||||||
| 	"github.com/pkg/sftp" | 	"github.com/pkg/sftp" | ||||||
|  | 	"github.com/pterodactyl/wings/config" | ||||||
|  | 	"github.com/pterodactyl/wings/server/filesystem" | ||||||
|  | 	"golang.org/x/crypto/ssh" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type FileSystem struct { |  | ||||||
| 	UUID        string |  | ||||||
| 	Permissions []string |  | ||||||
| 	ReadOnly    bool |  | ||||||
| 	User        User |  | ||||||
| 	Cache       *cache.Cache |  | ||||||
| 
 |  | ||||||
| 	PathValidator func(fs FileSystem, p string) (string, error) |  | ||||||
| 	HasDiskSpace  func(fs FileSystem) bool |  | ||||||
| 
 |  | ||||||
| 	logger *log.Entry |  | ||||||
| 	lock   sync.Mutex |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| func (fs FileSystem) buildPath(p string) (string, error) { |  | ||||||
| 	return fs.PathValidator(fs, p) |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| const ( | const ( | ||||||
| 	PermissionFileRead        = "file.read" | 	PermissionFileRead        = "file.read" | ||||||
| 	PermissionFileReadContent = "file.read-content" | 	PermissionFileReadContent = "file.read-content" | ||||||
|  | @ -38,343 +24,269 @@ const ( | ||||||
| 	PermissionFileDelete      = "file.delete" | 	PermissionFileDelete      = "file.delete" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | type Handler struct { | ||||||
|  | 	permissions []string | ||||||
|  | 	mu          sync.Mutex | ||||||
|  | 	fs          *filesystem.Filesystem | ||||||
|  | 	logger      *log.Entry | ||||||
|  | 	ro          bool | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // NewHandler returns a new connection handler for the SFTP server. This allows a given user
 | ||||||
|  | // to access the underlying filesystem.
 | ||||||
|  | func NewHandler(sc *ssh.ServerConn, fs *filesystem.Filesystem) *Handler { | ||||||
|  | 	return &Handler{ | ||||||
|  | 		fs:          fs, | ||||||
|  | 		ro:          config.Get().System.Sftp.ReadOnly, | ||||||
|  | 		permissions: strings.Split(sc.Permissions.Extensions["permissions"], ","), | ||||||
|  | 		logger: log.WithFields(log.Fields{ | ||||||
|  | 			"subsystem": "sftp", | ||||||
|  | 			"username":  sc.User(), | ||||||
|  | 			"ip":        sc.RemoteAddr(), | ||||||
|  | 		}), | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Handlers returns the sftp.Handlers for this struct.
 | ||||||
|  | func (h *Handler) Handlers() sftp.Handlers { | ||||||
|  | 	return sftp.Handlers{ | ||||||
|  | 		FileGet:  h, | ||||||
|  | 		FilePut:  h, | ||||||
|  | 		FileCmd:  h, | ||||||
|  | 		FileList: h, | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
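A rough sketch of how this handler could be attached to a pkg/sftp request server for an authenticated SSH channel; the connection and channel plumbing are assumed to exist elsewhere and are not part of this diff (imports: io, github.com/pkg/sftp, golang.org/x/crypto/ssh, and the filesystem package):

func serveChannel(conn *ssh.ServerConn, channel ssh.Channel, fs *filesystem.Filesystem) error {
	// One request server per channel, backed by the per-server filesystem.
	rs := sftp.NewRequestServer(channel, NewHandler(conn, fs).Handlers())
	if err := rs.Serve(); err != nil && err != io.EOF {
		return err
	}
	return rs.Close()
}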
| // Fileread creates a reader for a file on the system and returns the reader back.
 | // Fileread creates a reader for a file on the system and returns the reader back.
 | ||||||
| func (fs FileSystem) Fileread(request *sftp.Request) (io.ReaderAt, error) { | func (h *Handler) Fileread(request *sftp.Request) (io.ReaderAt, error) { | ||||||
| 	// Check first if the user can actually open and view a file. This permission is named
 | 	// Check first if the user can actually open and view a file. This permission is named
 | ||||||
| 	// really poorly, but it is checking if they can read. There is an additional permission,
 | 	// really poorly, but it is checking if they can read. There is an additional permission,
 | ||||||
| 	// "save-files" which determines if they can write that file.
 | 	// "save-files" which determines if they can write that file.
 | ||||||
| 	if !fs.can(PermissionFileReadContent) { | 	if !h.can(PermissionFileReadContent) { | ||||||
| 		return nil, sftp.ErrSshFxPermissionDenied | 		return nil, sftp.ErrSSHFxPermissionDenied | ||||||
| 	} | 	} | ||||||
| 
 | 	h.mu.Lock() | ||||||
| 	p, err := fs.buildPath(request.Filepath) | 	defer h.mu.Unlock() | ||||||
|  | 	f, _, err := h.fs.File(request.Filepath) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, sftp.ErrSshFxNoSuchFile | 		if !errors.Is(err, os.ErrNotExist) { | ||||||
|  | 			h.logger.WithField("error", err).Error("error processing readfile request") | ||||||
|  | 			return nil, sftp.ErrSSHFxFailure | ||||||
| 		} | 		} | ||||||
| 
 | 		return nil, sftp.ErrSSHFxNoSuchFile | ||||||
| 	fs.lock.Lock() |  | ||||||
| 	defer fs.lock.Unlock() |  | ||||||
| 
 |  | ||||||
| 	if _, err := os.Stat(p); os.IsNotExist(err) { |  | ||||||
| 		return nil, sftp.ErrSshFxNoSuchFile |  | ||||||
| 	} else if err != nil { |  | ||||||
| 		fs.logger.WithField("error", err).Error("error while processing file stat") |  | ||||||
| 
 |  | ||||||
| 		return nil, sftp.ErrSshFxFailure |  | ||||||
| 	} | 	} | ||||||
| 
 | 	return f, nil | ||||||
| 	file, err := os.Open(p) |  | ||||||
| 	if err != nil { |  | ||||||
| 		fs.logger.WithField("source", p).WithField("error", err).Error("could not open file for reading") |  | ||||||
| 		return nil, sftp.ErrSshFxFailure |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return file, nil |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Filewrite handles the write actions for a file on the system.
 | // Filewrite handles the write actions for a file on the system.
 | ||||||
| func (fs FileSystem) Filewrite(request *sftp.Request) (io.WriterAt, error) { | func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) { | ||||||
| 	if fs.ReadOnly { | 	if h.ro { | ||||||
| 		return nil, sftp.ErrSshFxOpUnsupported | 		return nil, sftp.ErrSSHFxOpUnsupported | ||||||
| 	} | 	} | ||||||
| 
 | 	l := h.logger.WithField("source", request.Filepath) | ||||||
| 	p, err := fs.buildPath(request.Filepath) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, sftp.ErrSshFxNoSuchFile |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	l := fs.logger.WithField("source", p) |  | ||||||
| 
 |  | ||||||
| 	// If the user doesn't have enough space left on the server it should respond with an
 | 	// If the user doesn't have enough space left on the server it should respond with an
 | ||||||
| 	// error since we won't be letting them write this file to the disk.
 | 	// error since we won't be letting them write this file to the disk.
 | ||||||
| 	if !fs.HasDiskSpace(fs) { | 	if !h.fs.HasSpaceAvailable(true) { | ||||||
| 		return nil, ErrSshQuotaExceeded | 		return nil, ErrSSHQuotaExceeded | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	fs.lock.Lock() | 	h.mu.Lock() | ||||||
| 	defer fs.lock.Unlock() | 	defer h.mu.Unlock() | ||||||
| 
 | 	// The specific permission required to perform this action. If the file exists on the
 | ||||||
| 	stat, statErr := os.Stat(p) | 	// system already it only needs to be an update, otherwise we'll check for a create.
 | ||||||
| 	// If the file doesn't exist we need to create it, as well as the directory pathway
 | 	permission := PermissionFileUpdate | ||||||
| 	// leading up to where that file will be created.
 | 	_, sterr := h.fs.Stat(request.Filepath) | ||||||
| 	if os.IsNotExist(statErr) { | 	if sterr != nil { | ||||||
| 		// This is a different pathway than just editing an existing file. If it doesn't exist already
 | 		if !errors.Is(sterr, os.ErrNotExist) { | ||||||
| 		// we need to determine if this user has permission to create files.
 | 			l.WithField("error", sterr).Error("error while getting file reader") | ||||||
| 		if !fs.can(PermissionFileCreate) { | 			return nil, sftp.ErrSSHFxFailure | ||||||
| 			return nil, sftp.ErrSshFxPermissionDenied |  | ||||||
| 		} | 		} | ||||||
| 
 | 		permission = PermissionFileCreate | ||||||
| 		// Create all of the directories leading up to the location where this file is being created.
 |  | ||||||
| 		if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil { |  | ||||||
| 			l.WithFields(log.Fields{ |  | ||||||
| 				"path":  filepath.Dir(p), |  | ||||||
| 				"error": err, |  | ||||||
| 			}).Error("error making path for file") |  | ||||||
| 
 |  | ||||||
| 			return nil, sftp.ErrSshFxFailure |  | ||||||
| 	} | 	} | ||||||
| 
 | 	// Confirm the user has permission to perform this action BEFORE calling Touch, otherwise
 | ||||||
| 		file, err := os.Create(p) | 	// you'll potentially create a file on the system and then fail out because of user
 | ||||||
|  | 	// permission checking after the fact.
 | ||||||
|  | 	if !h.can(permission) { | ||||||
|  | 		return nil, sftp.ErrSSHFxPermissionDenied | ||||||
|  | 	} | ||||||
|  | 	f, err := h.fs.Touch(request.Filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 			l.WithField("error", err).Error("failed to create file") |  | ||||||
| 
 |  | ||||||
| 			return nil, sftp.ErrSshFxFailure |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		// Not failing here is intentional. We still made the file, it is just owned incorrectly
 |  | ||||||
| 		// and will likely cause some issues.
 |  | ||||||
| 		if err := os.Chown(p, fs.User.Uid, fs.User.Gid); err != nil { |  | ||||||
| 			l.WithField("error", err).Warn("failed to set permissions on file") |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		return file, nil |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// If the stat error isn't about the file not existing, there is some other issue
 |  | ||||||
| 	// at play and we need to go ahead and bail out of the process.
 |  | ||||||
| 	if statErr != nil { |  | ||||||
| 		l.WithField("error", statErr).Error("encountered error performing file stat") |  | ||||||
| 
 |  | ||||||
| 		return nil, sftp.ErrSshFxFailure |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// If we've made it here it means the file already exists and we don't need to do anything
 |  | ||||||
| 	// fancy to handle it. Just pass over the request flags so the system knows what the end
 |  | ||||||
| 	// goal with the file is going to be.
 |  | ||||||
| 	//
 |  | ||||||
| 	// But first, check that the user has permission to save modified files.
 |  | ||||||
| 	if !fs.can(PermissionFileUpdate) { |  | ||||||
| 		return nil, sftp.ErrSshFxPermissionDenied |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Not sure this would ever happen, but lets not find out.
 |  | ||||||
| 	if stat.IsDir() { |  | ||||||
| 		return nil, sftp.ErrSshFxOpUnsupported |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	file, err := os.Create(p) |  | ||||||
| 	if err != nil { |  | ||||||
| 		// Prevent errors if the file is deleted between the stat and this call.
 |  | ||||||
| 		if os.IsNotExist(err) { |  | ||||||
| 			return nil, sftp.ErrSSHFxNoSuchFile |  | ||||||
| 		} |  | ||||||
| 
 |  | ||||||
| 		l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system") | 		l.WithField("flags", request.Flags).WithField("error", err).Error("failed to open existing file on system") | ||||||
| 		return nil, sftp.ErrSshFxFailure | 		return nil, sftp.ErrSSHFxFailure | ||||||
| 	} | 	} | ||||||
| 
 | 	return f, nil | ||||||
| 	// Not failing here is intentional. We still made the file, it is just owned incorrectly
 |  | ||||||
| 	// and will likely cause some issues.
 |  | ||||||
| 	if err := os.Chown(p, fs.User.Uid, fs.User.Gid); err != nil { |  | ||||||
| 		l.WithField("error", err).Warn("error chowning file") |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	return file, nil |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Filecmd handler for basic SFTP system calls related to files, but not anything to do with reading
 | // Filecmd handler for basic SFTP system calls related to files, but not anything to do with reading
 | ||||||
| // or writing to those files.
 | // or writing to those files.
 | ||||||
| func (fs FileSystem) Filecmd(request *sftp.Request) error { | func (h *Handler) Filecmd(request *sftp.Request) error { | ||||||
| 	if fs.ReadOnly { | 	if h.ro { | ||||||
| 		return sftp.ErrSshFxOpUnsupported | 		return sftp.ErrSSHFxOpUnsupported | ||||||
| 	} | 	} | ||||||
| 
 | 	l := h.logger.WithField("source", request.Filepath) | ||||||
| 	p, err := fs.buildPath(request.Filepath) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return sftp.ErrSshFxNoSuchFile |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	l := fs.logger.WithField("source", p) |  | ||||||
| 
 |  | ||||||
| 	var target string |  | ||||||
| 	// If a target is provided in this request validate that it is going to the correct
 |  | ||||||
| 	// location for the server. If it is not, return an operation unsupported error. This
 |  | ||||||
| 	// is maybe not the best error response, but its not wrong either.
	if request.Target != "" {
		l = l.WithField("target", request.Target)
	}

	switch request.Method {
	// Allows a user to make changes to the permissions of a given file or directory
	// on their server using their SFTP client.
	case "Setstat":
		if !h.can(PermissionFileUpdate) {
			return sftp.ErrSSHFxPermissionDenied
		}
		mode := request.Attributes().FileMode().Perm()
		// If the client passes an invalid FileMode just use the default 0644.
		if mode == 0000 {
			mode = os.FileMode(0644)
		}
		// Force directories to be 0755.
		if request.Attributes().FileMode().IsDir() {
			mode = 0755
		}
		if err := h.fs.Chmod(request.Filepath, mode); err != nil {
			if errors.Is(err, os.ErrNotExist) {
				return sftp.ErrSSHFxNoSuchFile
			}
			l.WithField("error", err).Error("failed to perform setstat on item")
			return sftp.ErrSSHFxFailure
		}
		break
	// Support renaming a file (aka Move).
	case "Rename":
		if !h.can(PermissionFileUpdate) {
			return sftp.ErrSSHFxPermissionDenied
		}
		if err := h.fs.Rename(request.Filepath, request.Target); err != nil {
			if errors.Is(err, os.ErrNotExist) {
				return sftp.ErrSSHFxNoSuchFile
			}
			l.WithField("error", err).Error("failed to rename file")
			return sftp.ErrSSHFxFailure
		}
		break
	// Handle deletion of a directory. This will properly delete all of the files and
	// folders within that directory if it is not already empty (unlike a lot of SFTP
	// clients that must delete each file individually).
	case "Rmdir":
		if !h.can(PermissionFileDelete) {
			return sftp.ErrSSHFxPermissionDenied
		}
		if err := h.fs.Delete(request.Filepath); err != nil {
			l.WithField("error", err).Error("failed to remove directory")
			return sftp.ErrSSHFxFailure
		}
		return sftp.ErrSSHFxOk
	// Handle requests to create a new Directory.
	case "Mkdir":
		if !h.can(PermissionFileCreate) {
			return sftp.ErrSSHFxPermissionDenied
		}
		name := strings.Split(filepath.Clean(request.Filepath), "/")
		err := h.fs.CreateDirectory(name[len(name)-1], strings.Join(name[0:len(name)-1], "/"))
		if err != nil {
			l.WithField("error", err).Error("failed to create directory")
			return sftp.ErrSSHFxFailure
		}
		break
	// Support creating symlinks between files. The source and target must resolve within
	// the server home directory.
	case "Symlink":
		if !h.can(PermissionFileCreate) {
			return sftp.ErrSSHFxPermissionDenied
		}
		source, err := h.fs.SafePath(request.Filepath)
		if err != nil {
			return sftp.ErrSSHFxNoSuchFile
		}
		target, err := h.fs.SafePath(request.Target)
		if err != nil {
			return sftp.ErrSSHFxNoSuchFile
		}
		if err := os.Symlink(source, target); err != nil {
			l.WithField("target", target).WithField("error", err).Error("failed to create symlink")
			return sftp.ErrSSHFxFailure
		}
		break
	// Called when deleting a file.
	case "Remove":
		if !h.can(PermissionFileDelete) {
			return sftp.ErrSSHFxPermissionDenied
		}
		if err := h.fs.Delete(request.Filepath); err != nil {
			if errors.Is(err, os.ErrNotExist) {
				return sftp.ErrSSHFxNoSuchFile
			}
			l.WithField("error", err).Error("failed to remove a file")
			return sftp.ErrSSHFxFailure
		}
		return sftp.ErrSSHFxOk
	default:
		return sftp.ErrSSHFxOpUnsupported
	}

	target := request.Filepath
	if request.Target != "" {
		target = request.Target
	}

	// Not failing here is intentional. We still made the file, it is just owned incorrectly
	// and will likely cause some issues. There is no logical check for if the file was removed
	// because both of those cases (Rmdir, Remove) have an explicit return rather than break.
	if err := h.fs.Chown(target); err != nil {
		l.WithField("error", err).Warn("error chowning file")
	}

	return sftp.ErrSSHFxOk
}

// Filelist is the handler for SFTP filesystem list calls. This will handle calls to list the contents of
// a directory as well as perform file/folder stat calls.
func (h *Handler) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
	if !h.can(PermissionFileRead) {
		return nil, sftp.ErrSSHFxPermissionDenied
	}

	switch request.Method {
	case "List":
		p, err := h.fs.SafePath(request.Filepath)
		if err != nil {
			return nil, sftp.ErrSSHFxNoSuchFile
		}
		files, err := ioutil.ReadDir(p)
		if err != nil {
			h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
			return nil, sftp.ErrSSHFxFailure
		}
		return ListerAt(files), nil
	case "Stat":
		st, err := h.fs.Stat(request.Filepath)
		if err != nil {
			if errors.Is(err, os.ErrNotExist) {
				return nil, sftp.ErrSSHFxNoSuchFile
			}
			h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error performing stat on file")
			return nil, sftp.ErrSSHFxFailure
		}
		return ListerAt([]os.FileInfo{st.FileInfo}), nil
	default:
		return nil, sftp.ErrSSHFxOpUnsupported
	}
}

// Determines if a user has permission to perform a specific action on the SFTP server. These
// permissions are defined and returned by the Panel API.
func (h *Handler) can(permission string) bool {
	// SFTPServer owners and super admins have their permissions returned as '[*]' via the Panel
	// API, so for the sake of speed do an initial check for that before iterating over the
	// entire array of permissions.
	if len(h.permissions) == 1 && h.permissions[0] == "*" {
		return true
	}
	for _, p := range h.permissions {
		if p == permission {
			return true
		}
	}
	return false
}
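The Mkdir branch above derives the new directory's name and its parent from the request path before handing both to CreateDirectory. A minimal sketch of that split, using only the standard library; splitDirRequest is a hypothetical helper added here for illustration, not part of the diff:

// Sketch only: mirrors how the Mkdir case splits a request path.
func splitDirRequest(requestPath string) (parent string, name string) {
	parts := strings.Split(filepath.Clean(requestPath), "/")
	// For "/backups/2021/january" this yields name "january" and parent "/backups/2021",
	// which matches the arguments passed to CreateDirectory in the handler above.
	return strings.Join(parts[0:len(parts)-1], "/"), parts[len(parts)-1]
}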
							
								
								
									
328 sftp/server.go
@@ -5,69 +5,185 @@ import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"io"
	"io/ioutil"
	"net"
	"os"
	"path"
	"strconv"
	"strings"

	"emperror.dev/errors"
	"github.com/apex/log"
	"github.com/pkg/sftp"
	"github.com/pterodactyl/wings/api"
	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/server"
	"golang.org/x/crypto/ssh"
)

//goland:noinspection GoNameStartsWithPackageName
type SFTPServer struct {
	BasePath string
	ReadOnly bool
	Listen   string
}

func New() *SFTPServer {
	cfg := config.Get().System
	return &SFTPServer{
		BasePath: cfg.Data,
		ReadOnly: cfg.Sftp.ReadOnly,
		Listen:   cfg.Sftp.Address + ":" + strconv.Itoa(cfg.Sftp.Port),
	}
}

// Starts the SFTP server and add a persistent listener to handle inbound SFTP connections.
func (c *SFTPServer) Run() error {
	if _, err := os.Stat(path.Join(c.BasePath, ".sftp/id_rsa")); os.IsNotExist(err) {
		if err := c.generatePrivateKey(); err != nil {
			return err
		}
	} else if err != nil {
		return errors.Wrap(err, "sftp/server: could not stat private key file")
	}
	pb, err := ioutil.ReadFile(path.Join(c.BasePath, ".sftp/id_rsa"))
	if err != nil {
		return errors.Wrap(err, "sftp/server: could not read private key file")
	}
	private, err := ssh.ParsePrivateKey(pb)
	if err != nil {
		return err
	}

	conf := &ssh.ServerConfig{
		NoClientAuth:     false,
		MaxAuthTries:     6,
		PasswordCallback: c.passwordCallback,
	}
	conf.AddHostKey(private)

	listener, err := net.Listen("tcp", c.Listen)
	if err != nil {
		return err
	}

	log.WithField("listen", c.Listen).Info("sftp server listening for connections")
	for {
		if conn, _ := listener.Accept(); conn != nil {
			go func(conn net.Conn) {
				defer conn.Close()
				c.AcceptInbound(conn, conf)
			}(conn)
		}
	}
}

// Handles an inbound connection to the instance and determines if we should serve the
// request or not.
func (c SFTPServer) AcceptInbound(conn net.Conn, config *ssh.ServerConfig) {
	// Before beginning a handshake must be performed on the incoming net.Conn
	sconn, chans, reqs, err := ssh.NewServerConn(conn, config)
	if err != nil {
		return
	}
	defer sconn.Close()
	go ssh.DiscardRequests(reqs)

	for ch := range chans {
		// If its not a session channel we just move on because its not something we
		// know how to handle at this point.
		if ch.ChannelType() != "session" {
			ch.Reject(ssh.UnknownChannelType, "unknown channel type")
			continue
		}

		channel, requests, err := ch.Accept()
		if err != nil {
			continue
		}

		go func(in <-chan *ssh.Request) {
			for req := range in {
				// Channels have a type that is dependent on the protocol. For SFTP
				// this is "subsystem" with a payload that (should) be "sftp". Discard
				// anything else we receive ("pty", "shell", etc)
				req.Reply(req.Type == "subsystem" && string(req.Payload[4:]) == "sftp", nil)
			}
		}(requests)

		// If no UUID has been set on this inbound request then we can assume we
		// have screwed up something in the authentication code. This is a sanity
		// check, but should never be encountered (ideally...).
		//
		// This will also attempt to match a specific server out of the global server
		// store and return nil if there is no match.
		uuid := sconn.Permissions.Extensions["uuid"]
		srv := server.GetServers().Find(func(s *server.Server) bool {
			if uuid == "" {
				return false
			}
			return s.Id() == uuid
		})
		if srv == nil {
			continue
		}

		// Spin up a SFTP server instance for the authenticated user's server allowing
		// them access to the underlying filesystem.
		handler := sftp.NewRequestServer(channel, NewHandler(sconn, srv.Filesystem()).Handlers())
		if err := handler.Serve(); err == io.EOF {
			handler.Close()
		}
	}
}

// Generates a private key that will be used by the SFTP server.
func (c *SFTPServer) generatePrivateKey() error {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return errors.WithStack(err)
	}
	if err := os.MkdirAll(path.Join(c.BasePath, ".sftp"), 0755); err != nil {
		return errors.Wrap(err, "sftp/server: could not create .sftp directory")
	}
	o, err := os.OpenFile(path.Join(c.BasePath, ".sftp/id_rsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return errors.WithStack(err)
	}
	defer o.Close()

	err = pem.Encode(o, &pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	return errors.WithStack(err)
}

// A function capable of validating user credentials with the Panel API.
func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
	request := api.SftpAuthRequest{
		User:          conn.User(),
		Pass:          string(pass),
		IP:            conn.RemoteAddr().String(),
		SessionID:     conn.SessionID(),
		ClientVersion: conn.ClientVersion(),
	}

	logger := log.WithFields(log.Fields{"subsystem": "sftp", "username": conn.User(), "ip": conn.RemoteAddr().String()})
	logger.Debug("validating credentials for SFTP connection")

	resp, err := api.New().ValidateSftpCredentials(request)
	if err != nil {
		if api.IsInvalidCredentialsError(err) {
			logger.Warn("failed to validate user credentials (invalid username or password)")
		} else {
			logger.WithField("error", err).Error("encountered an error while trying to validate user credentials")
		}
		return nil, err
	}

	logger.WithField("server", resp.Server).Debug("credentials validated and matched to server instance")
	sshPerm := &ssh.Permissions{
		Extensions: map[string]string{
			"uuid":        resp.Server,

@@ -77,158 +193,4 @@ func (c *Server) Initialize() error {
	}

	return sshPerm, nil
		},
	}

	if _, err := os.Stat(path.Join(c.Settings.BasePath, ".sftp/id_rsa")); os.IsNotExist(err) {
		if err := c.generatePrivateKey(); err != nil {
			return err
		}
	} else if err != nil {
		return err
	}

	privateBytes, err := ioutil.ReadFile(path.Join(c.Settings.BasePath, ".sftp/id_rsa"))
	if err != nil {
		return err
	}

	private, err := ssh.ParsePrivateKey(privateBytes)
	if err != nil {
		return err
	}

	// Add our private key to the server configuration.
	serverConfig.AddHostKey(private)

	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", c.Settings.BindAddress, c.Settings.BindPort))
	if err != nil {
		return err
	}

	log.WithField("host", c.Settings.BindAddress).WithField("port", c.Settings.BindPort).Info("sftp subsystem listening for connections")

	for {
		conn, _ := listener.Accept()
		if conn != nil {
			go c.AcceptInboundConnection(conn, serverConfig)
		}
	}
}

// Handles an inbound connection to the instance and determines if we should serve the request
// or not.
func (c Server) AcceptInboundConnection(conn net.Conn, config *ssh.ServerConfig) {
	defer conn.Close()

	// Before beginning a handshake must be performed on the incoming net.Conn
	sconn, chans, reqs, err := ssh.NewServerConn(conn, config)
	if err != nil {
		return
	}
	defer sconn.Close()

	go ssh.DiscardRequests(reqs)

	for newChannel := range chans {
		// If its not a session channel we just move on because its not something we
		// know how to handle at this point.
		if newChannel.ChannelType() != "session" {
			newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
			continue
		}

		channel, requests, err := newChannel.Accept()
		if err != nil {
			continue
		}

		// Channels have a type that is dependent on the protocol. For SFTP this is "subsystem"
		// with a payload that (should) be "sftp". Discard anything else we receive ("pty", "shell", etc)
		go func(in <-chan *ssh.Request) {
			for req := range in {
				ok := false

				switch req.Type {
				case "subsystem":
					if string(req.Payload[4:]) == "sftp" {
						ok = true
					}
				}

				req.Reply(ok, nil)
			}
		}(requests)

		// Configure the user's home folder for the rest of the request cycle.
		if sconn.Permissions.Extensions["uuid"] == "" {
			continue
		}

		// Create a new handler for the currently logged in user's server.
		fs := c.createHandler(sconn)

		// Create the server instance for the channel using the filesystem we created above.
		server := sftp.NewRequestServer(channel, fs)

		if err := server.Serve(); err == io.EOF {
			server.Close()
		}
	}
}

// Creates a new SFTP handler for a given server. The directory argument should
// be the base directory for a server. All actions done on the server will be
// relative to that directory, and the user will not be able to escape out of it.
func (c Server) createHandler(sc *ssh.ServerConn) sftp.Handlers {
	p := FileSystem{
		UUID:          sc.Permissions.Extensions["uuid"],
		Permissions:   strings.Split(sc.Permissions.Extensions["permissions"], ","),
		ReadOnly:      c.Settings.ReadOnly,
		Cache:         c.cache,
		User:          c.User,
		HasDiskSpace:  c.DiskSpaceValidator,
		PathValidator: c.PathValidator,
		logger: log.WithFields(log.Fields{
			"subsystem": "sftp",
			"username":  sc.User(),
			"ip":        sc.RemoteAddr(),
		}),
	}

	return sftp.Handlers{
		FileGet:  p,
		FilePut:  p,
		FileCmd:  p,
		FileList: p,
	}
}

// Generates a private key that will be used by the SFTP server.
func (c Server) generatePrivateKey() error {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return err
	}

	if err := os.MkdirAll(path.Join(c.Settings.BasePath, ".sftp"), 0755); err != nil {
		return err
	}

	o, err := os.OpenFile(path.Join(c.Settings.BasePath, ".sftp/id_rsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	defer o.Close()

	pkey := &pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	}

	if err := pem.Encode(o, pkey); err != nil {
		return err
	}

	return nil
}
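With the old Settings/User wiring removed above, the refactored server is now constructed from the global configuration via New() and started with Run(). A minimal sketch of how the bootstrap code might launch it; the call site, goroutine, and log call are assumptions, only New() and Run() come from this diff:

	// Launch the SFTP subsystem in the background, mirroring what the removed
	// sftp.Initialize() helper (deleted below) used to do.
	go func() {
		if err := sftp.New().Run(); err != nil {
			log.WithField("subsystem", "sftp").WithField("error", err).Error("failed to initialize the SFTP subsystem")
		}
	}()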
							
								
								
									
85 sftp/sftp.go
@@ -1,85 +0,0 @@
package sftp

import (
	"time"

	"emperror.dev/errors"
	"github.com/apex/log"
	"github.com/patrickmn/go-cache"
	"github.com/pterodactyl/wings/api"
	"github.com/pterodactyl/wings/config"
)

var noMatchingServerError = errors.New("no matching server with that UUID was found")

func Initialize(config config.SystemConfiguration) error {
	s := &Server{
		User: User{
			Uid: config.User.Uid,
			Gid: config.User.Gid,
		},
		Settings: Settings{
			BasePath:    config.Data,
			ReadOnly:    config.Sftp.ReadOnly,
			BindAddress: config.Sftp.Address,
			BindPort:    config.Sftp.Port,
		},
		cache: cache.New(5*time.Minute, 10*time.Minute),
	}
	s.CredentialValidator = s.validateCredentials
	s.PathValidator = s.validatePath
	s.DiskSpaceValidator = s.validateDiskSpace

	// Initialize the SFTP server in a background thread since this is
	// a long running operation.
	go func(s *Server) {
		if err := s.Initialize(); err != nil {
			log.WithField("subsystem", "sftp").WithField("error", err).Error("failed to initialize SFTP subsystem")
		}
	}(s)

	return nil
}

func (s *Server) validatePath(fs FileSystem, p string) (string, error) {
	srv := s.serverManager.Get(fs.UUID)
	if srv == nil {
		return "", noMatchingServerError
	}
	return srv.Filesystem().SafePath(p)
}

func (s *Server) validateDiskSpace(fs FileSystem) bool {
	srv := s.serverManager.Get(fs.UUID)
	if srv == nil {
		return false
	}
	return srv.Filesystem().HasSpaceAvailable(true)
}

// Validates a set of credentials for a SFTP login against Pterodactyl Panel and returns
// the server's UUID if the credentials were valid.
func (s *Server) validateCredentials(c api.SftpAuthRequest) (*api.SftpAuthResponse, error) {
	f := log.Fields{"subsystem": "sftp", "username": c.User, "ip": c.IP}

	log.WithFields(f).Debug("validating credentials for SFTP connection")
	resp, err := api.New().ValidateSftpCredentials(c)
	if err != nil {
		if api.IsInvalidCredentialsError(err) {
			log.WithFields(f).Warn("failed to validate user credentials (invalid username or password)")
		} else {
			log.WithFields(f).Error("encountered an error while trying to validate user credentials")
		}

		return resp, err
	}

	srv := s.serverManager.Get(resp.Server)
	if srv == nil {
		return resp, noMatchingServerError
	}

	srv.Log().WithFields(f).Debug("credentials successfully validated and matched user to server instance")

	return resp, err
}
@@ -5,6 +5,13 @@ import (
	"os"
)

const (
	// Extends the default SFTP server to return a quota exceeded error to the client.
	//
	// @see https://tools.ietf.org/id/draft-ietf-secsh-filexfer-13.txt
	ErrSSHQuotaExceeded = fxerr(15)
)

type ListerAt []os.FileInfo

// Returns the number of entries copied and an io.EOF error if we made it to the end of the file list.
@@ -20,3 +27,14 @@ func (l ListerAt) ListAt(f []os.FileInfo, offset int64) (int, error) {
		return n, nil
	}
}

type fxerr uint32

func (e fxerr) Error() string {
	switch e {
	case ErrSSHQuotaExceeded:
		return "Quota Exceeded"
	default:
		return "Failure"
	}
}
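ErrSSHQuotaExceeded gives the handlers a way to report a full disk to the SFTP client as a quota error instead of a generic failure. A sketch of one possible use, assuming a filesystem type that exposes HasSpaceAvailable as the filesystem code elsewhere in Wings does; the interface and function below are illustrative and not part of this diff:

// spaceChecker is an assumed view over the server filesystem for this sketch.
type spaceChecker interface {
	HasSpaceAvailable(allowStale bool) bool
}

// quotaCheck maps an out-of-space condition to the new SFTP quota error.
func quotaCheck(fs spaceChecker) error {
	if !fs.HasSpaceAvailable(true) {
		return ErrSSHQuotaExceeded
	}
	return nil
}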
@@ -7,14 +7,35 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"strconv"
	"strings"
	"sync"
	"time"

	"emperror.dev/errors"
)

var cr = []byte(" \r")
var crr = []byte("\r\n")

// FirstNotEmpty returns the first string passed in that is not an empty value.
func FirstNotEmpty(v ...string) string {
	for _, val := range v {
		if val != "" {
			return val
		}
	}
	return ""
}

func MustInt(v string) int {
	i, err := strconv.Atoi(v)
	if err != nil {
		panic(errors.Wrap(err, "system/utils: could not parse int"))
	}
	return i
}

func ScanReader(r io.Reader, callback func(line string)) error {
	br := bufio.NewReader(r)
	// Avoid constantly re-allocating memory when we're flooding lines through this
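FirstNotEmpty and MustInt are small helpers for falling back to a default value and for parsing strings that are expected to be numeric; MustInt panics on bad input, so it is only suitable for trusted values. An illustrative combination of the two (the environment variable name is hypothetical):

	// Fall back to "2022" when the variable is unset, then parse it.
	// MustInt will panic if the resulting string is not a valid integer.
	port := MustInt(FirstNotEmpty(os.Getenv("WINGS_SFTP_PORT"), "2022"))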