commit f1344f1a82
Merge branch 'develop' into v2
.github/workflows/docker.yml (vendored, 2 lines changed)

@@ -34,7 +34,7 @@ jobs:
       - name: Get Build Information
         id: build_info
         run: |
-          echo "::set-output name=version_tag::${GITHUB_REF/refs\/tags\//}"
+          echo "::set-output name=version_tag::${GITHUB_REF/refs\/tags\/v/}"
           echo "::set-output name=short_sha::$(git rev-parse --short HEAD)"
       - name: Release Production Build
         uses: docker/build-push-action@v2
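The only change is the extra "v" in the bash parameter expansion, so a pushed tag such as v1.2.3 now yields the image tag 1.2.3 rather than v1.2.3. A rough Go equivalent of what that expansion does, for illustration only (the ref value is an example, not taken from the workflow):

package main

import (
	"fmt"
	"strings"
)

func main() {
	ref := "refs/tags/v1.2.3" // example GITHUB_REF value
	// ${GITHUB_REF/refs\/tags\/v/} removes the first occurrence of "refs/tags/v".
	versionTag := strings.Replace(ref, "refs/tags/v", "", 1)
	fmt.Println(versionTag) // 1.2.3
}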
.github/workflows/release.yml (vendored, 2 lines changed)

@@ -45,7 +45,7 @@ jobs:
           git config --local user.name "Pterodactyl CI"
           git checkout -b $BRANCH
           git push -u origin $BRANCH
-          sed -i "s/ Version = \".*\"/ Version = \"${REF:11}\"/" system/const.go
+          sed -i "s/var Version = \".*\"/var Version = \"${REF:11}\"/" system/const.go
           git add system/const.go
           git commit -m "bump version for release"
           git push
@@ -4,7 +4,7 @@ import (
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"os"
@@ -19,16 +19,14 @@ import (
 	"github.com/pterodactyl/wings/config"
 )

-var (
-	configureArgs struct {
+var configureArgs struct {
 	PanelURL      string
 	Token         string
 	ConfigPath    string
 	Node          string
 	Override      bool
 	AllowInsecure bool
 }
-)

 var nodeIdRegex = regexp.MustCompile(`^(\d+)$`)

@@ -140,13 +138,13 @@ func configureCmdRun(cmd *cobra.Command, args []string) {
 		fmt.Println("The authentication credentials provided were not valid.")
 		os.Exit(1)
 	} else if res.StatusCode != http.StatusOK {
-		b, _ := ioutil.ReadAll(res.Body)
+		b, _ := io.ReadAll(res.Body)

 		fmt.Println("An error occurred while processing this request.\n", string(b))
 		os.Exit(1)
 	}

-	b, err := ioutil.ReadAll(res.Body)
+	b, err := io.ReadAll(res.Body)

 	cfg, err := config.NewAtPath(configPath)
 	if err != nil {
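This merge repeatedly swaps the deprecated io/ioutil helpers for their Go 1.16+ equivalents. The mapping used throughout the diff is the standard one; a minimal, self-contained sketch for reference (the file names are hypothetical):

package main

import (
	"log"
	"os"
)

// The deprecated io/ioutil helpers map onto io and os:
//   ioutil.ReadAll(r)         -> io.ReadAll(r)
//   ioutil.ReadFile(path)     -> os.ReadFile(path)
//   ioutil.WriteFile(p, b, m) -> os.WriteFile(p, b, m)
func main() {
	b, err := os.ReadFile("config.yml") // reads the whole file into memory
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("config.copy.yml", b, 0o600); err != nil {
		log.Fatal(err)
	}
}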
@@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os/exec"
@@ -29,18 +28,18 @@ import (
 	"github.com/pterodactyl/wings/system"
 )

-const DefaultHastebinUrl = "https://ptero.co"
-const DefaultLogLines = 200
+const (
+	DefaultHastebinUrl = "https://ptero.co"
+	DefaultLogLines    = 200
+)

-var (
-	diagnosticsArgs struct {
+var diagnosticsArgs struct {
 	IncludeEndpoints   bool
 	IncludeLogs        bool
 	ReviewBeforeUpload bool
 	HastebinURL        string
 	LogLines           int
 }
-)

 func newDiagnosticsCommand() *cobra.Command {
 	command := &cobra.Command{
@@ -79,7 +78,7 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 		{
 			Name: "ReviewBeforeUpload",
 			Prompt: &survey.Confirm{
-				Message: "Do you want to review the collected data before uploading to hastebin.com?",
+				Message: "Do you want to review the collected data before uploading to " + diagnosticsArgs.HastebinURL + "?",
 				Help:    "The data, especially the logs, might contain sensitive information, so you should review it. You will be asked again if you want to upload.",
 				Default: true,
 			},
@@ -97,12 +96,12 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 	output := &strings.Builder{}
 	fmt.Fprintln(output, "Pterodactyl Wings - Diagnostics Report")
 	printHeader(output, "Versions")
-	fmt.Fprintln(output, " wings:", system.Version)
+	fmt.Fprintln(output, " Wings:", system.Version)
 	if dockerErr == nil {
-		fmt.Fprintln(output, "Docker:", dockerVersion.Version)
+		fmt.Fprintln(output, " Docker:", dockerVersion.Version)
 	}
 	if v, err := kernel.GetKernelVersion(); err == nil {
-		fmt.Fprintln(output, "Kernel:", v)
+		fmt.Fprintln(output, " Kernel:", v)
 	}
 	if os, err := operatingsystem.GetOperatingSystem(); err == nil {
 		fmt.Fprintln(output, " OS:", os)
@@ -110,7 +109,6 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {

 	printHeader(output, "Wings Configuration")
 	if err := config.FromFile(config.DefaultLocation); err != nil {

 	}
 	cfg := config.Get()
 	fmt.Fprintln(output, " Panel Location:", redact(cfg.PanelLocation))
@@ -190,6 +188,16 @@ func diagnosticsCmdRun(cmd *cobra.Command, args []string) {
 		survey.AskOne(&survey.Confirm{Message: "Upload to " + diagnosticsArgs.HastebinURL + "?", Default: false}, &upload)
 	}
 	if upload {
+		if !diagnosticsArgs.IncludeEndpoints {
+			s := output.String()
+			output.Reset()
+			a := strings.ReplaceAll(cfg.PanelLocation, s, "{redacted}")
+			a = strings.ReplaceAll(cfg.Api.Host, a, "{redacted}")
+			a = strings.ReplaceAll(cfg.Api.Ssl.CertificateFile, a, "{redacted}")
+			a = strings.ReplaceAll(cfg.Api.Ssl.KeyFile, a, "{redacted}")
+			a = strings.ReplaceAll(cfg.System.Sftp.Address, a, "{redacted}")
+			output.WriteString(a)
+		}
 		u, err := uploadToHastebin(diagnosticsArgs.HastebinURL, output.String())
 		if err == nil {
 			fmt.Println("Your report is available here: ", u)
@@ -226,7 +234,7 @@ func uploadToHastebin(hbUrl, content string) (string, error) {
 		return "", err
 	}
 	pres := make(map[string]interface{})
-	body, err := ioutil.ReadAll(res.Body)
+	body, err := io.ReadAll(res.Body)
 	if err != nil {
 		fmt.Println("Failed to parse response.", err)
 		return "", err
@@ -359,7 +359,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 	// Check if main http server should run with TLS. Otherwise reset the TLS
 	// config on the server and then serve it over normal HTTP.
 	if api.Ssl.Enabled {
-		if err := s.ListenAndServeTLS(strings.ToLower(api.Ssl.CertificateFile), strings.ToLower(api.Ssl.KeyFile)); err != nil {
+		if err := s.ListenAndServeTLS(api.Ssl.CertificateFile, api.Ssl.KeyFile); err != nil {
 			log.WithFields(log.Fields{"auto_tls": false, "error": err}).Fatal("failed to configure HTTPS server")
 		}
 		return
@@ -4,7 +4,6 @@ import (
 	"context"
 	"crypto/tls"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"os/user"
@@ -380,7 +379,7 @@ func WriteToDisk(c *Configuration) error {
 	if err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(c.path, b, 0o600); err != nil {
+	if err := os.WriteFile(c.path, b, 0o600); err != nil {
 		return err
 	}
 	return nil
@@ -448,7 +447,7 @@ func EnsurePterodactylUser() error {
 // FromFile reads the configuration from the provided file and stores it in the
 // global singleton for this instance.
 func FromFile(path string) error {
-	b, err := ioutil.ReadFile(path)
+	b, err := os.ReadFile(path)
 	if err != nil {
 		return err
 	}
@@ -456,31 +455,17 @@ func FromFile(path string) error {
 	if err != nil {
 		return err
 	}
-	// Replace environment variables within the configuration file with their
-	// values from the host system. This function works almost identically to
-	// the default os.ExpandEnv function, except it supports escaping dollar
-	// signs in the text if you pass "$$" through.
-	//
-	// "some$$foo" -> "some$foo"
-	// "some$foo" -> "some" (or "someVALUE_OF_FOO" if FOO is defined in env)
-	//
-	// @see https://github.com/pterodactyl/panel/issues/3692
-	exp := os.Expand(string(b), func(s string) string {
-		if s == "$" {
-			return s
-		}
-		return os.Getenv(s)
-	})
-
-	if err := yaml.Unmarshal([]byte(exp), c); err != nil {
+	if err := yaml.Unmarshal(b, c); err != nil {
 		return err
 	}

 	// Store this configuration in the global state.
 	Set(c)
 	return nil
 }

-// ConfigureDirectories ensures that all of the system directories exist on the
+// ConfigureDirectories ensures that all the system directories exist on the
 // system. These directories are created so that only the owner can read the data,
 // and no other users.
 //
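The environment-variable expansion that FromFile previously performed (removed above) behaved like os.ExpandEnv except that "$$" escaped a literal dollar sign. A standalone sketch of that removed behavior, kept here for reference only; it is no longer part of the new code path:

package main

import (
	"fmt"
	"os"
)

// expand mirrors the removed logic: "$$" stays a literal "$",
// anything else is replaced with the matching environment variable.
func expand(in string) string {
	return os.Expand(in, func(s string) string {
		if s == "$" {
			return s
		}
		return os.Getenv(s)
	})
}

func main() {
	os.Setenv("FOO", "bar")
	fmt.Println(expand("some$$foo")) // some$foo
	fmt.Println(expand("some$FOO"))  // somebar
}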
@@ -592,7 +577,7 @@ func ConfigureTimezone() error {
 		_config.System.Timezone = tz
 	}
 	if _config.System.Timezone == "" {
-		b, err := ioutil.ReadFile("/etc/timezone")
+		b, err := os.ReadFile("/etc/timezone")
 		if err != nil {
 			if !os.IsNotExist(err) {
 				return errors.WithMessage(err, "config: failed to open timezone file")
@@ -3,6 +3,7 @@ package config
 import (
 	"encoding/base64"
 	"encoding/json"
+	"sort"

 	"github.com/docker/docker/api/types"
 )
@@ -51,9 +52,9 @@ type DockerConfiguration struct {
 	// Registries .
 	Registries map[string]RegistryConfiguration `json:"registries" yaml:"registries"`

-	// The size of the /tmp directory when mounted into a container. Please be aware that Docker
-	// utilizes host memory for this value, and that we do not keep track of the space used here
-	// so avoid allocating too much to a server.
+	// TmpfsSize specifies the size for the /tmp directory mounted into containers. Please be
+	// aware that Docker utilizes the host's system memory for this value, and that we do not
+	// keep track of the space used there, so avoid allocating too much to a server.
 	TmpfsSize uint `default:"100" json:"tmpfs_size" yaml:"tmpfs_size"`

 	// ContainerPidLimit sets the total number of processes that can be active in a container
@@ -62,14 +63,18 @@ type DockerConfiguration struct {
 	// available pids and crash.
 	ContainerPidLimit int64 `default:"512" json:"container_pid_limit" yaml:"container_pid_limit"`

-	// InstallLimits defines the limits on the installer containers that prevents a server's
+	// InstallerLimits defines the limits on the installer containers that prevents a server's
 	// installation process from unintentionally consuming more resources than expected. This
 	// is used in conjunction with the server's defined limits. Whichever value is higher will
-	// take precedence in the install containers.
+	// take precedence in the installer containers.
 	InstallerLimits struct {
 		Memory int64 `default:"1024" json:"memory" yaml:"memory"`
 		Cpu    int64 `default:"100" json:"cpu" yaml:"cpu"`
 	} `json:"installer_limits" yaml:"installer_limits"`

+	// Overhead controls the memory overhead given to all containers to circumvent certain
+	// software such as the JVM not staying below the maximum memory limit.
+	Overhead Overhead `json:"overhead" yaml:"overhead"`
 }

 // RegistryConfiguration defines the authentication credentials for a given
@@ -91,3 +96,62 @@ func (c RegistryConfiguration) Base64() (string, error) {
 	}
 	return base64.URLEncoding.EncodeToString(b), nil
 }
+
+// Overhead controls the memory overhead given to all containers to circumvent certain
+// software such as the JVM not staying below the maximum memory limit.
+type Overhead struct {
+	// Override controls if the overhead limits should be overridden by the values in the config file.
+	Override bool `default:"false" json:"override" yaml:"override"`
+
+	// DefaultMultiplier sets the default multiplier for if no Multipliers are able to be applied.
+	DefaultMultiplier float64 `default:"1.05" json:"default_multiplier" yaml:"default_multiplier"`
+
+	// Multipliers allows overriding DefaultMultiplier depending on the amount of memory
+	// configured for a server.
+	//
+	// Default values (used if Override is `false`)
+	// - Less than 2048 MB of memory, multiplier of 1.15 (15%)
+	// - Less than 4096 MB of memory, multiplier of 1.10 (10%)
+	// - Otherwise, multiplier of 1.05 (5%) - specified in DefaultMultiplier
+	//
+	// If the defaults were specified in the config they would look like:
+	// ```yaml
+	// multipliers:
+	//   2048: 1.15
+	//   4096: 1.10
+	// ```
+	Multipliers map[int]float64 `json:"multipliers" yaml:"multipliers"`
+}
+
+func (o Overhead) GetMultiplier(memoryLimit int64) float64 {
+	// Default multiplier values.
+	if !o.Override {
+		if memoryLimit <= 2048 {
+			return 1.15
+		} else if memoryLimit <= 4096 {
+			return 1.10
+		}
+		return 1.05
+	}
+
+	// This plucks the keys of the Multipliers map, so they can be sorted from
+	// smallest to largest in order to correctly apply the proper multiplier.
+	i := 0
+	multipliers := make([]int, len(o.Multipliers))
+	for k := range o.Multipliers {
+		multipliers[i] = k
+		i++
+	}
+	sort.Ints(multipliers)
+
+	// Loop through the memory values in order (smallest to largest)
+	for _, m := range multipliers {
+		// If the server's memory limit exceeds the modifier's limit, don't apply it.
+		if memoryLimit > int64(m) {
+			continue
+		}
+		return o.Multipliers[m]
+	}
+
+	return o.DefaultMultiplier
+}
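A short usage sketch for the new overhead logic. It assumes it lives in the same package as the Overhead type added above and that fmt is imported; the values are only illustrative. With Override left at false the hard-coded defaults apply instead: <= 2048 MB gives 1.15, <= 4096 MB gives 1.10, anything larger gives 1.05.

// Sketch only; not part of the commit.
func exampleOverhead() {
	o := Overhead{
		Override:          true,
		DefaultMultiplier: 1.05,
		Multipliers:       map[int]float64{2048: 1.15, 4096: 1.10},
	}
	fmt.Println(o.GetMultiplier(1024)) // 1.15 – smallest bucket that still covers the limit
	fmt.Println(o.GetMultiplier(3000)) // 1.10
	fmt.Println(o.GetMultiplier(8192)) // 1.05 – no bucket matches, falls back to DefaultMultiplier
}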
@@ -31,7 +31,7 @@ type Allocations struct {
 //
 // You'll want to use DockerBindings() if you need to re-map 127.0.0.1 to the Docker interface.
 func (a *Allocations) Bindings() nat.PortMap {
-	var out = nat.PortMap{}
+	out := nat.PortMap{}

 	for ip, ports := range a.Mappings {
 		for _, port := range ports {
@@ -94,7 +94,7 @@ func (a *Allocations) DockerBindings() nat.PortMap {
 // To accomplish this, we'll just get the values from "DockerBindings" and then set them
 // to empty structs. Because why not.
 func (a *Allocations) Exposed() nat.PortSet {
-	var out = nat.PortSet{}
+	out := nat.PortSet{}

 	for port := range a.DockerBindings() {
 		out[port] = struct{}{}
@@ -14,8 +14,10 @@ import (
 	"github.com/pterodactyl/wings/config"
 )

-var _conce sync.Once
-var _client *client.Client
+var (
+	_conce  sync.Once
+	_client *client.Client
+)

 // Docker returns a docker client to be used throughout the codebase. Once a
 // client has been created it will be returned for all subsequent calls to this
@@ -342,10 +342,10 @@ func (e *Environment) followOutput() error {
 func (e *Environment) scanOutput(reader io.ReadCloser) {
 	defer reader.Close()

-	events := e.Events()
-
-	if err := system.ScanReader(reader, func(line string) {
-		events.Publish(environment.ConsoleOutputEvent, line)
+	if err := system.ScanReader(reader, func(v []byte) {
+		e.logCallbackMx.Lock()
+		defer e.logCallbackMx.Unlock()
+		e.logCallback(v)
 	}); err != nil && err != io.EOF {
 		log.WithField("error", err).WithField("container_id", e.Id).Warn("error processing scanner line in console output")
 		return
@@ -49,7 +49,10 @@ type Environment struct {
 	// Holds the stats stream used by the polling commands so that we can easily close it out.
 	stats io.ReadCloser

-	emitter *events.EventBus
+	emitter *events.Bus
+
+	logCallbackMx sync.Mutex
+	logCallback   func([]byte)

 	// Tracks the environment state.
 	st *system.AtomicString
@@ -100,9 +103,9 @@ func (e *Environment) IsAttached() bool {
 	return e.stream != nil
 }

-func (e *Environment) Events() *events.EventBus {
+func (e *Environment) Events() *events.Bus {
 	e.eventMu.Do(func() {
-		e.emitter = events.New()
+		e.emitter = events.NewBus()
 	})

 	return e.emitter
@@ -114,7 +117,6 @@ func (e *Environment) Events() *events.EventBus {
 // ID auto-assigned when the container is created.
 func (e *Environment) Exists() (bool, error) {
 	_, err := e.client.ContainerInspect(context.Background(), e.Id)
-
 	if err != nil {
 		// If this error is because the container instance wasn't found via Docker we
 		// can safely ignore the error and just return false.
@@ -215,3 +217,10 @@ func (e *Environment) SetState(state string) {
 		e.Events().Publish(environment.StateChangeEvent, state)
 	}
 }
+
+func (e *Environment) SetLogCallback(f func([]byte)) {
+	e.logCallbackMx.Lock()
+	defer e.logCallbackMx.Unlock()
+
+	e.logCallback = f
+}
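scanOutput no longer publishes console lines onto the event bus itself; it hands every scanned line to the callback registered through SetLogCallback. A hedged sketch of how a consumer might register that callback and fan the lines back out over the rewritten bus — the variable names and topic string below are illustrative, not taken from this diff:

// env is any environment.ProcessEnvironment (e.g. the Docker environment above),
// and bus is an *events.Bus created with events.NewBus().
env.SetLogCallback(func(line []byte) {
	// string(line) copies the bytes, which is safer if the scanner reuses its buffer.
	bus.Publish("console output", string(line))
})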
@@ -90,11 +90,7 @@ func (e *Environment) pollResources(ctx context.Context) error {
 				st.Network.TxBytes += nw.TxBytes
 			}

-			if b, err := json.Marshal(st); err != nil {
-				e.log().WithField("error", err).Warn("error while marshaling stats object for environment")
-			} else {
-				e.Events().Publish(environment.ResourceEvent, string(b))
-			}
+			e.Events().Publish(environment.ResourceEvent, st)
 		}
 	}
 }
@@ -8,7 +8,6 @@ import (
 )

 const (
-	ConsoleOutputEvent     = "console output"
 	StateChangeEvent       = "state change"
 	ResourceEvent          = "resources"
 	DockerImagePullStarted = "docker image pull started"
@@ -35,7 +34,7 @@ type ProcessEnvironment interface {
 	// Returns an event emitter instance that can be hooked into to listen for different
 	// events that are fired by the environment. This should not allow someone to publish
 	// events, only subscribe to them.
-	Events() *events.EventBus
+	Events() *events.Bus

 	// Determines if the server instance exists. For example, in a docker environment
 	// this should confirm that the container is created and in a bootable state. In
@@ -108,4 +107,7 @@ type ProcessEnvironment interface {
 	// Uptime returns the current environment uptime in milliseconds. This is
 	// the time that has passed since it was last started.
 	Uptime(ctx context.Context) (int64, error)
+
+	// SetLogCallback sets the callback that the container's log output will be passed to.
+	SetLogCallback(func([]byte))
 }
@@ -75,13 +75,7 @@ func (l Limits) ConvertedCpuLimit() int64 {
 // server is < 4G, use 10%, if less than 2G use 15%. This avoids unexpected
 // crashes from processes like Java which run over the limit.
 func (l Limits) MemoryOverheadMultiplier() float64 {
-	if l.MemoryLimit <= 2048 {
-		return 1.15
-	} else if l.MemoryLimit <= 4096 {
-		return 1.10
-	}
-
-	return 1.05
+	return config.Get().Docker.Overhead.GetMultiplier(l.MemoryLimit)
 }

 func (l Limits) BoundedMemoryLimit() int64 {
events/events.go (176 lines changed)

@@ -1,32 +1,79 @@
 package events

 import (
-	"encoding/json"
 	"strings"
 	"sync"
-
-	"github.com/gammazero/workerpool"
 )

+type Listener chan Event
+
+// Event represents an Event sent over a Bus.
 type Event struct {
-	Data  string
 	Topic string
+	Data  interface{}
 }

-type EventBus struct {
-	mu    sync.RWMutex
-	pools map[string]*CallbackPool
+// Bus represents an Event Bus.
+type Bus struct {
+	listenersMx sync.Mutex
+	listeners   map[string][]Listener
 }

-func New() *EventBus {
-	return &EventBus{
-		pools: make(map[string]*CallbackPool),
+// NewBus returns a new empty Event Bus.
+func NewBus() *Bus {
+	return &Bus{
+		listeners: make(map[string][]Listener),
 	}
 }

-// Publish data to a given topic.
-func (e *EventBus) Publish(topic string, data string) {
-	t := topic
+// Off unregisters a listener from the specified topics on the Bus.
+func (b *Bus) Off(listener Listener, topics ...string) {
+	b.listenersMx.Lock()
+	defer b.listenersMx.Unlock()
+
+	for _, topic := range topics {
+		b.off(topic, listener)
+	}
+}
+
+func (b *Bus) off(topic string, listener Listener) bool {
+	listeners, ok := b.listeners[topic]
+	if !ok {
+		return false
+	}
+	for i, l := range listeners {
+		if l != listener {
+			continue
+		}
+
+		listeners = append(listeners[:i], listeners[i+1:]...)
+		b.listeners[topic] = listeners
+		return true
+	}
+	return false
+}
+
+// On registers a listener to the specified topics on the Bus.
+func (b *Bus) On(listener Listener, topics ...string) {
+	b.listenersMx.Lock()
+	defer b.listenersMx.Unlock()
+
+	for _, topic := range topics {
+		b.on(topic, listener)
+	}
+}
+
+func (b *Bus) on(topic string, listener Listener) {
+	listeners, ok := b.listeners[topic]
+	if !ok {
+		b.listeners[topic] = []Listener{listener}
+	} else {
+		b.listeners[topic] = append(listeners, listener)
+	}
+}
+
+// Publish publishes a message to the Bus.
+func (b *Bus) Publish(topic string, data interface{}) {
 	// Some of our topics for the socket support passing a more specific namespace,
 	// such as "backup completed:1234" to indicate which specific backup was completed.
 	//
@@ -36,87 +83,44 @@ func (e *EventBus) Publish(topic string, data string) {
 		parts := strings.SplitN(topic, ":", 2)

 		if len(parts) == 2 {
-			t = parts[0]
+			topic = parts[0]
 		}
 	}

-	e.mu.RLock()
-	defer e.mu.RUnlock()
+	b.listenersMx.Lock()
+	defer b.listenersMx.Unlock()

-	// Acquire a read lock and loop over all the channels registered for the topic. This
-	// avoids a panic crash if the process tries to unregister the channel while this routine
-	// is running.
-	if cp, ok := e.pools[t]; ok {
-		for _, callback := range cp.callbacks {
-			c := *callback
-			evt := Event{Data: data, Topic: topic}
-			// Using the workerpool with one worker allows us to execute events in a FIFO manner. Running
-			// this using goroutines would cause things such as console output to just output in random order
-			// if more than one event is fired at the same time.
-			//
-			// However, the pool submission does not block the execution of this function itself, allowing
-			// us to call publish without blocking any of the other pathways.
-			//
-			// @see https://github.com/pterodactyl/panel/issues/2303
-			cp.pool.Submit(func() {
-				c(evt)
-			})
-		}
+	listeners, ok := b.listeners[topic]
+	if !ok {
+		return
 	}
+	if len(listeners) < 1 {
+		return
+	}

+	var wg sync.WaitGroup
+	event := Event{Topic: topic, Data: data}
+	for _, listener := range listeners {
+		l := listener
+		wg.Add(1)
+		go func(l Listener, event Event) {
+			defer wg.Done()
+			l <- event
+		}(l, event)
+	}
+	wg.Wait()
 }

-// PublishJson publishes a JSON message to a given topic.
-func (e *EventBus) PublishJson(topic string, data interface{}) error {
-	b, err := json.Marshal(data)
-	if err != nil {
-		return err
-	}
-
-	e.Publish(topic, string(b))
-
-	return nil
-}
-
-// On adds a callback function that will be executed each time one of the events using the topic
-// name is called.
-func (e *EventBus) On(topic string, callback *func(Event)) {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	// Check if this topic has been registered at least once for the event listener, and if
-	// not create an empty struct for the topic.
-	if _, exists := e.pools[topic]; !exists {
-		e.pools[topic] = &CallbackPool{
-			callbacks: make([]*func(Event), 0),
-			pool:      workerpool.New(1),
-		}
-	}
-
-	// If this callback is not already registered as an event listener, go ahead and append
-	// it to the array of callbacks for this topic.
-	e.pools[topic].Add(callback)
-}
-
-// Off removes an event listener from the bus.
-func (e *EventBus) Off(topic string, callback *func(Event)) {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	if cp, ok := e.pools[topic]; ok {
-		cp.Remove(callback)
-	}
-}
-
-// Destroy removes all the event listeners that have been registered for any topic. Also stops the worker
-// pool to close that routine.
-func (e *EventBus) Destroy() {
-	e.mu.Lock()
-	defer e.mu.Unlock()
-
-	// Stop every pool that exists for a given callback topic.
-	for _, cp := range e.pools {
-		cp.pool.Stop()
-	}
-
-	e.pools = make(map[string]*CallbackPool)
+// Destroy destroys the Event Bus by unregistering and closing all listeners.
+func (b *Bus) Destroy() {
+	b.listenersMx.Lock()
+	defer b.listenersMx.Unlock()
+
+	for _, listeners := range b.listeners {
+		for _, listener := range listeners {
+			close(listener)
+		}
+	}
+
+	b.listeners = make(map[string][]Listener)
 }
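The bus is now channel-based: a Listener is simply a chan Event, Publish fans the event out to every registered channel and waits for delivery, and Destroy closes all channels. A minimal usage sketch; the topic name and payload are only illustrative, and the import path is assumed from the repository layout:

package main

import (
	"fmt"

	"github.com/pterodactyl/wings/events"
)

func main() {
	bus := events.NewBus()

	listener := make(events.Listener) // i.e. a chan events.Event
	bus.On(listener, "state change")

	// Publish blocks until every listener has received the event,
	// so consume it from another goroutine.
	done := make(chan struct{})
	go func() {
		e := <-listener
		fmt.Println(e.Topic, e.Data) // "state change running"
		close(done)
	}()

	bus.Publish("state change", "running")
	<-done

	bus.Off(listener, "state change")
	close(listener)
}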
events/events_test.go (new file, 180 lines)

package events

import (
	"testing"
	"time"

	. "github.com/franela/goblin"
)

func TestNewBus(t *testing.T) {
	g := Goblin(t)
	bus := NewBus()

	g.Describe("NewBus", func() {
		g.It("is not nil", func() {
			g.Assert(bus).IsNotNil("Bus expected to not be nil")
			g.Assert(bus.listeners).IsNotNil("Bus#listeners expected to not be nil")
		})
	})
}

func TestBus_Off(t *testing.T) {
	g := Goblin(t)

	const topic = "test"

	g.Describe("Off", func() {
		g.It("unregisters listener", func() {
			bus := NewBus()

			g.Assert(bus.listeners[topic]).IsNotNil()
			g.Assert(len(bus.listeners[topic])).IsZero()
			listener := make(chan Event)
			bus.On(listener, topic)
			g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")

			bus.Off(listener, topic)
			g.Assert(len(bus.listeners[topic])).Equal(0, "Topic still has one or more listeners")

			close(listener)
		})

		g.It("unregisters correct listener", func() {
			bus := NewBus()

			listener := make(chan Event)
			listener2 := make(chan Event)
			listener3 := make(chan Event)
			bus.On(listener, topic)
			bus.On(listener2, topic)
			bus.On(listener3, topic)
			g.Assert(len(bus.listeners[topic])).Equal(3, "Listeners were not registered")

			bus.Off(listener, topic)
			bus.Off(listener3, topic)
			g.Assert(len(bus.listeners[topic])).Equal(1, "Expected 1 listener to remain")

			if bus.listeners[topic][0] != listener2 {
				// A normal Assert does not properly compare channels.
				g.Fail("wrong listener unregistered")
			}

			// Cleanup
			bus.Off(listener2, topic)
			close(listener)
			close(listener2)
			close(listener3)
		})
	})
}

func TestBus_On(t *testing.T) {
	g := Goblin(t)

	const topic = "test"

	g.Describe("On", func() {
		g.It("registers listener", func() {
			bus := NewBus()

			g.Assert(bus.listeners[topic]).IsNotNil()
			g.Assert(len(bus.listeners[topic])).IsZero()
			listener := make(chan Event)
			bus.On(listener, topic)
			g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")

			if bus.listeners[topic][0] != listener {
				// A normal Assert does not properly compare channels.
				g.Fail("wrong listener registered")
			}

			// Cleanup
			bus.Off(listener, topic)
			close(listener)
		})
	})
}

func TestBus_Publish(t *testing.T) {
	g := Goblin(t)

	const topic = "test"
	const message = "this is a test message!"

	g.Describe("Publish", func() {
		g.It("publishes message", func() {
			bus := NewBus()

			g.Assert(bus.listeners[topic]).IsNotNil()
			g.Assert(len(bus.listeners[topic])).IsZero()
			listener := make(chan Event)
			bus.On(listener, topic)
			g.Assert(len(bus.listeners[topic])).Equal(1, "Listener was not registered")

			done := make(chan struct{}, 1)
			go func() {
				select {
				case m := <-listener:
					g.Assert(m.Topic).Equal(topic)
					g.Assert(m.Data).Equal(message)
				case <-time.After(1 * time.Second):
					g.Fail("listener did not receive message in time")
				}
				done <- struct{}{}
			}()
			bus.Publish(topic, message)
			<-done

			// Cleanup
			close(listener)
			bus.Off(listener, topic)
		})

		g.It("publishes message to all listeners", func() {
			bus := NewBus()

			g.Assert(bus.listeners[topic]).IsNotNil()
			g.Assert(len(bus.listeners[topic])).IsZero()
			listener := make(chan Event)
			listener2 := make(chan Event)
			listener3 := make(chan Event)
			bus.On(listener, topic)
			bus.On(listener2, topic)
			bus.On(listener3, topic)
			g.Assert(len(bus.listeners[topic])).Equal(3, "Listener was not registered")

			done := make(chan struct{}, 1)
			go func() {
				for i := 0; i < 3; i++ {
					select {
					case m := <-listener:
						g.Assert(m.Topic).Equal(topic)
						g.Assert(m.Data).Equal(message)
					case m := <-listener2:
						g.Assert(m.Topic).Equal(topic)
						g.Assert(m.Data).Equal(message)
					case m := <-listener3:
						g.Assert(m.Topic).Equal(topic)
						g.Assert(m.Data).Equal(message)
					case <-time.After(1 * time.Second):
						g.Fail("all listeners did not receive the message in time")
						i = 3
					}
				}

				done <- struct{}{}
			}()
			bus.Publish(topic, message)
			<-done

			// Cleanup
			bus.Off(listener, topic)
			bus.Off(listener2, topic)
			bus.Off(listener3, topic)
			close(listener)
			close(listener2)
			close(listener3)
		})
	})
}
Deleted file (50 lines):

@@ -1,50 +0,0 @@
package events

import (
	"reflect"

	"github.com/gammazero/workerpool"
)

type CallbackPool struct {
	callbacks []*func(Event)
	pool      *workerpool.WorkerPool
}

// Pushes a new callback into the array of listeners for the pool.
func (cp *CallbackPool) Add(callback *func(Event)) {
	if cp.index(reflect.ValueOf(callback)) < 0 {
		cp.callbacks = append(cp.callbacks, callback)
	}
}

// Removes a callback from the array of registered callbacks if it exists.
func (cp *CallbackPool) Remove(callback *func(Event)) {
	i := cp.index(reflect.ValueOf(callback))

	// If i < 0 it means there was no index found for the given callback, meaning it was
	// never registered or was already unregistered from the listeners. Also double check
	// that we didn't somehow escape the length of the topic callback (not sure how that
	// would happen, but lets avoid a panic condition).
	if i < 0 || i >= len(cp.callbacks) {
		return
	}

	// We can assume that the topic still exists at this point since we acquire an exclusive
	// lock on the process, and the "e.index" function cannot return a value >= 0 if there is
	// no topic already existing.
	cp.callbacks = append(cp.callbacks[:i], cp.callbacks[i+1:]...)
}

// Finds the index of a given callback in the topic by comparing all of the registered callback
// pointers to the passed function. This function does not acquire a lock as it should only be called
// within the confines of a function that has already acquired a lock for the duration of the lookup.
func (cp *CallbackPool) index(v reflect.Value) int {
	for i, handler := range cp.callbacks {
		if reflect.ValueOf(handler).Pointer() == v.Pointer() {
			return i
		}
	}

	return -1
}
go.mod (126 lines changed)

@@ -1,77 +1,115 @@
 module github.com/pterodactyl/wings

-go 1.16
+go 1.17

 require (
 	emperror.dev/errors v0.8.0
-	github.com/AlecAivazis/survey/v2 v2.3.2
+	github.com/AlecAivazis/survey/v2 v2.2.15
 	github.com/Jeffail/gabs/v2 v2.6.1
-	github.com/Microsoft/go-winio v0.5.1 // indirect
-	github.com/Microsoft/hcsshim v0.9.0 // indirect
 	github.com/NYTimes/logrotate v1.0.0
-	github.com/andybalholm/brotli v1.0.3 // indirect
 	github.com/apex/log v1.9.0
 	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
 	github.com/beevik/etree v1.1.0
 	github.com/buger/jsonparser v1.1.1
 	github.com/cenkalti/backoff/v4 v4.1.1
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/cobaugh/osrelease v0.0.0-20181218015638-a93a0a55a249
-	github.com/creasty/defaults v1.5.2
-	github.com/docker/docker v20.10.10+incompatible
+	github.com/creasty/defaults v1.5.1
+	github.com/docker/docker v20.10.7+incompatible
 	github.com/docker/go-connections v0.4.0
-	github.com/fatih/color v1.13.0
+	github.com/fatih/color v1.12.0
 	github.com/franela/goblin v0.0.0-20200825194134-80c0062ed6cd
-	github.com/fsnotify/fsnotify v1.5.1 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.0
+	github.com/gabriel-vasile/mimetype v1.3.1
 	github.com/gammazero/workerpool v1.1.2
 	github.com/gbrlsnchs/jwt/v3 v3.0.1
-	github.com/gin-gonic/gin v1.7.4
-	github.com/go-playground/validator/v10 v10.9.0 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
+	github.com/gin-gonic/gin v1.7.2
 	github.com/google/uuid v1.3.0
-	github.com/gorilla/mux v1.7.4 // indirect
 	github.com/gorilla/websocket v1.4.2
 	github.com/iancoleman/strcase v0.2.0
 	github.com/icza/dyno v0.0.0-20210726202311-f1bafe5d9996
-	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/juju/ratelimit v1.0.1
 	github.com/karrick/godirwalk v1.16.1
-	github.com/klauspost/compress v1.13.6 // indirect
 	github.com/klauspost/pgzip v1.2.5
-	github.com/magefile/mage v1.11.0 // indirect
 	github.com/magiconair/properties v1.8.5
-	github.com/mattn/go-colorable v0.1.11
-	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
-	github.com/mholt/archiver/v3 v3.5.1
+	github.com/mattn/go-colorable v0.1.8
+	github.com/mholt/archiver/v3 v3.5.0
 	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
-	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
-	github.com/morikuni/aec v1.0.0 // indirect
-	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
-	github.com/nwaples/rardecode v1.1.2 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible
-	github.com/pierrec/lz4/v4 v4.1.10 // indirect
 	github.com/pkg/profile v1.6.0
-	github.com/pkg/sftp v1.13.4
-	github.com/prometheus/common v0.32.1 // indirect
-	github.com/prometheus/procfs v0.7.3 // indirect
-	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
+	github.com/pkg/sftp v1.13.2
+	github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f
 	github.com/spf13/cobra v1.2.1
 	github.com/stretchr/testify v1.7.0
-	github.com/ugorji/go v1.2.6 // indirect
-	github.com/ulikunitz/xz v0.5.10 // indirect
-	go.uber.org/atomic v1.9.0 // indirect
-	go.uber.org/multierr v1.7.0 // indirect
-	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
-	golang.org/x/net v0.0.0-20211104170005-ce137452f963 // indirect
+	golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20211103235746-7861aae1554b // indirect
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
-	golang.org/x/text v0.3.7 // indirect
-	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
-	golang.org/x/tools v0.1.5 // indirect
-	google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247 // indirect
-	google.golang.org/grpc v1.42.0 // indirect
-	gopkg.in/ini.v1 v1.63.2
+	gopkg.in/ini.v1 v1.62.0
 	gopkg.in/yaml.v2 v2.4.0
 )
+
+require (
+	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/Microsoft/go-winio v0.5.0 // indirect
+	github.com/Microsoft/hcsshim v0.8.20 // indirect
+	github.com/andybalholm/brotli v1.0.3 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.1 // indirect
+	github.com/containerd/containerd v1.5.5 // indirect
+	github.com/containerd/fifo v1.0.0 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/go-metrics v0.0.1 // indirect
+	github.com/docker/go-units v0.4.0 // indirect
+	github.com/dsnet/compress v0.0.1 // indirect
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
+	github.com/gammazero/deque v0.1.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.13.0 // indirect
+	github.com/go-playground/universal-translator v0.17.0 // indirect
+	github.com/go-playground/validator/v10 v10.8.0 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/snappy v0.0.4 // indirect
+	github.com/gorilla/mux v1.7.4 // indirect
+	github.com/inconshreveable/mousetrap v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.11 // indirect
+	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
+	github.com/klauspost/compress v1.13.2 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/leodido/go-urn v1.2.1 // indirect
+	github.com/magefile/mage v1.11.0 // indirect
+	github.com/mattn/go-isatty v0.0.13 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
+	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+	github.com/nwaples/rardecode v1.1.1 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/pierrec/lz4/v4 v4.1.8 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.11.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.30.0 // indirect
+	github.com/prometheus/procfs v0.7.1 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/ugorji/go/codec v1.1.7 // indirect
+	github.com/ulikunitz/xz v0.5.10 // indirect
+	github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/multierr v1.7.0 // indirect
+	golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985 // indirect
+	golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect
+	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
+	golang.org/x/text v0.3.6 // indirect
+	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
+	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	google.golang.org/genproto v0.0.0-20210729151513-df9385d47c1b // indirect
+	google.golang.org/grpc v1.39.0 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux

 package notify
@@ -15,9 +15,11 @@ import (
 	"github.com/mattn/go-colorable"
 )

-var Default = New(os.Stderr, true)
-var bold = color2.New(color2.Bold)
-var boldred = color2.New(color2.Bold, color2.FgRed)
+var (
+	Default = New(os.Stderr, true)
+	bold    = color2.New(color2.Bold)
+	boldred = color2.New(color2.Bold, color2.FgRed)
+)

 var Strings = [...]string{
 	log.DebugLevel: "DEBUG",
@@ -2,7 +2,7 @@ package parser

 import (
 	"bytes"
-	"io/ioutil"
+	"io"
 	"os"
 	"regexp"
 	"strconv"
@@ -38,13 +38,13 @@ var xmlValueMatchRegex = regexp.MustCompile(`^\[([\w]+)='(.*)'\]$`)
 // Gets the []byte representation of a configuration file to be passed through to other
 // handler functions. If the file does not currently exist, it will be created.
 func readFileBytes(path string) ([]byte, error) {
-	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0644)
+	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
 	if err != nil {
 		return nil, err
 	}
 	defer file.Close()

-	return ioutil.ReadAll(file)
+	return io.ReadAll(file)
 }

 // Gets the value of a key based on the value type defined.
@@ -3,7 +3,6 @@ package parser

 import (
 	"bufio"
 	"encoding/json"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"

@@ -212,7 +211,7 @@ func (f *ConfigurationFile) Parse(path string, internal bool) error {
 	}

 	b := strings.TrimSuffix(path, filepath.Base(path))
-	if err := os.MkdirAll(b, 0755); err != nil {
+	if err := os.MkdirAll(b, 0o755); err != nil {
 		return errors.WithMessage(err, "failed to create base directory for missing configuration file")
 	} else {
 		if _, err := os.Create(path); err != nil {

@@ -229,7 +228,7 @@ func (f *ConfigurationFile) Parse(path string, internal bool) error {

 // Parses an xml file.
 func (f *ConfigurationFile) parseXmlFile(path string) error {
 	doc := etree.NewDocument()
-	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0644)
+	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
 	if err != nil {
 		return err
 	}

@@ -322,7 +321,7 @@ func (f *ConfigurationFile) parseIniFile(path string) error {
 	// Ini package can't handle a non-existent file, so handle that automatically here
 	// by creating it if not exists. Then, immediately close the file since we will use
 	// other methods to write the new contents.
-	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0644)
+	file, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0o644)
 	if err != nil {
 		return err
 	}

@@ -334,7 +333,29 @@ func (f *ConfigurationFile) parseIniFile(path string) error {
 	}

 	for _, replacement := range f.Replace {
-		path := strings.SplitN(replacement.Match, ".", 2)
+		var (
+			path         []string
+			bracketDepth int
+			v            []int32
+		)
+		for _, c := range replacement.Match {
+			switch c {
+			case '[':
+				bracketDepth++
+			case ']':
+				bracketDepth--
+			case '.':
+				if bracketDepth > 0 || len(path) == 1 {
+					v = append(v, c)
+					continue
+				}
+				path = append(path, string(v))
+				v = v[:0]
+			default:
+				v = append(v, c)
+			}
+		}
+		path = append(path, string(v))

 		value, err := f.LookupConfigurationValue(replacement)
 		if err != nil {
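The ini parser previously split a replacement key with strings.SplitN(match, ".", 2), which breaks keys whose first dot sits inside a bracketed selector. The loop added above walks the key rune by rune and only splits on dots outside brackets. Below is a standalone sketch of that behaviour; the helper name and the sample key are illustrative only and are not code from this commit.

package main

import "fmt"

// splitMatch mirrors the bracket-aware splitting introduced in the ini parser:
// dots inside [...] (or after the first segment) stay in the current segment.
func splitMatch(match string) []string {
	var (
		path         []string
		bracketDepth int
		v            []int32
	)
	for _, c := range match {
		switch c {
		case '[':
			bracketDepth++
		case ']':
			bracketDepth--
		case '.':
			if bracketDepth > 0 || len(path) == 1 {
				v = append(v, c)
				continue
			}
			path = append(path, string(v))
			v = v[:0]
		default:
			v = append(v, c)
		}
	}
	return append(path, string(v))
}

func main() {
	// strings.SplitN("query[udp.port].value", ".", 2) would cut inside the
	// brackets and yield ["query[udp", "port].value"]; the bracket-aware
	// version keeps the selector intact.
	fmt.Println(splitMatch("query[udp.port].value")) // [query[udp.port] value]
}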
@@ -387,7 +408,7 @@ func (f *ConfigurationFile) parseJsonFile(path string) error {
 	}

 	output := []byte(data.StringIndent("", " "))
-	return ioutil.WriteFile(path, output, 0644)
+	return os.WriteFile(path, output, 0o644)
 }

 // Parses a yaml file and updates any matching key/value pairs before persisting

@@ -424,14 +445,14 @@ func (f *ConfigurationFile) parseYamlFile(path string) error {
 		return err
 	}

-	return ioutil.WriteFile(path, marshaled, 0644)
+	return os.WriteFile(path, marshaled, 0o644)
 }

 // Parses a text file using basic find and replace. This is a highly inefficient method of
 // scanning a file and performing a replacement. You should attempt to use anything other
 // than this function where possible.
 func (f *ConfigurationFile) parseTextFile(path string) error {
-	input, err := ioutil.ReadFile(path)
+	input, err := os.ReadFile(path)
 	if err != nil {
 		return err
 	}

@@ -449,7 +470,7 @@ func (f *ConfigurationFile) parseTextFile(path string) error {
 		}
 	}

-	if err := ioutil.WriteFile(path, []byte(strings.Join(lines, "\n")), 0644); err != nil {
+	if err := os.WriteFile(path, []byte(strings.Join(lines, "\n")), 0o644); err != nil {
 		return err
 	}

@@ -545,7 +566,7 @@ func (f *ConfigurationFile) parsePropertiesFile(path string) error {
 	}

 	// Open the file for writing.
-	w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+	w, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
 	if err != nil {
 		return err
 	}
@@ -57,8 +57,7 @@ func (re *RequestError) StatusCode() int {
 	return re.response.StatusCode
 }

-type SftpInvalidCredentialsError struct {
-}
+type SftpInvalidCredentialsError struct{}

 func (ice SftpInvalidCredentialsError) Error() string {
 	return "the credentials provided were invalid"
@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"strconv"
 	"strings"

@@ -224,9 +223,9 @@ func (r *Response) Read() ([]byte, error) {
 		return nil, errors.New("remote: attempting to read missing response")
 	}
 	if r.Response.Body != nil {
-		b, _ = ioutil.ReadAll(r.Response.Body)
+		b, _ = io.ReadAll(r.Response.Body)
 	}
-	r.Response.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+	r.Response.Body = io.NopCloser(bytes.NewBuffer(b))
 	return b, nil
 }
@@ -87,7 +87,6 @@ func TestPost(t *testing.T) {
 	}
 	c, _ := createTestClient(func(rw http.ResponseWriter, r *http.Request) {
 		assert.Equal(t, http.MethodPost, r.Method)
-
 	})
 	r, err := c.Post(context.Background(), "/test", test)
 	assert.NoError(t, err)
@@ -188,6 +188,8 @@ func deleteServer(c *gin.Context) {
 	// as well.
 	s.CtxCancel()
 	s.Events().Destroy()
+	s.LogSink().Destroy()
+	s.InstallSink().Destroy()
 	s.Websockets().CancelAll()

 	// Remove any pending remote file downloads for the server.
@@ -103,15 +103,17 @@ func postUpdateConfiguration(c *gin.Context) {
 	if err := c.BindJSON(&cfg); err != nil {
 		return
 	}

 	// Keep the SSL certificates the same since the Panel will send through Lets Encrypt
 	// default locations. However, if we picked a different location manually we don't
 	// want to override that.
 	//
 	// If you pass through manual locations in the API call this logic will be skipped.
 	if strings.HasPrefix(cfg.Api.Ssl.KeyFile, "/etc/letsencrypt/live/") {
-		cfg.Api.Ssl.KeyFile = strings.ToLower(config.Get().Api.Ssl.KeyFile)
-		cfg.Api.Ssl.CertificateFile = strings.ToLower(config.Get().Api.Ssl.CertificateFile)
+		cfg.Api.Ssl.KeyFile = config.Get().Api.Ssl.KeyFile
+		cfg.Api.Ssl.CertificateFile = config.Get().Api.Ssl.CertificateFile
 	}

 	// Try to write this new configuration to the disk before updating our global
 	// state with it.
 	if err := config.WriteToDisk(cfg); err != nil {
@@ -2,6 +2,7 @@ package websocket

 import (
 	"context"
+	"encoding/json"
 	"sync"
 	"time"

@@ -53,9 +54,9 @@ func (h *Handler) listenForExpiration(ctx context.Context) {
 			jwt := h.GetJwt()
 			if jwt != nil {
 				if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 0 {
-					_ = h.SendJson(&Message{Event: TokenExpiredEvent})
+					_ = h.SendJson(Message{Event: TokenExpiredEvent})
 				} else if jwt.ExpirationTime.Unix()-time.Now().Unix() <= 60 {
-					_ = h.SendJson(&Message{Event: TokenExpiringEvent})
+					_ = h.SendJson(Message{Event: TokenExpiringEvent})
 				}
 			}
 		}
@@ -86,44 +87,71 @@ func (h *Handler) listenForServerEvents(ctx context.Context) error {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	c := make(chan events.Event)
+	eventChan := make(chan events.Event)
+	logOutput := make(chan []byte)
+	installOutput := make(chan []byte)
+	h.server.Events().On(eventChan, e...)
+	h.server.LogSink().On(logOutput)
+	h.server.InstallSink().On(installOutput)

-	callback := func(e events.Event) {
-		c <- e
-	}
-
-	// Subscribe to all of the events with the same callback that will push the
-	// data out over the websocket for the server.
-	for _, evt := range e {
-		h.server.Events().On(evt, &callback)
+	onError := func(evt string, err2 error) {
+		h.Logger().WithField("event", evt).WithField("error", err2).Error("failed to send event over server websocket")
+		// Avoid race conditions by only setting the error once and then canceling
+		// the context. This way if additional processing errors come through due
+		// to a massive flood of things you still only report and stop at the first.
+		o.Do(func() {
+			err = err2
+		})
+		cancel()
 	}

 	for {
 		select {
 		case <-ctx.Done():
 			break
-		case e := <-c:
-			sendErr := h.SendJson(&Message{Event: e.Topic, Args: []string{e.Data}})
+		case e := <-logOutput:
+			sendErr := h.SendJson(Message{Event: server.ConsoleOutputEvent, Args: []string{string(e)}})
 			if sendErr == nil {
 				continue
 			}
+			onError(server.ConsoleOutputEvent, sendErr)
+		case e := <-installOutput:
+			sendErr := h.SendJson(Message{Event: server.InstallOutputEvent, Args: []string{string(e)}})
+			if sendErr == nil {
+				continue
+			}
+			onError(server.InstallOutputEvent, sendErr)
+		case e := <-eventChan:
+			var sendErr error
+			message := Message{Event: e.Topic}
+			if str, ok := e.Data.(string); ok {
+				message.Args = []string{str}
+			} else if b, ok := e.Data.([]byte); ok {
+				message.Args = []string{string(b)}
+			} else {
+				b, sendErr = json.Marshal(e.Data)
+				if sendErr == nil {
+					message.Args = []string{string(b)}
+				}
+			}

-			h.Logger().WithField("event", e.Topic).WithField("error", sendErr).Error("failed to send event over server websocket")
-			// Avoid race conditions by only setting the error once and then canceling
-			// the context. This way if additional processing errors come through due
-			// to a massive flood of things you still only report and stop at the first.
-			o.Do(func() {
-				err = sendErr
-			})
-			cancel()
+			if sendErr == nil {
+				sendErr = h.SendJson(message)
+				if sendErr == nil {
+					continue
+				}
+			}
+			onError(message.Event, sendErr)
 		}
 		break
 	}

-	for _, evt := range e {
-		h.server.Events().Off(evt, &callback)
-	}
-	close(c)
+	h.server.Events().Off(eventChan, e...)
+	h.server.InstallSink().Off(logOutput)
+	h.server.InstallSink().Off(installOutput)
+	close(eventChan)
+	close(logOutput)
+	close(installOutput)

 	// If the internal context is stopped it is either because the parent context
 	// got canceled or because we ran into an error. If the "err" variable is nil
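The reworked listener above fans several sources (server events plus the raw log and install sinks) into one websocket send path, and uses a sync.Once so only the first send failure is recorded before the context is cancelled. Below is a self-contained sketch of that fan-in pattern; the send function, channel names, and message prefixes are illustrative stand-ins, not part of Wings.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// fanIn multiplexes two output channels into a single send function and keeps
// only the first error, mirroring the onError/sync.Once pattern above.
func fanIn(ctx context.Context, logOutput, installOutput chan []byte, send func(string) error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var (
		once sync.Once
		err  error
	)
	onError := func(e error) {
		once.Do(func() { err = e }) // record only the first failure
		cancel()                    // then stop the loop
	}

	for {
		select {
		case <-ctx.Done():
			return err
		case v := <-logOutput:
			if e := send("console: " + string(v)); e != nil {
				onError(e)
			}
		case v := <-installOutput:
			if e := send("install: " + string(v)); e != nil {
				onError(e)
			}
		}
	}
}

func main() {
	logOutput := make(chan []byte, 1)
	installOutput := make(chan []byte, 1)
	logOutput <- []byte("Hello world!")

	err := fanIn(context.Background(), logOutput, installOutput, func(s string) error {
		fmt.Println(s)
		return errors.New("socket closed") // simulate a failed websocket write
	})
	fmt.Println("first error:", err)
}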
@@ -122,7 +122,7 @@ func (h *Handler) Logger() *log.Entry {
 		WithField("server", h.server.ID())
 }

-func (h *Handler) SendJson(v *Message) error {
+func (h *Handler) SendJson(v Message) error {
 	// Do not send JSON down the line if the JWT on the connection is not valid!
 	if err := h.TokenValid(); err != nil {
 		_ = h.unsafeSendJson(Message{
@@ -314,7 +314,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
 			// On every authentication event, send the current server status back
 			// to the client. :)
 			state := h.server.Environment.State()
-			_ = h.SendJson(&Message{
+			_ = h.SendJson(Message{
 				Event: server.StatusEvent,
 				Args:  []string{state},
 			})

@@ -326,7 +326,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
 				_ = h.server.Filesystem().HasSpaceAvailable(false)

 				b, _ := json.Marshal(h.server.Proc())
-				_ = h.SendJson(&Message{
+				_ = h.SendJson(Message{
 					Event: server.StatsEvent,
 					Args:  []string{string(b)},
 				})

@@ -356,7 +356,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
 			if errors.Is(err, context.DeadlineExceeded) {
 				m, _ := h.GetErrorMessage("another power action is currently being processed for this server, please try again later")

-				_ = h.SendJson(&Message{
+				_ = h.SendJson(Message{
 					Event: ErrorEvent,
 					Args:  []string{m},
 				})

@@ -380,7 +380,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
 		}

 		for _, line := range logs {
-			_ = h.SendJson(&Message{
+			_ = h.SendJson(Message{
 				Event: server.ConsoleOutputEvent,
 				Args:  []string{line},
 			})

@@ -391,7 +391,7 @@ func (h *Handler) HandleInbound(ctx context.Context, m Message) error {
 	case SendStatsEvent:
 		{
 			b, _ := json.Marshal(h.server.Proc())
-			_ = h.SendJson(&Message{
+			_ = h.SendJson(Message{
 				Event: server.StatsEvent,
 				Args:  []string{string(b)},
 			})
@@ -1,5 +1,5 @@
 Name: ptero-wings
-Version: 1.5.0
+Version: 1.5.3
 Release: 1%{?dist}
 Summary: The server control plane for Pterodactyl Panel. Written from the ground-up with security, speed, and stability in mind.
 BuildArch: x86_64

@@ -91,6 +91,13 @@ rm -rf /var/log/pterodactyl
 wings --version

 %changelog
+* Wed Oct 27 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.3-1
+- specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
+- Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.3
+- Fixes improper event registration and error handling during socket authentication that would cause the incorrect error message to be returned to the client, or no error in some scenarios. Event registration is now delayed until the socket is fully authenticated to ensure needless listeners are not registered.
+- Fixes dollar signs always being evaluated as environment variables with no way to escape them. They can now be escaped as $$ which will transform into a single dollar sign.
+- A websocket connection to a server will be closed by Wings if there is a send error encountered and the client will be left to handle reconnections, rather than simply logging the error and continuing to listen for new events.
+
 * Sun Sep 12 2021 Capitol Hosting Solutions Systems Engineering <syseng@chs.gg> - 1.5.0-1
 - specfile by Capitol Hosting Solutions, Upstream by Pterodactyl
 - Rebased for https://github.com/pterodactyl/wings/releases/tag/v1.5.0
@@ -3,7 +3,6 @@ package server

 import (
 	"io"
 	"io/fs"
-	"io/ioutil"
 	"os"
 	"time"

@@ -49,7 +48,7 @@ func (s *Server) getServerwideIgnoredFiles() (string, error) {
 		// Don't read a symlinked ignore file, or a file larger than 32KiB in size.
 		return "", nil
 	}
 	b, err := io.ReadAll(f)
-	b, err := ioutil.ReadAll(f)
 	if err != nil {
 		return "", err
 	}

@@ -80,7 +79,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {
 			s.Log().WithField("backup", b.Identifier()).Info("notified panel of failed backup state")
 		}

-		_ = s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
+		s.Events().Publish(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
 			"uuid":          b.Identifier(),
 			"is_successful": false,
 			"checksum":      "",

@@ -104,7 +103,7 @@ func (s *Server) Backup(b backup.BackupInterface) error {

 	// Emit an event over the socket so we can update the backup in realtime on
 	// the frontend for the server.
-	_ = s.Events().PublishJson(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
+	s.Events().Publish(BackupCompletedEvent+":"+b.Identifier(), map[string]interface{}{
 		"uuid":          b.Identifier(),
 		"is_successful": true,
 		"checksum":      ad.Checksum,
@@ -12,8 +12,7 @@ var (
 	ErrServerIsRestoring = errors.New("server is currently being restored")
 )

-type crashTooFrequent struct {
-}
+type crashTooFrequent struct{}

 func (e *crashTooFrequent) Error() string {
 	return "server has crashed too soon after the last detected crash"

@@ -25,8 +24,7 @@ func IsTooFrequentCrashError(err error) bool {
 	return ok
 }

-type serverDoesNotExist struct {
-}
+type serverDoesNotExist struct{}

 func (e *serverDoesNotExist) Error() string {
 	return "server does not exist on remote system"
@@ -21,12 +21,12 @@ const (
 )

 // Returns the server's emitter instance.
-func (s *Server) Events() *events.EventBus {
+func (s *Server) Events() *events.Bus {
 	s.emitterLock.Lock()
 	defer s.emitterLock.Unlock()

 	if s.emitter == nil {
-		s.emitter = events.New()
+		s.emitter = events.NewBus()
 	}

 	return s.emitter
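The call sites elsewhere in this diff suggest the new events.Bus is subscribed to with plain channels (On(ch, topics...) and Off(ch, topics...)) and fed with Publish(topic, data), instead of registering per-topic callbacks as the old EventBus did. A minimal sketch under that assumption; the topic string and payload are made up, and the import path is assumed to be this repository's events package.

package main

import (
	"fmt"

	"github.com/pterodactyl/wings/events"
)

func main() {
	bus := events.NewBus()

	// Listeners are now plain channels attached to one or more topics.
	ch := make(chan events.Event, 1)
	bus.On(ch, "demo topic")

	// Publishers hand the bus a topic and an arbitrary payload; no JSON
	// marshalling is required on the producer side any more.
	bus.Publish("demo topic", map[string]interface{}{"is_successful": true})

	e := <-ch
	fmt.Println(e.Topic, e.Data)

	bus.Off(ch, "demo topic")
	close(ch)
}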
@@ -45,7 +45,7 @@ type Archive struct {
 // Create creates an archive at dst with all of the files defined in the
 // included files struct.
 func (a *Archive) Create(dst string) error {
-	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
 		return err
 	}
@@ -1,7 +1,7 @@
 package filesystem

 import (
-	"io/ioutil"
+	"os"
 	"sync/atomic"
 	"testing"

@@ -19,11 +19,10 @@ func TestFilesystem_DecompressFile(t *testing.T) {
 	fs, rfs := NewFs()

 	g.Describe("Decompress", func() {
-
 		for _, ext := range []string{"zip", "rar", "tar", "tar.gz"} {
 			g.It("can decompress a "+ext, func() {
 				// copy the file to the new FS
-				c, err := ioutil.ReadFile("./testdata/test." + ext)
+				c, err := os.ReadFile("./testdata/test." + ext)
 				g.Assert(err).IsNil()
 				err = rfs.CreateServerFile("./test."+ext, c)
 				g.Assert(err).IsNil()
@@ -85,7 +85,7 @@ func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
 	if err != nil {
 		return nil, err
 	}
-	f, err := os.OpenFile(cleaned, flag, 0644)
+	f, err := os.OpenFile(cleaned, flag, 0o644)
 	if err == nil {
 		return f, nil
 	}

@@ -97,7 +97,7 @@ func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
 	if _, err := os.Stat(filepath.Dir(cleaned)); errors.Is(err, os.ErrNotExist) {
 		// Create the path leading up to the file we're trying to create, setting the final perms
 		// on it as we go.
-		if err := os.MkdirAll(filepath.Dir(cleaned), 0755); err != nil {
+		if err := os.MkdirAll(filepath.Dir(cleaned), 0o755); err != nil {
 			return nil, errors.Wrap(err, "server/filesystem: touch: failed to create directory tree")
 		}
 		if err := fs.Chown(filepath.Dir(cleaned)); err != nil {

@@ -107,7 +107,7 @@ func (fs *Filesystem) Touch(p string, flag int) (*os.File, error) {
 	o := &fileOpener{}
 	// Try to open the file now that we have created the pathing necessary for it, and then
 	// Chown that file so that the permissions don't mess with things.
-	f, err = o.open(cleaned, flag, 0644)
+	f, err = o.open(cleaned, flag, 0o644)
 	if err != nil {
 		return nil, errors.Wrap(err, "server/filesystem: touch: failed to open file with wait")
 	}

@@ -181,7 +181,7 @@ func (fs *Filesystem) CreateDirectory(name string, p string) error {
 	if err != nil {
 		return err
 	}
-	return os.MkdirAll(cleaned, 0755)
+	return os.MkdirAll(cleaned, 0o755)
 }

 // Moves (or renames) a file or directory.

@@ -210,7 +210,7 @@ func (fs *Filesystem) Rename(from string, to string) error {
 	// Ensure that the directory we're moving into exists correctly on the system. Only do this if
 	// we're not at the root directory level.
 	if d != fs.Path() {
-		if mkerr := os.MkdirAll(d, 0755); mkerr != nil {
+		if mkerr := os.MkdirAll(d, 0o755); mkerr != nil {
 			return errors.WithMessage(mkerr, "failed to create directory structure for file rename")
 		}
 	}

@@ -377,7 +377,7 @@ func (fs *Filesystem) TruncateRootDirectory() error {
 	if err := os.RemoveAll(fs.Path()); err != nil {
 		return err
 	}
-	if err := os.Mkdir(fs.Path(), 0755); err != nil {
+	if err := os.Mkdir(fs.Path(), 0o755); err != nil {
 		return err
 	}
 	atomic.StoreInt64(&fs.diskUsed, 0)

@@ -485,7 +485,7 @@ func (fs *Filesystem) ListDirectory(p string) ([]Stat, error) {
 			defer wg.Done()

 			var m *mimetype.MIME
-			var d = "inode/directory"
+			d := "inode/directory"
 			if !f.IsDir() {
 				cleanedp := filepath.Join(cleaned, f.Name())
 				if f.Mode()&os.ModeSymlink != 0 {
@@ -3,7 +3,6 @@ package filesystem

 import (
 	"bytes"
 	"errors"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"path/filepath"

@@ -25,7 +24,7 @@ func NewFs() (*Filesystem, *rootFs) {
 		},
 	})

-	tmpDir, err := ioutil.TempDir(os.TempDir(), "pterodactyl")
+	tmpDir, err := os.MkdirTemp(os.TempDir(), "pterodactyl")
 	if err != nil {
 		panic(err)
 	}

@@ -71,7 +70,7 @@ func (rfs *rootFs) reset() {
 		}
 	}

-	if err := os.Mkdir(filepath.Join(rfs.root, "/server"), 0755); err != nil {
+	if err := os.Mkdir(filepath.Join(rfs.root, "/server"), 0o755); err != nil {
 		panic(err)
 	}
 }

@@ -99,7 +98,7 @@ func TestFilesystem_Readfile(t *testing.T) {
 		})

 		g.It("returns an error if the \"file\" is a directory", func() {
-			err := os.Mkdir(filepath.Join(rfs.root, "/server/test.txt"), 0755)
+			err := os.Mkdir(filepath.Join(rfs.root, "/server/test.txt"), 0o755)
 			g.Assert(err).IsNil()

 			err = fs.Readfile("test.txt", buf)

@@ -341,7 +340,7 @@ func TestFilesystem_Rename(t *testing.T) {
 		})

 		g.It("allows a folder to be renamed", func() {
-			err := os.Mkdir(filepath.Join(rfs.root, "/server/source_dir"), 0755)
+			err := os.Mkdir(filepath.Join(rfs.root, "/server/source_dir"), 0o755)
 			g.Assert(err).IsNil()

 			err = fs.Rename("source_dir", "target_dir")

@@ -405,7 +404,7 @@ func TestFilesystem_Copy(t *testing.T) {
 		})

 		g.It("should return an error if the source directory is outside the root", func() {
-			err := os.MkdirAll(filepath.Join(rfs.root, "/nested/in/dir"), 0755)
+			err := os.MkdirAll(filepath.Join(rfs.root, "/nested/in/dir"), 0o755)
 			g.Assert(err).IsNil()

 			err = rfs.CreateServerFileFromString("/../nested/in/dir/ext-source.txt", "external content")

@@ -421,7 +420,7 @@ func TestFilesystem_Copy(t *testing.T) {
 		})

 		g.It("should return an error if the source is a directory", func() {
-			err := os.Mkdir(filepath.Join(rfs.root, "/server/dir"), 0755)
+			err := os.Mkdir(filepath.Join(rfs.root, "/server/dir"), 0o755)
 			g.Assert(err).IsNil()

 			err = fs.Copy("dir")

@@ -466,7 +465,7 @@ func TestFilesystem_Copy(t *testing.T) {
 		})

 		g.It("should create a copy inside of a directory", func() {
-			err := os.MkdirAll(filepath.Join(rfs.root, "/server/nested/in/dir"), 0755)
+			err := os.MkdirAll(filepath.Join(rfs.root, "/server/nested/in/dir"), 0o755)
 			g.Assert(err).IsNil()

 			err = rfs.CreateServerFileFromString("nested/in/dir/source.txt", "test content")

@@ -545,7 +544,7 @@ func TestFilesystem_Delete(t *testing.T) {
 				"foo/bar/baz/source.txt",
 			}

-			err := os.MkdirAll(filepath.Join(rfs.root, "/server/foo/bar/baz"), 0755)
+			err := os.MkdirAll(filepath.Join(rfs.root, "/server/foo/bar/baz"), 0o755)
 			g.Assert(err).IsNil()

 			for _, s := range sources {
@@ -115,8 +115,8 @@ func (fs *Filesystem) ParallelSafePath(paths []string) ([]string, error) {
 	var cleaned []string

 	// Simple locker function to avoid racy appends to the array of cleaned paths.
-	var m = new(sync.Mutex)
-	var push = func(c string) {
+	m := new(sync.Mutex)
+	push := func(c string) {
 		m.Lock()
 		cleaned = append(cleaned, c)
 		m.Unlock()
@@ -107,7 +107,7 @@ func TestFilesystem_Blocks_Symlinks(t *testing.T) {
 		panic(err)
 	}

-	if err := os.Mkdir(filepath.Join(rfs.root, "/malicious_dir"), 0777); err != nil {
+	if err := os.Mkdir(filepath.Join(rfs.root, "/malicious_dir"), 0o777); err != nil {
 		panic(err)
 	}

@@ -215,11 +215,11 @@ func (ip *InstallationProcess) tempDir() string {
 func (ip *InstallationProcess) writeScriptToDisk() error {
 	// Make sure the temp directory root exists before trying to make a directory within it. The
 	// ioutil.TempDir call expects this base to exist, it won't create it for you.
-	if err := os.MkdirAll(ip.tempDir(), 0700); err != nil {
+	if err := os.MkdirAll(ip.tempDir(), 0o700); err != nil {
 		return errors.WithMessage(err, "could not create temporary directory for install process")
 	}

-	f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+	f, err := os.OpenFile(filepath.Join(ip.tempDir(), "install.sh"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644)
 	if err != nil {
 		return errors.WithMessage(err, "failed to write server installation script to disk before mount")
 	}

@@ -350,7 +350,7 @@ func (ip *InstallationProcess) AfterExecute(containerId string) error {
 		return err
 	}

-	f, err := os.OpenFile(ip.GetLogPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	f, err := os.OpenFile(ip.GetLogPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
 		return err
 	}

@@ -516,16 +516,12 @@ func (ip *InstallationProcess) StreamOutput(ctx context.Context, id string) error {
 		ShowStderr: true,
 		Follow:     true,
 	})
-
 	if err != nil {
 		return err
 	}
 	defer reader.Close()

-	evts := ip.Server.Events()
-	err = system.ScanReader(reader, func(line string) {
-		evts.Publish(InstallOutputEvent, line)
-	})
+	err = system.ScanReader(reader, ip.Server.InstallSink().Push)
 	if err != nil {
 		ip.Server.Log().WithFields(log.Fields{"container_id": id, "error": err}).Warn("error processing install output lines")
 	}
@@ -1,7 +1,6 @@
 package server

 import (
-	"encoding/json"
 	"regexp"
 	"strconv"
 	"sync"

@@ -51,15 +50,11 @@ func (dsl *diskSpaceLimiter) Trigger() {
 	})
 }

-// StartEventListeners adds all the internal event listeners we want to use for a server. These listeners can only be
-// removed by deleting the server as they should last for the duration of the process' lifetime.
-func (s *Server) StartEventListeners() {
-	console := func(e events.Event) {
-		t := s.Throttler()
+func (s *Server) processConsoleOutputEvent(v []byte) {
+	t := s.Throttler()
 	err := t.Increment(func() {
 		s.PublishConsoleOutputFromDaemon("Your server is outputting too much data and is being throttled.")
 	})

 	// An error is only returned if the server has breached the thresholds set.
 	if err != nil {
 		// If the process is already stopping, just let it continue with that action rather than attempting

@@ -88,15 +83,27 @@ func (s *Server) StartEventListeners() {
 	// If we are not throttled, go ahead and output the data.
 	if !t.Throttled() {
-		s.Events().Publish(ConsoleOutputEvent, e.Data)
+		s.LogSink().Push(v)
 	}

 	// Also pass the data along to the console output channel.
-	s.onConsoleOutput(e.Data)
+	s.onConsoleOutput(string(v))
 }

+// StartEventListeners adds all the internal event listeners we want to use for a server. These listeners can only be
+// removed by deleting the server as they should last for the duration of the process' lifetime.
+func (s *Server) StartEventListeners() {
+	state := make(chan events.Event)
+	stats := make(chan events.Event)
+	docker := make(chan events.Event)
+
+	go func() {
 		l := newDiskLimiter(s)
-	state := func(e events.Event) {
+		for {
+			select {
+			case e := <-state:
+				go func() {
 					// Reset the throttler when the process is started.
 					if e.Data == environment.ProcessStartingState {
 						l.Reset()

@@ -104,18 +111,12 @@ func (s *Server) StartEventListeners() {
 					}

 					s.OnStateChange()
-	}
-
-	stats := func(e events.Event) {
-		var st environment.Stats
-		if err := json.Unmarshal([]byte(e.Data), &st); err != nil {
-			s.Log().WithField("error", err).Warn("failed to unmarshal server environment stats")
-			return
-		}
+				}()
+			case e := <-stats:
+				go func() {
 					// Update the server resource tracking object with the resources we got here.
 					s.resources.mu.Lock()
-		s.resources.Stats = st
+					s.resources.Stats = e.Data.(environment.Stats)
 					s.resources.mu.Unlock()

 					// If there is no disk space available at this point, trigger the server disk limiter logic

@@ -125,25 +126,27 @@ func (s *Server) StartEventListeners() {
 					}

 					s.emitProcUsage()
-	}
-
-	docker := func(e events.Event) {
-		if e.Topic == environment.DockerImagePullStatus {
-			s.Events().Publish(InstallOutputEvent, e.Data)
-		} else if e.Topic == environment.DockerImagePullStarted {
-			s.PublishConsoleOutputFromDaemon("Pulling Docker container image, this could take a few minutes to complete...")
-		} else {
-			s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
-		}
-	}
+				}()
+			case e := <-docker:
+				go func() {
+					switch e.Topic {
+					case environment.DockerImagePullStatus:
+						s.Events().Publish(InstallOutputEvent, e.Data)
+					case environment.DockerImagePullStarted:
+						s.PublishConsoleOutputFromDaemon("Pulling Docker container image, this could take a few minutes to complete...")
+					default:
+						s.PublishConsoleOutputFromDaemon("Finished pulling Docker container image")
+					}
+				}()
+			}
+		}
+	}()

 	s.Log().Debug("registering event listeners: console, state, resources...")
-	s.Environment.Events().On(environment.ConsoleOutputEvent, &console)
-	s.Environment.Events().On(environment.StateChangeEvent, &state)
-	s.Environment.Events().On(environment.ResourceEvent, &stats)
-	for _, evt := range dockerEvents {
-		s.Environment.Events().On(evt, &docker)
-	}
+	s.Environment.SetLogCallback(s.processConsoleOutputEvent)
+	s.Environment.Events().On(state, environment.StateChangeEvent)
+	s.Environment.Events().On(stats, environment.ResourceEvent)
+	s.Environment.Events().On(docker, dockerEvents...)
 }

 var stripAnsiRegex = regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))")
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"

@@ -137,7 +136,7 @@ func (m *Manager) PersistStates() error {
 	if err != nil {
 		return errors.WithStack(err)
 	}
-	if err := ioutil.WriteFile(config.Get().System.GetStatesPath(), data, 0644); err != nil {
+	if err := os.WriteFile(config.Get().System.GetStatesPath(), data, 0o644); err != nil {
 		return errors.WithStack(err)
 	}
 	return nil

@@ -145,7 +144,7 @@ func (m *Manager) PersistStates() error {

 // ReadStates returns the state of the servers.
 func (m *Manager) ReadStates() (map[string]string, error) {
-	f, err := os.OpenFile(config.Get().System.GetStatesPath(), os.O_RDONLY|os.O_CREATE, 0644)
+	f, err := os.OpenFile(config.Get().System.GetStatesPath(), os.O_RDONLY|os.O_CREATE, 0o644)
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
@@ -52,7 +52,5 @@ func (ru *ResourceUsage) Reset() {
 }

 func (s *Server) emitProcUsage() {
-	if err := s.Events().PublishJson(StatsEvent, s.Proc()); err != nil {
-		s.Log().WithField("error", err).Warn("error while emitting server resource usage to listeners")
-	}
+	s.Events().Publish(StatsEvent, s.Proc())
 }
@@ -49,7 +49,7 @@ type Server struct {
 	fs *filesystem.Filesystem

 	// Events emitted by the server instance.
-	emitter *events.EventBus
+	emitter *events.Bus

 	// Defines the process configuration for the server instance. This is dynamically
 	// fetched from the Pterodactyl Server instance each time the server process is

@@ -70,6 +70,9 @@ type Server struct {
 	// Tracks open websocket connections for the server.
 	wsBag       *WebsocketBag
 	wsBagLocker sync.Mutex
+
+	logSink     *sinkPool
+	installSink *sinkPool
 }

 // New returns a new server instance with a context and all of the default

@@ -83,6 +86,9 @@ func New(client remote.Client) (*Server, error) {
 		installing:   system.NewAtomicBool(false),
 		transferring: system.NewAtomicBool(false),
 		restoring:    system.NewAtomicBool(false),
+
+		logSink:     newSinkPool(),
+		installSink: newSinkPool(),
 	}
 	if err := defaults.Set(&s); err != nil {
 		return nil, errors.Wrap(err, "server: could not set default values for struct")

@@ -349,3 +355,11 @@ func (s *Server) ToAPIResponse() APIResponse {
 		Configuration: *s.Config(),
 	}
 }
+
+func (s *Server) LogSink() *sinkPool {
+	return s.logSink
+}
+
+func (s *Server) InstallSink() *sinkPool {
+	return s.installSink
+}
server/sink.go (new file, 71 lines)
@@ -0,0 +1,71 @@
+package server
+
+import (
+	"sync"
+	"time"
+)
+
+// sinkPool represents a pool with sinks.
+type sinkPool struct {
+	mx    sync.RWMutex
+	sinks []chan []byte
+}
+
+// newSinkPool returns a new empty sinkPool.
+func newSinkPool() *sinkPool {
+	return &sinkPool{}
+}
+
+// On adds a sink on the pool.
+func (p *sinkPool) On(c chan []byte) {
+	p.mx.Lock()
+	defer p.mx.Unlock()
+
+	p.sinks = append(p.sinks, c)
+}
+
+// Off removes a sink from the pool.
+func (p *sinkPool) Off(c chan []byte) {
+	p.mx.Lock()
+	defer p.mx.Unlock()
+
+	sinks := p.sinks
+
+	for i, sink := range sinks {
+		if c != sink {
+			continue
+		}
+		copy(sinks[i:], sinks[i+1:])
+		sinks[len(sinks)-1] = nil
+		sinks = sinks[:len(sinks)-1]
+		p.sinks = sinks
+		return
+	}
+}
+
+// Destroy destroys the pool by removing and closing all sinks.
+func (p *sinkPool) Destroy() {
+	p.mx.Lock()
+	defer p.mx.Unlock()
+
+	for _, c := range p.sinks {
+		close(c)
+	}
+
+	p.sinks = nil
+}
+
+// Push pushes a message to all registered sinks.
+func (p *sinkPool) Push(v []byte) {
+	p.mx.RLock()
+	for _, c := range p.sinks {
+		// TODO: should this be done in parallel?
+		select {
+		// Send the log output to the channel
+		case c <- v:
+		// Timeout after 100 milliseconds, this will cause the write to the channel to be cancelled.
+		case <-time.After(100 * time.Millisecond):
+		}
+	}
+	p.mx.RUnlock()
+}
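Because sinkPool and newSinkPool are unexported, any consumer has to live inside the server package; the sketch below is an illustrative helper (not part of this commit) showing the intended flow of On, Push, and Off. The buffered channel keeps Push from ever waiting on its 100 millisecond timeout here.

package server

import "fmt"

// demoSinkPool is a hypothetical helper that demonstrates how a sink is
// attached, fed, and detached.
func demoSinkPool() {
	pool := newSinkPool()

	out := make(chan []byte, 8) // buffered so Push never hits its timeout in this demo
	pool.On(out)

	pool.Push([]byte("Server marked as starting..."))
	fmt.Println(string(<-out))

	pool.Off(out) // detach; Destroy() elsewhere closes any sinks still attached
	close(out)
}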
@@ -142,12 +142,12 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
 		}
 		mode := request.Attributes().FileMode().Perm()
 		// If the client passes an invalid FileMode just use the default 0644.
-		if mode == 0000 {
-			mode = os.FileMode(0644)
+		if mode == 0o000 {
+			mode = os.FileMode(0o644)
 		}
 		// Force directories to be 0755.
 		if request.Attributes().FileMode().IsDir() {
-			mode = 0755
+			mode = 0o755
 		}
 		if err := h.fs.Chmod(request.Filepath, mode); err != nil {
 			if errors.Is(err, os.ErrNotExist) {

@@ -260,7 +260,6 @@ func (h *Handler) Filelist(request *sftp.Request) (sftp.ListerAt, error) {
 		files, err := ioutil.ReadDir(p)
 		if err != nil {
 			h.logger.WithField("source", request.Filepath).WithField("error", err).Error("error while listing directory")
-
 			return nil, sftp.ErrSSHFxFailure
 		}
 		return ListerAt(files), nil
@@ -52,7 +52,9 @@ func New(m *server.Manager) *SFTPServer {
 	}
 }

-// Starts the SFTP server and add a persistent listener to handle inbound SFTP connections.
+// Run starts the SFTP server and add a persistent listener to handle inbound
+// SFTP connections. This will automatically generate an ED25519 key if one does
+// not already exist on the system for host key verification purposes.
 func (c *SFTPServer) Run() error {
 	keys, err := c.loadPrivateKeys()
 	if err != nil {

@@ -214,10 +216,10 @@ func (c *SFTPServer) generateRSAPrivateKey() error {
 	if err != nil {
 		return err
 	}
-	if err := os.MkdirAll(path.Join(c.BasePath, ".sftp"), 0755); err != nil {
+	if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("rsa")), 0o755); err != nil {
 		return errors.Wrap(err, "sftp/server: could not create .sftp directory")
 	}
-	o, err := os.OpenFile(path.Join(c.BasePath, ".sftp/id_rsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+	o, err := os.OpenFile(c.PrivateKeyPath("rsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
 		return err
 	}

@@ -238,10 +240,10 @@ func (c *SFTPServer) generateECDSAPrivateKey() error {
 	if err != nil {
 		return err
 	}
-	if err := os.MkdirAll(path.Join(c.BasePath, ".sftp"), 0755); err != nil {
+	if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("ecdsa")), 0o755); err != nil {
 		return errors.Wrap(err, "sftp/server: could not create .sftp directory")
 	}
-	o, err := os.OpenFile(path.Join(c.BasePath, ".sftp/id_ecdsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+	o, err := os.OpenFile(c.PrivateKeyPath("ecdsa"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
 		return err
 	}

@@ -261,16 +263,16 @@ func (c *SFTPServer) generateECDSAPrivateKey() error {
 	return nil
 }

-// generateEd25519PrivateKey generates a ed25519 private key that will be used by the SFTP server.
+// generateEd25519PrivateKey generates an ed25519 private key that will be used by the SFTP server.
 func (c *SFTPServer) generateEd25519PrivateKey() error {
 	_, key, err := ed25519.GenerateKey(rand.Reader)
 	if err != nil {
 		return err
 	}
-	if err := os.MkdirAll(path.Join(c.BasePath, ".sftp"), 0755); err != nil {
+	if err := os.MkdirAll(path.Dir(c.PrivateKeyPath("ed25519")), 0o755); err != nil {
 		return errors.Wrap(err, "sftp/server: could not create .sftp directory")
 	}
-	o, err := os.OpenFile(path.Join(c.BasePath, ".sftp/id_ed25519"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+	o, err := os.OpenFile(c.PrivateKeyPath("ed25519"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)
 	if err != nil {
 		return err
 	}

@@ -290,6 +292,11 @@ func (c *SFTPServer) generateEd25519PrivateKey() error {
 	return nil
 }

+// PrivateKeyPath returns the path the host private key for this server instance.
+func (c *SFTPServer) PrivateKeyPath(name string) string {
+	return path.Join(c.BasePath, ".sftp", "id_"+name)
+}
+
 // A function capable of validating user credentials with the Panel API.
 func (c *SFTPServer) passwordCallback(conn ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
 	request := remote.SftpAuthRequest{
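A small sketch of what the new helper resolves to, so the key-generation paths above are easier to follow; the base path and the import path of the sftp package are assumptions for illustration only.

package main

import (
	"fmt"

	"github.com/pterodactyl/wings/sftp"
)

func main() {
	// Hypothetical base path, purely for illustration.
	c := &sftp.SFTPServer{BasePath: "/var/lib/pterodactyl/volumes"}
	// The generate* functions above derive both the key file and its parent
	// ".sftp" directory from this single helper.
	fmt.Println(c.PrivateKeyPath("ed25519")) // /var/lib/pterodactyl/volumes/.sftp/id_ed25519
}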
@@ -6,15 +6,15 @@ import (
 )

 const (
-	// Extends the default SFTP server to return a quota exceeded error to the client.
+	// ErrSSHQuotaExceeded extends the default SFTP server to return a quota exceeded error to the client.
 	//
 	// @see https://tools.ietf.org/id/draft-ietf-secsh-filexfer-13.txt
-	ErrSSHQuotaExceeded = fxerr(15)
+	ErrSSHQuotaExceeded = fxErr(15)
 )

 type ListerAt []os.FileInfo

-// Returns the number of entries copied and an io.EOF error if we made it to the end of the file list.
+// ListAt returns the number of entries copied and an io.EOF error if we made it to the end of the file list.
 // Take a look at the pkg/sftp godoc for more information about how this function should work.
 func (l ListerAt) ListAt(f []os.FileInfo, offset int64) (int, error) {
 	if offset >= int64(len(l)) {

@@ -28,9 +28,9 @@ func (l ListerAt) ListAt(f []os.FileInfo, offset int64) (int, error) {
 	}
 }

-type fxerr uint32
+type fxErr uint32

-func (e fxerr) Error() string {
+func (e fxErr) Error() string {
 	switch e {
 	case ErrSSHQuotaExceeded:
 		return "Quota Exceeded"
@@ -1,6 +1,3 @@
 package system

-var (
-	// The current version of this software.
-	Version = "0.0.1"
-)
+var Version = "develop"
@@ -8,15 +8,16 @@ import (
 	"fmt"
 	"io"
 	"strconv"
-	"strings"
 	"sync"
 	"time"

 	"emperror.dev/errors"
 )

-var cr = []byte(" \r")
-var crr = []byte("\r\n")
+var (
+	cr  = []byte(" \r")
+	crr = []byte("\r\n")
+)

 // FirstNotEmpty returns the first string passed in that is not an empty value.
 func FirstNotEmpty(v ...string) string {

@@ -36,14 +37,14 @@ func MustInt(v string) int {
 	return i
 }

-func ScanReader(r io.Reader, callback func(line string)) error {
+func ScanReader(r io.Reader, callback func(line []byte)) error {
 	br := bufio.NewReader(r)
 	// Avoid constantly re-allocating memory when we're flooding lines through this
 	// function by using the same buffer for the duration of the call and just truncating
 	// the value back to 0 every loop.
-	var str strings.Builder
+	buf := &bytes.Buffer{}
 	for {
-		str.Reset()
+		buf.Reset()
 		var err error
 		var line []byte
 		var isPrefix bool

@@ -55,7 +56,7 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
 		// in line with that it thinks is the terminal size. Those returns break a lot of output handling,
 		// so we'll just replace them with proper new-lines and then split it later and send each line as
 		// its own event in the response.
-		str.Write(bytes.Replace(line, cr, crr, -1))
+		buf.Write(bytes.Replace(line, cr, crr, -1))
 		// Finish this loop and begin outputting the line if there is no prefix (the line fit into
 		// the default buffer), or if we hit the end of the line.
 		if !isPrefix || err == io.EOF {

@@ -69,8 +70,9 @@ func ScanReader(r io.Reader, callback func(line []byte)) error {
 		}
 		// Publish the line for this loop. Break on new-line characters so every line is sent as a single
 		// output event, otherwise you get funky handling in the browser console.
-		for _, line := range strings.Split(str.String(), "\r\n") {
-			callback(line)
+		s := bufio.NewScanner(buf)
+		for s.Scan() {
+			callback(s.Bytes())
 		}
 		// If the error we got previously that lead to the line being output is an io.EOF we want to
 		// exit the entire looping process.
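With the callback now taking []byte instead of string, a sink's Push method can be handed to ScanReader directly (as the install output hunk above does), and ad-hoc callers simply receive the raw line. A minimal usage sketch, assuming only the system package import path already used elsewhere in this repository; the reader contents are made up.

package main

import (
	"fmt"
	"strings"

	"github.com/pterodactyl/wings/system"
)

func main() {
	r := strings.NewReader("line one\nline two\n")
	// The callback receives each scanned line as []byte, so it can also be a
	// method value such as a sinkPool's Push.
	_ = system.ScanReader(r, func(line []byte) {
		fmt.Println(string(line))
	})
}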