Compare commits

..

7 Commits

Author SHA1 Message Date
Dane Everitt
93417dddb1 Update CHANGELOG.md 2021-01-08 21:23:25 -08:00
Dane Everitt
044c46fc9a Merge branch 'develop' of https://github.com/pterodactyl/wings into develop 2021-01-08 21:21:37 -08:00
Dane Everitt
c9d972d544 Revert usage of ContainerWait, return to io.Copy blocking
Until https://github.com/moby/moby/issues/41827 is resolved this code causes chaos to unfold on machines and causes servers to be non-terminatable.

This logic was initially changed for logical purposes, but this io.Copy logic works perfectly fine (even if it is not immediately intuitive).
2021-01-08 21:21:09 -08:00
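
For context on what the revert relies on: io.Copy into a throwaway writer only returns once the source reader hits EOF, i.e. once the attached stream is closed. The standalone sketch below (not part of this diff) demonstrates that blocking behaviour, with an io.Pipe standing in for the container stream.

package main

import (
	"fmt"
	"io"
	"time"
)

// noopWriter discards everything written to it; it only exists so io.Copy
// has somewhere to drain the stream into.
type noopWriter struct{}

func (noopWriter) Write(b []byte) (int, error) { return len(b), nil }

func main() {
	r, w := io.Pipe()
	go func() {
		w.Write([]byte("console output..."))
		time.Sleep(time.Second) // simulate a container that is still running
		w.Close()               // the stream closes when the "container" stops
	}()

	// Blocks until the pipe (the attached stream) is closed.
	if _, err := io.Copy(noopWriter{}, r); err != nil {
		fmt.Println("copy error:", err)
	}
	fmt.Println("stream closed, container is no longer running")
}
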
Matthew Penner
0aab4b1ac2 environment(docker): re-attach to container logs after EOF 2021-01-08 08:19:33 -07:00
Matthew Penner
4f4b4fd2e6 environment(docker): cleanup code 2021-01-08 08:15:40 -07:00
Matthew Penner
66c9be357c Potential fix for servers being marked as stopping after being marked as offline 2021-01-07 19:32:15 -07:00
Matthew Penner
1d36811dfe Fix v being shown twice on wings boot 2021-01-07 16:44:09 -07:00
8 changed files with 80 additions and 41 deletions

View File

@@ -1,5 +1,9 @@
# Changelog
## v1.2.2
### Fixed
* Reverts changes to logic handling blocking until a server process is done running when polling stats. This change exposed a bug in the underlying Docker system causing servers to enter a state in which Wings was unable to terminate the process and Docker commands would hang if executed against the container.
## v1.2.1
### Fixed
* Fixes servers not being properly marked as no longer transferring if an error occurs during the archive process.

View File

@@ -406,7 +406,7 @@ __ [blue][bold]Pterodactyl[reset] _____/___/_______ _______ ______
\_____\ \/\/ / / / __ / ___/
\___\ / / / / /_/ /___ /
\___/\___/___/___/___/___ /______/
/_______/ [bold]v%s[reset]
/_______/ [bold]%s[reset]
Copyright © 2018 - 2021 Dane Everitt & Contributors

View File

@@ -26,6 +26,18 @@ type imagePullStatus struct {
Progress string `json:"progress"`
}
// A custom console writer that allows us to keep a function blocked until the
// given stream is properly closed. This does nothing special, only exists to
// make a noop io.Writer.
type noopWriter struct{}
var _ io.Writer = noopWriter{}
// Implement the required Write function to satisfy the io.Writer interface.
func (nw noopWriter) Write(b []byte) (int, error) {
return len(b), nil
}
// Attaches to the docker container itself and ensures that we can pipe data in and out
// of the process stream. This should not be used for reading console data as you *will*
// miss important output at the beginning because of the time delay with attaching to the
@@ -60,8 +72,8 @@ func (e *Environment) Attach() error {
go func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
defer e.stream.Close()
defer func() {
e.stream.Close()
e.SetState(environment.ProcessOfflineState)
e.SetStream(nil)
}()
@@ -78,24 +90,18 @@ func (e *Environment) Attach() error {
// Block the completion of this routine until the container is no longer running. This allows
// the pollResources function to run until it needs to be stopped. Because the container
// can be polled for resource usage, even when sropped, we need to have this logic present
// can be polled for resource usage, even when stopped, we need to have this logic present
// in order to cancel the context and therefore stop the routine that is spawned.
ok, err := e.client.ContainerWait(ctx, e.Id, container.WaitConditionNotRunning)
select {
case <-ctx.Done():
// Do nothing, the context was canceled by a different process, there is no error
// to report at this point.
e.log().Debug("terminating ContainerWait blocking process, context canceled")
return
case _ = <-err:
// An error occurred with the ContainerWait call, report it here and then hope
// for the fucking best I guess?
e.log().WithField("error", err).Error("error while blocking using ContainerWait")
return
case <-ok:
// Do nothing, everything is running as expected. This will allow us to keep
// blocking the termination of this function until the container stops at which
// point all of our deferred functions can run.
//
// For now, DO NOT use client#ContainerWait from the Docker package. There is a nasty
// bug causing containers to hang on deletion and cause servers to lock up on the system.
//
// This weird code isn't intuitive, but it keeps the function from ending until the container
// is stopped and therefore the stream reader ends up closed.
// @see https://github.com/moby/moby/issues/41827
c := new(noopWriter)
if _, err := io.Copy(c, e.stream.Reader); err != nil {
e.log().WithField("error", err).Error("could not copy from environment stream to noop writer")
}
}()
@@ -272,6 +278,8 @@ func (e *Environment) Destroy() error {
Force: true,
})
e.SetState(environment.ProcessOfflineState)
// Don't trigger a destroy failure if we try to delete a container that does not
// exist on the system. We're just a step ahead of ourselves in that case.
//
@@ -280,8 +288,6 @@ func (e *Environment) Destroy() error {
return nil
}
e.SetState(environment.ProcessOfflineState)
return err
}
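
The Destroy() hunks above move the offline-state update ahead of the error handling while continuing to treat a missing container as success. As a rough standalone illustration of that removal pattern (not wings code, using only the standard Docker SDK helpers):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// removeContainer tears down a container but treats "no such container" as
// success, so a container that was already removed out of band is not
// reported as a destroy failure.
func removeContainer(ctx context.Context, cli *client.Client, id string) error {
	err := cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{
		RemoveVolumes: true,
		Force:         true,
	})
	if err != nil && !client.IsErrNotFound(err) {
		return err
	}
	return nil
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	if err := removeContainer(context.Background(), cli, "example-container"); err != nil {
		panic(err)
	}
	fmt.Println("container removed (or it never existed)")
}
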
@@ -307,19 +313,39 @@ func (e *Environment) followOutput() error {
if err != nil {
return err
}
go func(reader io.ReadCloser) {
defer reader.Close()
evts := e.Events()
err := system.ScanReader(reader, func(line string) {
evts.Publish(environment.ConsoleOutputEvent, line)
})
if err != nil && err != io.EOF {
log.WithField("error", err).WithField("container_id", e.Id).Warn("error processing scanner line in console output")
}
}(reader)
go e.scanOutput(reader)
return nil
}
func (e *Environment) scanOutput(reader io.ReadCloser) {
defer reader.Close()
events := e.Events()
err := system.ScanReader(reader, func(line string) {
events.Publish(environment.ConsoleOutputEvent, line)
})
if err != nil && err != io.EOF {
log.WithField("error", err).WithField("container_id", e.Id).Warn("error processing scanner line in console output")
return
}
// Return here if the server is offline or currently stopping.
if e.State() == environment.ProcessStoppingState || e.State() == environment.ProcessOfflineState {
return
}
// Close the current reader before starting a new one, the defer will still run
// but it will do nothing if we already closed the stream.
_ = reader.Close()
// Start following the output of the server again.
go e.followOutput()
}
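
The refactor above pulls the goroutine body into scanOutput so that a clean EOF can trigger followOutput again, which is the "re-attach to container logs after EOF" fix from the commit list. A rough standalone sketch of that idea, using the Docker SDK's ContainerLogs directly instead of wings' own helpers (the stillRunning callback and the line handling are illustrative only):

package main

import (
	"bufio"
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// followLogs streams a container's logs and, if the stream ends while the
// container is still considered running, attaches to the logs again rather
// than going silent.
func followLogs(ctx context.Context, cli *client.Client, id string, stillRunning func() bool) error {
	for {
		reader, err := cli.ContainerLogs(ctx, id, types.ContainerLogsOptions{
			ShowStdout: true,
			ShowStderr: true,
			Follow:     true,
		})
		if err != nil {
			return err
		}

		scanner := bufio.NewScanner(reader)
		for scanner.Scan() {
			fmt.Println(scanner.Text()) // a real implementation would publish this to console listeners
		}
		err = scanner.Err()
		reader.Close()
		if err != nil {
			return err
		}
		// Clean EOF: if the container has actually stopped we are done,
		// otherwise loop around and attach to the logs again.
		if !stillRunning() {
			return nil
		}
	}
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	_ = followLogs(context.Background(), cli, "example-container", func() bool { return false })
}
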
// Pulls the image from Docker. If there is an error while pulling the image from the source
// but the image already exists locally, we will report that error to the logger but continue
// with the process.
@@ -403,9 +429,11 @@ func (e *Environment) ensureImageExists(image string) error {
// I'm not sure what the best approach here is, but this will block execution until the image
// is done being pulled, which is what we need.
scanner := bufio.NewScanner(out)
for scanner.Scan() {
s := imagePullStatus{}
fmt.Println(scanner.Text())
if err := json.Unmarshal(scanner.Bytes(), &s); err == nil {
e.Events().Publish(environment.DockerImagePullStatus, s.Status+" "+s.Progress)
}
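
For reference, the stream returned by the Docker SDK's ImagePull emits one JSON object per line with fields such as status and progress, and draining it to EOF is what provides the blocking the comment above asks for. A minimal standalone sketch (not the wings implementation):

package main

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// pullStatus matches the shape of the JSON objects Docker writes to the
// ImagePull stream, one per line.
type pullStatus struct {
	Status   string `json:"status"`
	Progress string `json:"progress"`
}

// pullImage starts a pull and drains the progress stream; reading until EOF
// is what blocks the caller until the pull has actually finished.
func pullImage(ctx context.Context, cli *client.Client, image string) error {
	out, err := cli.ImagePull(ctx, image, types.ImagePullOptions{})
	if err != nil {
		return err
	}
	defer out.Close()

	scanner := bufio.NewScanner(out)
	for scanner.Scan() {
		var s pullStatus
		if err := json.Unmarshal(scanner.Bytes(), &s); err == nil {
			fmt.Println(s.Status, s.Progress)
		}
	}
	return scanner.Err()
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	if err := pullImage(context.Background(), cli, "alpine:latest"); err != nil {
		panic(err)
	}
}
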

View File

@@ -82,8 +82,9 @@ func (e *Environment) Type() string {
// Set if this process is currently attached to the process.
func (e *Environment) SetStream(s *types.HijackedResponse) {
e.mu.Lock()
defer e.mu.Unlock()
e.stream = s
e.mu.Unlock()
}
// Determine if the this process is currently attached to the container.
@@ -98,6 +99,7 @@ func (e *Environment) Events() *events.EventBus {
e.eventMu.Do(func() {
e.emitter = events.New()
})
return e.emitter
}
@@ -174,12 +176,14 @@ func (e *Environment) Config() *environment.Configuration {
// Sets the stop configuration for the environment.
func (e *Environment) SetStopConfiguration(c api.ProcessStopConfiguration) {
e.mu.Lock()
defer e.mu.Unlock()
e.meta.Stop = c
e.mu.Unlock()
}
func (e *Environment) SetImage(i string) {
e.mu.Lock()
defer e.mu.Unlock()
e.meta.Image = i
e.mu.Unlock()
}
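
The setters above swap a trailing e.mu.Unlock() for defer e.mu.Unlock(). A generic illustration of why that is the safer idiom: the lock is released on every exit path, including panics and any early returns added later. The container type here is made up for the example.

package main

import (
	"fmt"
	"sync"
)

type container struct {
	mu    sync.Mutex
	image string
}

// SetImage mirrors the shape of the setters above: lock, defer the unlock,
// mutate. The defer guarantees the mutex is released even if the body panics
// or later grows an early return.
func (c *container) SetImage(i string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.image = i
}

func main() {
	c := &container{}
	c.SetImage("example/image:latest")
	fmt.Println(c.image)
}
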

View File

@@ -20,10 +20,9 @@ import (
//
// This process will also confirm that the server environment exists and is in a bootable
// state. This ensures that unexpected container deletion while Wings is running does
// not result in the server becoming unbootable.
// not result in the server becoming un-bootable.
func (e *Environment) OnBeforeStart() error {
// Always destroy and re-create the server container to ensure that synced data from
// the Panel is usee.
// Always destroy and re-create the server container to ensure that synced data from the Panel is used.
if err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
if !client.IsErrNotFound(err) {
return errors.WithMessage(err, "failed to remove server docker container during pre-boot")
@@ -49,6 +48,7 @@ func (e *Environment) OnBeforeStart() error {
// call to OnBeforeStart().
func (e *Environment) Start() error {
sawError := false
// If sawError is set to true there was an error somewhere in the pipeline that
// got passed up, but we also want to ensure we set the server to be offline at
// that point.
@@ -235,7 +235,7 @@ func (e *Environment) Terminate(signal os.Signal) error {
sig := strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed")
if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil {
if err := e.client.ContainerKill(context.Background(), e.Id, sig); err != nil && !client.IsErrNotFound(err) {
return err
}

View File

@@ -19,8 +19,9 @@ var ErrNotAttached = errors.New("not attached to instance")
func (e *Environment) setStream(s *types.HijackedResponse) {
e.mu.Lock()
defer e.mu.Unlock()
e.stream = s
e.mu.Unlock()
}
// Sends the specified command to the stdin of the running container instance. There is no
@@ -71,7 +72,7 @@ func (e *Environment) Readlog(lines int) ([]string, error) {
// Docker stores the logs for server output in a JSON format. This function will iterate over the JSON
// that was read from the log file and parse it into a more human readable format.
func (e *Environment) parseLogToStrings(b []byte) ([]string, error) {
var hasError = false
hasError := false
var out []string
scanner := bufio.NewScanner(bytes.NewReader(b))
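
The comment above refers to Docker's log file format. Assuming the default json-file logging driver, each line of the file is a JSON object with log, stream, and time fields; the sketch below (illustrative, not the wings parser) shows the basic unpacking such a function has to do.

package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
)

// dockerLogLine matches one line written by Docker's json-file logging driver.
type dockerLogLine struct {
	Log    string `json:"log"`
	Stream string `json:"stream"`
	Time   string `json:"time"`
}

// parseLogToStrings extracts the raw log text from each JSON line, falling
// back to the unparsed text if a line is not valid JSON.
func parseLogToStrings(b []byte) ([]string, error) {
	var out []string
	scanner := bufio.NewScanner(bytes.NewReader(b))
	for scanner.Scan() {
		var line dockerLogLine
		if err := json.Unmarshal(scanner.Bytes(), &line); err != nil {
			out = append(out, scanner.Text())
			continue
		}
		out = append(out, line.Log)
	}
	return out, scanner.Err()
}

func main() {
	lines, _ := parseLogToStrings([]byte(`{"log":"Server started\n","stream":"stdout","time":"2021-01-08T21:21:09Z"}`))
	fmt.Println(lines)
}
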

View File

@@ -64,9 +64,11 @@ func (s *Server) StartEventListeners() {
// to terminate again.
if s.Environment.State() != environment.ProcessStoppingState {
s.Environment.SetState(environment.ProcessStoppingState)
go func() {
s.Log().Warn("stopping server instance, violating throttle limits")
s.PublishConsoleOutputFromDaemon("Your server is being stopped for outputting too much data in a short period of time.")
// Completely skip over server power actions and terminate the running instance. This gives the
// server 15 seconds to finish stopping gracefully before it is forcefully terminated.
if err := s.Environment.WaitForStop(config.Get().Throttles.StopGracePeriod, true); err != nil {

View File

@@ -2,5 +2,5 @@ package system
var (
// The current version of this software.
Version = "0.0.1"
Version = "v0.0.1"
)
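
Taken together with the banner change from v%s to %s earlier in this compare, this suggests the doubled "v" came from release builds injecting a version string that already starts with "v" (an assumption; the ldflags setup is not shown here). A tiny illustration:

package main

import "fmt"

func main() {
	injected := "v1.2.2" // assumed: the build injects a version that already carries a leading "v"

	fmt.Printf("v%s\n", injected) // old banner verb: prints "vv1.2.2"
	fmt.Printf("%s\n", injected)  // new banner verb: prints "v1.2.2"
}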