2019-04-04 05:01:11 +00:00
|
|
|
package server
|
|
|
|
|
|
|
|
import (
|
2019-04-06 19:27:44 +00:00
|
|
|
"bufio"
|
|
|
|
"bytes"
|
2019-11-16 23:48:50 +00:00
|
|
|
"context"
|
2019-04-06 19:27:44 +00:00
|
|
|
"encoding/json"
|
2019-04-04 05:01:11 +00:00
|
|
|
"fmt"
|
2020-05-29 05:07:53 +00:00
|
|
|
"github.com/apex/log"
|
2019-04-06 04:59:27 +00:00
|
|
|
"github.com/docker/docker/api/types"
|
2019-04-04 05:01:11 +00:00
|
|
|
"github.com/docker/docker/api/types/container"
|
2019-04-04 05:49:15 +00:00
|
|
|
"github.com/docker/docker/api/types/mount"
|
2019-04-04 05:01:11 +00:00
|
|
|
"github.com/docker/docker/client"
|
2019-04-04 05:49:15 +00:00
|
|
|
"github.com/docker/docker/daemon/logger/jsonfilelog"
|
2019-04-04 06:09:15 +00:00
|
|
|
"github.com/docker/go-connections/nat"
|
2019-04-06 19:27:44 +00:00
|
|
|
"github.com/pkg/errors"
|
2019-09-23 04:22:16 +00:00
|
|
|
"github.com/pterodactyl/wings/api"
|
2019-12-25 00:40:51 +00:00
|
|
|
"github.com/pterodactyl/wings/config"
|
2019-04-06 19:27:44 +00:00
|
|
|
"io"
|
2019-04-04 05:01:11 +00:00
|
|
|
"os"
|
2020-07-04 21:37:52 +00:00
|
|
|
"path/filepath"
|
2019-04-04 06:54:38 +00:00
|
|
|
"strconv"
|
2019-04-04 05:01:11 +00:00
|
|
|
"strings"
|
2020-07-18 23:03:25 +00:00
|
|
|
"sync"
|
2019-04-06 04:59:27 +00:00
|
|
|
"time"
|
2019-04-04 05:01:11 +00:00
|
|
|
)
|
|
|
|
|
2019-04-06 04:59:27 +00:00
|
|
|
// DockerEnvironment defines the base environment for Docker instances running
// through Wings.
type DockerEnvironment struct {
	// Embedded lock guarding the mutable fields below; SetStream/IsAttached
	// use the write/read halves respectively.
	sync.RWMutex

	// The server this environment is managing.
	Server *Server

	// The Docker client being used for this instance.
	Client *client.Client

	// Controls the hijacked response stream which exists only when we're attached to
	// the running container instance.
	stream *types.HijackedResponse

	// Holds the stats stream used by the polling commands so that we can easily close
	// it out.
	stats io.ReadCloser
}
|
|
|
|
|
|
|
|
// Set if this process is currently attached to the process.
|
2020-08-01 22:34:14 +00:00
|
|
|
func (d *DockerEnvironment) SetStream(s *types.HijackedResponse) {
|
2020-07-18 23:03:25 +00:00
|
|
|
d.Lock()
|
2020-08-01 22:34:14 +00:00
|
|
|
d.stream = s
|
2020-07-18 23:03:25 +00:00
|
|
|
d.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Determine if the this process is currently attached to the container.
|
|
|
|
func (d *DockerEnvironment) IsAttached() bool {
|
|
|
|
d.RLock()
|
|
|
|
defer d.RUnlock()
|
|
|
|
|
2020-08-01 22:34:14 +00:00
|
|
|
return d.stream != nil
|
2019-04-06 04:59:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Creates a new base Docker environment. A server must still be attached to it.
|
2019-12-25 00:40:51 +00:00
|
|
|
func NewDockerEnvironment(server *Server) error {
|
2019-04-06 04:59:27 +00:00
|
|
|
cli, err := client.NewClientWithOpts(client.FromEnv)
|
|
|
|
if err != nil {
|
2019-12-25 00:40:51 +00:00
|
|
|
return err
|
2019-04-06 20:33:54 +00:00
|
|
|
}
|
|
|
|
|
2019-12-25 00:40:51 +00:00
|
|
|
server.Environment = &DockerEnvironment{
|
2020-01-18 21:05:44 +00:00
|
|
|
Server: server,
|
|
|
|
Client: cli,
|
2019-04-06 20:33:54 +00:00
|
|
|
}
|
|
|
|
|
2019-12-25 00:40:51 +00:00
|
|
|
return nil
|
2019-04-04 05:01:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Compile-time assertion that DockerEnvironment always implements every
// method of the base Environment interface.
var _ Environment = (*DockerEnvironment)(nil)
|
|
|
|
|
2019-04-06 19:27:44 +00:00
|
|
|
// Returns the name of the environment.
|
|
|
|
func (d *DockerEnvironment) Type() string {
|
|
|
|
return "docker"
|
|
|
|
}
|
|
|
|
|
2019-04-06 04:59:27 +00:00
|
|
|
// Determines if the container exists in this environment.
|
2019-04-20 23:20:08 +00:00
|
|
|
func (d *DockerEnvironment) Exists() (bool, error) {
|
2020-07-18 23:03:25 +00:00
|
|
|
_, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
2019-04-06 04:59:27 +00:00
|
|
|
|
2019-04-20 23:20:08 +00:00
|
|
|
if err != nil {
|
|
|
|
// If this error is because the container instance wasn't found via Docker we
|
|
|
|
// can safely ignore the error and just return false.
|
|
|
|
if client.IsErrNotFound(err) {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Determines if the server's docker container is currently running. If there is no container
|
|
|
|
// present, an error will be raised (since this shouldn't be a case that ever happens under
|
|
|
|
// correctly developed circumstances).
|
|
|
|
//
|
|
|
|
// You can confirm if the instance wasn't found by using client.IsErrNotFound from the Docker
|
|
|
|
// API.
|
|
|
|
//
|
|
|
|
// @see docker/client/errors.go
|
|
|
|
func (d *DockerEnvironment) IsRunning() (bool, error) {
|
2020-07-18 23:03:25 +00:00
|
|
|
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
2019-04-20 23:20:08 +00:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return c.State.Running, nil
|
2019-04-04 05:01:11 +00:00
|
|
|
}
|
|
|
|
|
2019-11-24 23:08:38 +00:00
|
|
|
// Performs an in-place update of the Docker container's resource limits without actually
|
|
|
|
// making any changes to the operational state of the container. This allows memory, cpu,
|
|
|
|
// and IO limitations to be adjusted on the fly for individual instances.
|
|
|
|
func (d *DockerEnvironment) InSituUpdate() error {
|
2020-07-18 23:03:25 +00:00
|
|
|
if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err != nil {
|
2019-11-24 23:08:38 +00:00
|
|
|
// If the container doesn't exist for some reason there really isn't anything
|
|
|
|
// we can do to fix that in this process (it doesn't make sense at least). In those
|
|
|
|
// cases just return without doing anything since we still want to save the configuration
|
|
|
|
// to the disk.
|
|
|
|
//
|
|
|
|
// We'll let a boot process make modifications to the container if needed at this point.
|
|
|
|
if client.IsErrNotFound(err) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
u := container.UpdateConfig{
|
|
|
|
Resources: d.getResourcesForServer(),
|
|
|
|
}
|
|
|
|
|
2020-05-31 19:42:10 +00:00
|
|
|
d.Server.Log().WithField("limits", fmt.Sprintf("%+v", u.Resources)).Debug("updating server container on-the-fly with passed limits")
|
2020-07-04 21:14:22 +00:00
|
|
|
|
|
|
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
|
|
|
|
defer cancel()
|
2020-07-18 23:03:25 +00:00
|
|
|
if _, err := d.Client.ContainerUpdate(ctx, d.Server.Id(), u); err != nil {
|
2019-11-24 23:08:38 +00:00
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-09-23 03:47:38 +00:00
|
|
|
// Run before the container starts and get the process configuration from the Panel.
|
|
|
|
// This is important since we use this to check configuration files as well as ensure
|
|
|
|
// we always have the latest version of an egg available for server processes.
|
2019-11-25 05:12:02 +00:00
|
|
|
//
|
|
|
|
// This process will also confirm that the server environment exists and is in a bootable
|
|
|
|
// state. This ensures that unexpected container deletion while Wings is running does
|
|
|
|
// not result in the server becoming unbootable.
|
2019-09-23 03:47:38 +00:00
|
|
|
func (d *DockerEnvironment) OnBeforeStart() error {
|
2020-05-29 05:07:53 +00:00
|
|
|
d.Server.Log().Info("syncing server configuration with panel")
|
2019-12-22 21:21:21 +00:00
|
|
|
if err := d.Server.Sync(); err != nil {
|
2019-12-17 04:47:35 +00:00
|
|
|
return err
|
2019-09-23 03:47:38 +00:00
|
|
|
}
|
|
|
|
|
2020-04-18 22:31:34 +00:00
|
|
|
if !d.Server.Filesystem.HasSpaceAvailable() {
|
|
|
|
return errors.New("cannot start server, not enough disk space available")
|
|
|
|
}
|
|
|
|
|
2019-12-22 21:52:22 +00:00
|
|
|
// Always destroy and re-create the server container to ensure that synced data from
|
|
|
|
// the Panel is used.
|
2020-07-18 23:03:25 +00:00
|
|
|
if err := d.Client.ContainerRemove(context.Background(), d.Server.Id(), types.ContainerRemoveOptions{RemoveVolumes: true}); err != nil {
|
2019-12-22 21:52:22 +00:00
|
|
|
if !client.IsErrNotFound(err) {
|
|
|
|
return err
|
2019-11-25 05:12:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The Create() function will check if the container exists in the first place, and if
|
|
|
|
// so just silently return without an error. Otherwise, it will try to create the necessary
|
|
|
|
// container and data storage directory.
|
|
|
|
//
|
|
|
|
// This won't actually run an installation process however, it is just here to ensure the
|
|
|
|
// environment gets created properly if it is missing and the server is started. We're making
|
|
|
|
// an assumption that all of the files will still exist at this point.
|
|
|
|
if err := d.Create(); err != nil {
|
2019-12-17 04:47:35 +00:00
|
|
|
return err
|
2019-11-25 05:12:02 +00:00
|
|
|
}
|
|
|
|
|
2019-09-23 03:47:38 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-11-24 21:04:24 +00:00
|
|
|
// Start starts the server environment and begins piping output to the event
// listeners for the console. If a container does not exist, or needs to be
// rebuilt, that happens in the call to OnBeforeStart().
func (d *DockerEnvironment) Start() error {
	sawError := false
	// If sawError is set to true there was an error somewhere in the pipeline that
	// got passed up, but we also want to ensure we set the server to be offline at
	// that point.
	defer func() {
		if sawError {
			// If we don't set it to stopping first, you'll trigger crash detection which
			// we don't want to do at this point since it'll just immediately try to do the
			// exact same action that lead to it crashing in the first place...
			_ = d.Server.SetState(ProcessStoppingState)
			_ = d.Server.SetState(ProcessOfflineState)
		}
	}()

	// If the server is suspended the user shouldn't be able to boot it, in those cases
	// return a suspension error and let the calling area handle the issue.
	//
	// Theoretically you'd have the Panel handle all of this logic, but we cannot do that
	// because we allow the websocket to control the server power state as well, so we'll
	// need to handle that action in here.
	if d.Server.IsSuspended() {
		return &suspendedError{}
	}

	if c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err != nil {
		// Do nothing if the container is not found, we just don't want to continue
		// to the next block of code here. This check was inlined here to guard againt
		// a nil-pointer when checking c.State below.
		//
		// @see https://github.com/pterodactyl/panel/issues/2000
		if !client.IsErrNotFound(err) {
			return errors.WithStack(err)
		}
	} else {
		// If the server is running update our internal state and continue on with the attach.
		if c.State.Running {
			_ = d.Server.SetState(ProcessRunningState)

			return d.Attach()
		}

		// Truncate the log file so we don't end up outputting a bunch of useless log information
		// to the websocket and whatnot. Check first that the path and file exist before trying
		// to truncate them.
		if _, err := os.Stat(c.LogPath); err == nil {
			if err := os.Truncate(c.LogPath, 0); err != nil {
				return errors.WithStack(err)
			}
		}
	}

	_ = d.Server.SetState(ProcessStartingState)

	// Set this to true for now, we will set it to false once we reach the
	// end of this chain.
	sawError = true

	// Run the before start function and wait for it to finish. This will validate that the container
	// exists on the system, and rebuild the container if that is required for server booting to
	// occur.
	if err := d.OnBeforeStart(); err != nil {
		return errors.WithStack(err)
	}

	// Update the configuration files defined for the server before beginning the boot process.
	// This process executes a bunch of parallel updates, so we just block until that process
	// is completed. Any errors as a result of this will just be bubbled out in the logger,
	// we don't need to actively do anything about it at this point, worst comes to worst the
	// server starts in a weird state and the user can manually adjust.
	d.Server.UpdateConfigurationFiles()

	// Reset the permissions on files for the server before actually trying
	// to start it.
	if err := d.Server.Filesystem.Chown("/"); err != nil {
		return errors.WithStack(err)
	}

	// Give Docker up to ten seconds to actually start the container.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()

	if err := d.Client.ContainerStart(ctx, d.Server.Id(), types.ContainerStartOptions{}); err != nil {
		return errors.WithStack(err)
	}

	// No errors, good to continue through.
	sawError = false

	return d.Attach()
}
|
|
|
|
|
2019-04-06 04:59:27 +00:00
|
|
|
// Stops the container that the server is running in. This will allow up to 10
|
|
|
|
// seconds to pass before a failure occurs.
|
2019-04-04 05:01:11 +00:00
|
|
|
func (d *DockerEnvironment) Stop() error {
|
2020-07-18 23:03:25 +00:00
|
|
|
stop := d.Server.ProcessConfiguration().Stop
|
2019-09-23 04:22:16 +00:00
|
|
|
if stop.Type == api.ProcessStopSignal {
|
|
|
|
return d.Terminate(os.Kill)
|
|
|
|
}
|
2019-04-06 04:59:27 +00:00
|
|
|
|
2020-08-04 23:19:13 +00:00
|
|
|
_ = d.Server.SetState(ProcessStoppingState)
|
|
|
|
|
2020-07-18 23:46:41 +00:00
|
|
|
// Only attempt to send the stop command to the instance if we are actually attached to
|
|
|
|
// the instance. If we are not for some reason, just send the container stop event.
|
|
|
|
if d.IsAttached() && stop.Type == api.ProcessStopCommand {
|
2019-09-23 04:22:16 +00:00
|
|
|
return d.SendCommand(stop.Value)
|
|
|
|
}
|
|
|
|
|
|
|
|
t := time.Second * 10
|
|
|
|
|
2020-07-18 23:46:41 +00:00
|
|
|
err := d.Client.ContainerStop(context.Background(), d.Server.Id(), &t)
|
|
|
|
if err != nil {
|
|
|
|
// If the container does not exist just mark the process as stopped and return without
|
|
|
|
// an error.
|
|
|
|
if client.IsErrNotFound(err) {
|
2020-08-01 22:34:14 +00:00
|
|
|
d.SetStream(nil)
|
2020-08-04 23:19:13 +00:00
|
|
|
_ = d.Server.SetState(ProcessOfflineState)
|
2020-07-18 23:46:41 +00:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Restarts the server process by waiting for the process to gracefully stop and then triggering a
|
|
|
|
// start command. This will return an error if there is already a restart process executing for the
|
|
|
|
// server. The lock is released when the process is stopped and a start has begun.
|
|
|
|
func (d *DockerEnvironment) Restart() error {
|
|
|
|
err := d.WaitForStop(60, false)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start the process.
|
|
|
|
return d.Start()
|
|
|
|
}
|
|
|
|
|
2020-04-03 20:43:13 +00:00
|
|
|
// WaitForStop attempts to gracefully stop a server using the defined stop
// command. If the server does not stop after `seconds` have passed, either an
// error is returned or the instance is terminated forcefully, depending on
// the value of the `terminate` argument.
func (d *DockerEnvironment) WaitForStop(seconds int, terminate bool) error {
	// Nothing to wait on if the process is already offline.
	if d.Server.GetState() == ProcessOfflineState {
		log.WithField("server", d.Server.Id()).Debug("server is already offline, not waiting for stop.")
		return nil
	}
	log.WithField("server", d.Server.Id()).Debug("waiting for server to stop")

	if err := d.Stop(); err != nil {
		return errors.WithStack(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(seconds)*time.Second)
	defer cancel()

	// Block the return of this function until the container as been marked as no
	// longer running. If this wait does not end by the time seconds have passed,
	// attempt to terminate the container, or return an error.
	ok, errChan := d.Client.ContainerWait(ctx, d.Server.Id(), container.WaitConditionNotRunning)
	select {
	case <-ctx.Done():
		if ctxErr := ctx.Err(); ctxErr != nil {
			if terminate {
				return d.Terminate(os.Kill)
			}

			return errors.WithStack(ctxErr)
		}
	case err := <-errChan:
		if err != nil {
			return errors.WithStack(err)
		}
	case <-ok:
	}

	return nil
}
|
|
|
|
|
2019-04-06 04:59:27 +00:00
|
|
|
// Forcefully terminates the container using the signal passed through.
|
2019-04-04 05:01:11 +00:00
|
|
|
func (d *DockerEnvironment) Terminate(signal os.Signal) error {
|
2020-07-18 23:03:25 +00:00
|
|
|
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
2019-04-06 04:59:27 +00:00
|
|
|
if err != nil {
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-04-06 04:59:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if !c.State.Running {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-08-04 23:19:13 +00:00
|
|
|
// We set it to stopping than offline to prevent crash detection from being triggered.
|
|
|
|
_ = d.Server.SetState(ProcessStoppingState)
|
|
|
|
_ = d.Server.SetState(ProcessOfflineState)
|
2019-09-23 04:22:16 +00:00
|
|
|
|
|
|
|
return d.Client.ContainerKill(
|
2020-07-18 23:03:25 +00:00
|
|
|
context.Background(), d.Server.Id(), strings.TrimSuffix(strings.TrimPrefix(signal.String(), "signal "), "ed"),
|
2019-09-23 04:22:16 +00:00
|
|
|
)
|
2019-04-04 05:01:11 +00:00
|
|
|
}
|
|
|
|
|
2019-12-22 07:23:56 +00:00
|
|
|
// Remove the Docker container from the machine. If the container is currently running
|
|
|
|
// it will be forcibly stopped by Docker.
|
|
|
|
func (d *DockerEnvironment) Destroy() error {
|
2020-08-04 23:19:13 +00:00
|
|
|
// We set it to stopping than offline to prevent crash detection from being triggered.
|
|
|
|
_ = d.Server.SetState(ProcessStoppingState)
|
|
|
|
_ = d.Server.SetState(ProcessOfflineState)
|
2020-01-19 22:00:59 +00:00
|
|
|
|
2020-07-18 23:03:25 +00:00
|
|
|
err := d.Client.ContainerRemove(context.Background(), d.Server.Id(), types.ContainerRemoveOptions{
|
2019-12-22 07:23:56 +00:00
|
|
|
RemoveVolumes: true,
|
|
|
|
RemoveLinks: false,
|
|
|
|
Force: true,
|
|
|
|
})
|
2020-05-10 00:16:41 +00:00
|
|
|
|
|
|
|
// Don't trigger a destroy failure if we try to delete a container that does not
|
|
|
|
// exist on the system. We're just a step ahead of ourselves in that case.
|
|
|
|
//
|
|
|
|
// @see https://github.com/pterodactyl/panel/issues/2001
|
|
|
|
if err != nil && client.IsErrNotFound(err) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
2019-12-22 07:23:56 +00:00
|
|
|
}
|
|
|
|
|
2019-12-01 00:43:18 +00:00
|
|
|
// Determine the container exit state and return the exit code and wether or not
|
|
|
|
// the container was killed by the OOM killer.
|
|
|
|
func (d *DockerEnvironment) ExitState() (uint32, bool, error) {
|
2020-07-18 23:03:25 +00:00
|
|
|
c, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
2019-12-01 00:43:18 +00:00
|
|
|
if err != nil {
|
2020-05-08 04:08:06 +00:00
|
|
|
// I'm not entirely sure how this can happen to be honest. I tried deleting a
|
|
|
|
// container _while_ a server was running and wings gracefully saw the crash and
|
|
|
|
// created a new container for it.
|
|
|
|
//
|
|
|
|
// However, someone reported an error in Discord about this scenario happening,
|
|
|
|
// so I guess this should prevent it? They didn't tell me how they caused it though
|
2020-07-31 22:19:09 +00:00
|
|
|
// so that's a mystery that will have to go unsolved.
|
2020-05-08 04:08:06 +00:00
|
|
|
//
|
|
|
|
// @see https://github.com/pterodactyl/panel/issues/2003
|
|
|
|
if client.IsErrNotFound(err) {
|
|
|
|
return 1, false, nil
|
|
|
|
}
|
|
|
|
|
2019-12-01 00:43:18 +00:00
|
|
|
return 0, false, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return uint32(c.State.ExitCode), c.State.OOMKilled, nil
|
|
|
|
}
|
|
|
|
|
2019-04-20 23:20:08 +00:00
|
|
|
// Attach attaches to the docker container itself and ensures that we can pipe
// data in and out of the process stream. This should not be used for reading
// console data as you *will* miss important output at the beginning because of
// the time delay with attaching to the output; FollowConsoleOutput covers that.
func (d *DockerEnvironment) Attach() error {
	// Already attached — nothing to do.
	if d.IsAttached() {
		return nil
	}

	if err := d.FollowConsoleOutput(); err != nil {
		return errors.WithStack(err)
	}

	opts := types.ContainerAttachOptions{
		Stdin:  true,
		Stdout: true,
		Stderr: true,
		Stream: true,
	}

	// Set the stream again with the container.
	if st, err := d.Client.ContainerAttach(context.Background(), d.Server.Id(), opts); err != nil {
		return errors.WithStack(err)
	} else {
		d.SetStream(&st)
	}

	console := Console{
		Server: d.Server,
	}

	// Kick off resource polling in the background; a failure there is logged
	// but does not prevent the attach from succeeding.
	go func() {
		if err := d.EnableResourcePolling(); err != nil {
			d.Server.Log().WithField("error", errors.WithStack(err)).Warn("failed to enable resource polling on server")
		}
	}()

	// Copy container output into the console until the stream ends, then mark
	// the process offline and clear the stream.
	//
	// NOTE(review): d.stream is read here without holding the lock that
	// SetStream uses — presumably safe in practice, but worth confirming.
	go func() {
		defer d.stream.Close()
		defer func() {
			_ = d.Server.SetState(ProcessOfflineState)
			d.SetStream(nil)
		}()

		_, _ = io.Copy(console, d.stream.Reader)
	}()

	return nil
}
|
|
|
|
|
|
|
|
// Attaches to the log for the container. This avoids us missing cruicial output that
|
|
|
|
// happens in the split seconds before the code moves from 'Starting' to 'Attaching'
|
|
|
|
// on the process.
|
|
|
|
func (d *DockerEnvironment) FollowConsoleOutput() error {
|
|
|
|
if exists, err := d.Exists(); !exists {
|
|
|
|
if err != nil {
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-04-20 23:20:08 +00:00
|
|
|
}
|
|
|
|
|
2020-07-18 23:03:25 +00:00
|
|
|
return errors.New(fmt.Sprintf("no such container: %s", d.Server.Id()))
|
2019-04-06 19:27:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
opts := types.ContainerLogsOptions{
|
|
|
|
ShowStderr: true,
|
|
|
|
ShowStdout: true,
|
|
|
|
Follow: true,
|
2019-09-23 03:47:38 +00:00
|
|
|
Since: time.Now().Format(time.RFC3339),
|
2019-04-06 19:27:44 +00:00
|
|
|
}
|
|
|
|
|
2020-07-18 23:03:25 +00:00
|
|
|
reader, err := d.Client.ContainerLogs(context.Background(), d.Server.Id(), opts)
|
2019-04-20 23:20:08 +00:00
|
|
|
|
|
|
|
go func(r io.ReadCloser) {
|
|
|
|
defer r.Close()
|
|
|
|
|
|
|
|
s := bufio.NewScanner(r)
|
|
|
|
for s.Scan() {
|
2020-01-18 22:04:26 +00:00
|
|
|
d.Server.Events().Publish(ConsoleOutputEvent, s.Text())
|
2019-04-20 23:20:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := s.Err(); err != nil {
|
2020-05-29 05:07:53 +00:00
|
|
|
d.Server.Log().WithField("error", err).Warn("error processing scanner line in console output")
|
2019-04-20 23:20:08 +00:00
|
|
|
}
|
|
|
|
}(reader)
|
2019-04-06 19:27:44 +00:00
|
|
|
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-04-06 19:27:44 +00:00
|
|
|
}
|
|
|
|
|
2019-08-17 20:19:56 +00:00
|
|
|
// EnableResourcePolling enables resource polling on the docker instance. We
// aren't actually polling Docker for this information; instead an async
// process sits on the stats stream and lets Docker push the data to us.
func (d *DockerEnvironment) EnableResourcePolling() error {
	// Stats cannot be streamed from a container that isn't running.
	if d.Server.GetState() == ProcessOfflineState {
		return errors.New("cannot enable resource polling on a server that is not running")
	}

	stats, err := d.Client.ContainerStats(context.Background(), d.Server.Id(), true)
	if err != nil {
		return errors.WithStack(err)
	}
	// Keep a handle on the stream body so DisableResourcePolling can close it.
	d.stats = stats.Body

	dec := json.NewDecoder(d.stats)
	go func(s *Server) {
		for {
			var v *types.StatsJSON

			if err := dec.Decode(&v); err != nil {
				// EOF just means the stream was closed (e.g. by
				// DisableResourcePolling); anything else is worth logging.
				if err != io.EOF {
					d.Server.Log().WithField("error", err).Warn("encountered error processing server stats, stopping collection")
				}

				_ = d.DisableResourcePolling()
				return
			}

			// Disable collection if the server is in an offline state and this process is
			// still running.
			if s.GetState() == ProcessOfflineState {
				_ = d.DisableResourcePolling()
				return
			}

			s.Proc().UpdateFromDocker(v)
			for _, nw := range v.Networks {
				s.Proc().UpdateNetworkBytes(&nw)
			}

			// Why you ask? This already has the logic for caching disk space in use and then
			// also handles pushing that value to the resources object automatically.
			s.Filesystem.HasSpaceAvailable()

			b, _ := json.Marshal(s.Proc())
			s.Events().Publish(StatsEvent, string(b))
		}
	}(d.Server)

	return nil
}
|
|
|
|
|
|
|
|
// Closes the stats stream for a server process.
|
|
|
|
func (d *DockerEnvironment) DisableResourcePolling() error {
|
|
|
|
if d.stats == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-08-17 23:10:48 +00:00
|
|
|
err := d.stats.Close()
|
2020-07-19 23:27:55 +00:00
|
|
|
d.Server.Proc().Empty()
|
2019-08-17 23:10:48 +00:00
|
|
|
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-08-17 20:19:56 +00:00
|
|
|
}
|
|
|
|
|
2020-07-19 23:27:55 +00:00
|
|
|
// Returns the image to be used for the instance.
|
|
|
|
func (d *DockerEnvironment) Image() string {
|
|
|
|
return d.Server.Config().Container.Image
|
|
|
|
}
|
|
|
|
|
2020-05-29 02:26:41 +00:00
|
|
|
// ensureImageExists pulls the image from Docker. If there is an error while
// pulling the image from the source but the image already exists locally, we
// will report that error to the logger but continue with the process.
//
// The reasoning behind this is that Quay has had some serious outages as of late, and we don't
// need to block all of the servers from booting just because of that. I'd imagine in a lot of
// cases an outage shouldn't affect users too badly. It'll at least keep existing servers working
// correctly if anything.
//
// @todo handle authorization & local images
func (d *DockerEnvironment) ensureImageExists() error {
	// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of cases where an
	// image pull might fail. I can't imagine it will ever take more than 15 minutes to fully pull
	// an image. Let me know when I am inevitably wrong here...
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
	defer cancel()

	image := d.Image()

	// Get a registry auth configuration from the config.
	var registryAuth *config.RegistryConfiguration
	for registry, c := range config.Get().Docker.Registries {
		if !strings.HasPrefix(image, registry) {
			continue
		}

		log.WithField("registry", registry).Debug("using authentication for registry")
		// NOTE(review): &c takes the address of the loop variable; this is
		// safe only because we break immediately — confirm if the loop ever
		// changes to continue iterating.
		registryAuth = &c
		break
	}

	// Get the ImagePullOptions.
	imagePullOptions := types.ImagePullOptions{All: false}
	if registryAuth != nil {
		b64, err := registryAuth.Base64()
		if err != nil {
			log.WithError(err).Error("failed to get registry auth credentials")
		}

		// b64 is a string so if there is an error it will just be empty, not nil.
		imagePullOptions.RegistryAuth = b64
	}

	out, err := d.Client.ImagePull(ctx, image, imagePullOptions)
	if err != nil {
		// The pull failed; before giving up, see if the image already exists
		// locally so existing servers can keep booting through registry outages.
		images, ierr := d.Client.ImageList(ctx, types.ImageListOptions{})
		if ierr != nil {
			// Well damn, something has gone really wrong here, just go ahead and abort there
			// isn't much anything we can do to try and self-recover from this.
			return ierr
		}

		for _, img := range images {
			for _, t := range img.RepoTags {
				if t != d.Image() {
					continue
				}

				d.Server.Log().WithFields(log.Fields{
					"image": d.Image(),
					"error": errors.New(err.Error()),
				}).Warn("unable to pull requested image from remote source, however the image exists locally")

				// Okay, we found a matching container image, in that case just go ahead and return
				// from this function, since there is nothing else we need to do here.
				return nil
			}
		}

		return err
	}
	defer out.Close()

	log.WithField("image", d.Image()).Debug("pulling docker image... this could take a bit of time")

	// I'm not sure what the best approach here is, but this will block execution until the image
	// is done being pulled, which is what we need.
	scanner := bufio.NewScanner(out)
	for scanner.Scan() {
		continue
	}

	if err := scanner.Err(); err != nil {
		return err
	}

	return nil
}
|
|
|
|
|
2019-04-04 05:01:11 +00:00
|
|
|
// Creates a new container for the server using all of the data that is currently
|
|
|
|
// available for it. If the container already exists it will be returned.
|
|
|
|
func (d *DockerEnvironment) Create() error {
|
2019-11-16 23:48:50 +00:00
|
|
|
// Ensure the data directory exists before getting too far through this process.
|
|
|
|
if err := d.Server.Filesystem.EnsureDataDirectory(); err != nil {
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-11-16 23:48:50 +00:00
|
|
|
}
|
|
|
|
|
2019-04-04 05:01:11 +00:00
|
|
|
// If the container already exists don't hit the user with an error, just return
|
|
|
|
// the current information about it which is what we would do when creating the
|
|
|
|
// container anyways.
|
2020-07-18 23:03:25 +00:00
|
|
|
if _, err := d.Client.ContainerInspect(context.Background(), d.Server.Id()); err == nil {
|
2019-04-04 05:01:11 +00:00
|
|
|
return nil
|
2019-04-20 23:20:08 +00:00
|
|
|
} else if !client.IsErrNotFound(err) {
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-04-04 05:01:11 +00:00
|
|
|
}
|
|
|
|
|
2019-12-07 22:19:51 +00:00
|
|
|
// Try to pull the requested image before creating the container.
|
2020-07-04 21:14:22 +00:00
|
|
|
if err := d.ensureImageExists(); err != nil {
|
2019-12-07 22:19:51 +00:00
|
|
|
return errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
2019-04-04 05:01:11 +00:00
|
|
|
conf := &container.Config{
|
2020-07-18 23:03:25 +00:00
|
|
|
Hostname: d.Server.Id(),
|
2020-07-04 21:14:22 +00:00
|
|
|
Domainname: config.Get().Docker.Domainname,
|
2019-12-25 00:40:51 +00:00
|
|
|
User: strconv.Itoa(config.Get().System.User.Uid),
|
2019-04-04 05:49:15 +00:00
|
|
|
AttachStdin: true,
|
2019-04-04 05:01:11 +00:00
|
|
|
AttachStdout: true,
|
|
|
|
AttachStderr: true,
|
2019-04-04 05:49:15 +00:00
|
|
|
OpenStdin: true,
|
|
|
|
Tty: true,
|
2019-04-04 06:09:15 +00:00
|
|
|
ExposedPorts: d.exposedPorts(),
|
2020-07-19 23:27:55 +00:00
|
|
|
Image: d.Image(),
|
|
|
|
Env: d.Server.GetEnvironmentVariables(),
|
2019-04-04 05:01:11 +00:00
|
|
|
Labels: map[string]string{
|
2020-01-18 21:05:44 +00:00
|
|
|
"Service": "Pterodactyl",
|
2019-12-28 22:57:19 +00:00
|
|
|
"ContainerType": "server_process",
|
2019-04-04 05:01:11 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2020-08-02 03:24:43 +00:00
|
|
|
mounts, err := d.getContainerMounts()
|
|
|
|
if err != nil {
|
|
|
|
return errors.WithMessage(err, "could not build container mount points slice")
|
2020-05-21 20:53:00 +00:00
|
|
|
}
|
|
|
|
|
2020-08-02 03:24:43 +00:00
|
|
|
customMounts, err := d.getCustomMounts()
|
|
|
|
if err != nil {
|
|
|
|
return errors.WithMessage(err, "could not build custom container mount points slice")
|
|
|
|
}
|
2020-07-04 22:20:58 +00:00
|
|
|
|
2020-08-02 03:24:43 +00:00
|
|
|
if len(customMounts) > 0 {
|
|
|
|
mounts = append(mounts, customMounts...)
|
2020-07-18 23:03:25 +00:00
|
|
|
|
2020-08-02 03:24:43 +00:00
|
|
|
for _, m := range customMounts {
|
|
|
|
d.Server.Log().WithFields(log.Fields{
|
|
|
|
"source_path": m.Source,
|
|
|
|
"target_path": m.Target,
|
2020-08-04 23:19:13 +00:00
|
|
|
"read_only": m.ReadOnly,
|
2020-08-02 03:24:43 +00:00
|
|
|
}).Debug("attaching custom server mount point to container")
|
2020-07-04 21:32:53 +00:00
|
|
|
}
|
2020-05-21 20:53:00 +00:00
|
|
|
}
|
|
|
|
|
2019-04-04 05:01:11 +00:00
|
|
|
hostConf := &container.HostConfig{
|
2019-04-04 06:09:15 +00:00
|
|
|
PortBindings: d.portBindings(),
|
|
|
|
|
2019-04-04 05:49:15 +00:00
|
|
|
// Configure the mounts for this container. First mount the server data directory
|
2019-12-25 00:49:08 +00:00
|
|
|
// into the container as a r/w bind.
|
2020-05-21 20:53:00 +00:00
|
|
|
Mounts: mounts,
|
2019-04-04 05:49:15 +00:00
|
|
|
|
|
|
|
// Configure the /tmp folder mapping in containers. This is necessary for some
|
|
|
|
// games that need to make use of it for downloads and other installation processes.
|
|
|
|
Tmpfs: map[string]string{
|
|
|
|
"/tmp": "rw,exec,nosuid,size=50M",
|
|
|
|
},
|
|
|
|
|
|
|
|
// Define resource limits for the container based on the data passed through
|
|
|
|
// from the Panel.
|
2019-11-24 23:08:38 +00:00
|
|
|
Resources: d.getResourcesForServer(),
|
2019-04-04 05:49:15 +00:00
|
|
|
|
2020-04-25 19:18:18 +00:00
|
|
|
DNS: config.Get().Docker.Network.Dns,
|
2019-04-04 05:49:15 +00:00
|
|
|
|
|
|
|
// Configure logging for the container to make it easier on the Daemon to grab
|
|
|
|
// the server output. Ensure that we don't use too much space on the host machine
|
|
|
|
// since we only need it for the last few hundred lines of output and don't care
|
|
|
|
// about anything else in it.
|
|
|
|
LogConfig: container.LogConfig{
|
|
|
|
Type: jsonfilelog.Name,
|
|
|
|
Config: map[string]string{
|
|
|
|
"max-size": "5m",
|
|
|
|
"max-file": "1",
|
|
|
|
},
|
2019-04-04 05:01:11 +00:00
|
|
|
},
|
2019-04-04 05:49:15 +00:00
|
|
|
|
|
|
|
SecurityOpt: []string{"no-new-privileges"},
|
|
|
|
ReadonlyRootfs: true,
|
|
|
|
CapDrop: []string{
|
|
|
|
"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
|
|
|
|
"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
|
|
|
|
},
|
2020-05-10 01:29:56 +00:00
|
|
|
NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
|
2019-04-04 05:01:11 +00:00
|
|
|
}
|
|
|
|
|
2020-07-18 23:03:25 +00:00
|
|
|
if _, err := d.Client.ContainerCreate(context.Background(), conf, hostConf, nil, d.Server.Id()); err != nil {
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-04-04 05:01:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-08-02 03:24:43 +00:00
|
|
|
// Returns the default container mounts for the server instance. This includes the data directory
|
|
|
|
// for the server as well as any timezone related files if they exist on the host system so that
|
|
|
|
// servers running within the container will use the correct time.
|
|
|
|
func (d *DockerEnvironment) getContainerMounts() ([]mount.Mount, error) {
|
|
|
|
var m []mount.Mount
|
|
|
|
|
|
|
|
m = append(m, mount.Mount{
|
|
|
|
Target: "/home/container",
|
|
|
|
Source: d.Server.Filesystem.Path(),
|
|
|
|
Type: mount.TypeBind,
|
|
|
|
ReadOnly: false,
|
|
|
|
})
|
|
|
|
|
|
|
|
// Try to mount in /etc/localtime and /etc/timezone if they exist on the host system.
|
|
|
|
if _, err := os.Stat("/etc/localtime"); err != nil {
|
|
|
|
if !os.IsNotExist(err) {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
m = append(m, mount.Mount{
|
|
|
|
Target: "/etc/localtime",
|
|
|
|
Source: "/etc/localtime",
|
|
|
|
Type: mount.TypeBind,
|
|
|
|
ReadOnly: true,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := os.Stat("/etc/timezone"); err != nil {
|
|
|
|
if !os.IsNotExist(err) {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
m = append(m, mount.Mount{
|
|
|
|
Target: "/etc/timezone",
|
|
|
|
Source: "/etc/timezone",
|
|
|
|
Type: mount.TypeBind,
|
|
|
|
ReadOnly: true,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return m, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the custom mounts for a given server after verifying that they are within a list of
|
|
|
|
// allowed mount points for the node.
|
|
|
|
func (d *DockerEnvironment) getCustomMounts() ([]mount.Mount, error) {
|
|
|
|
var mounts []mount.Mount
|
|
|
|
|
|
|
|
// TODO: probably need to handle things trying to mount directories that do not exist.
|
|
|
|
for _, m := range d.Server.Config().Mounts {
|
|
|
|
source := filepath.Clean(m.Source)
|
|
|
|
target := filepath.Clean(m.Target)
|
|
|
|
|
|
|
|
logger := d.Server.Log().WithFields(log.Fields{
|
|
|
|
"source_path": source,
|
|
|
|
"target_path": target,
|
|
|
|
"read_only": m.ReadOnly,
|
|
|
|
})
|
|
|
|
|
|
|
|
mounted := false
|
|
|
|
for _, allowed := range config.Get().AllowedMounts {
|
|
|
|
if !strings.HasPrefix(source, allowed) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
mounted = true
|
|
|
|
mounts = append(mounts, mount.Mount{
|
|
|
|
Source: source,
|
|
|
|
Target: target,
|
|
|
|
Type: mount.TypeBind,
|
|
|
|
ReadOnly: m.ReadOnly,
|
|
|
|
})
|
|
|
|
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if !mounted {
|
|
|
|
logger.Warn("skipping custom server mount, not in list of allowed mount points")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return mounts, nil
|
|
|
|
}
|
|
|
|
|
2019-04-20 23:20:08 +00:00
|
|
|
// Sends the specified command to the stdin of the running container instance. There is no
|
|
|
|
// confirmation that this data is sent successfully, only that it gets pushed into the stdin.
|
|
|
|
func (d *DockerEnvironment) SendCommand(c string) error {
|
2020-08-01 22:34:14 +00:00
|
|
|
d.RLock()
|
|
|
|
defer d.RUnlock()
|
|
|
|
|
2020-07-18 23:03:25 +00:00
|
|
|
if !d.IsAttached() {
|
2019-04-20 23:20:08 +00:00
|
|
|
return errors.New("attempting to send command to non-attached instance")
|
|
|
|
}
|
|
|
|
|
|
|
|
_, err := d.stream.Conn.Write([]byte(c + "\n"))
|
|
|
|
|
2019-11-17 01:05:21 +00:00
|
|
|
return errors.WithStack(err)
|
2019-04-20 23:20:08 +00:00
|
|
|
}
|
|
|
|
|
2019-04-06 19:27:44 +00:00
|
|
|
// Reads the log file for the server. This does not care if the server is running or not, it will
|
|
|
|
// simply try to read the last X bytes of the file and return them.
|
|
|
|
func (d *DockerEnvironment) Readlog(len int64) ([]string, error) {
|
2020-07-18 23:03:25 +00:00
|
|
|
j, err := d.Client.ContainerInspect(context.Background(), d.Server.Id())
|
2019-04-06 19:27:44 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if j.LogPath == "" {
|
|
|
|
return nil, errors.New("empty log path defined for server")
|
|
|
|
}
|
|
|
|
|
|
|
|
f, err := os.Open(j.LogPath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer f.Close()
|
|
|
|
|
|
|
|
// Check if the length of the file is smaller than the amount of data that was requested
|
|
|
|
// for reading. If so, adjust the length to be the total length of the file. If this is not
|
|
|
|
// done an error is thrown since we're reading backwards, and not forwards.
|
|
|
|
if stat, err := os.Stat(j.LogPath); err != nil {
|
|
|
|
return nil, err
|
|
|
|
} else if stat.Size() < len {
|
|
|
|
len = stat.Size()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Seed to the end of the file and then move backwards until the length is met to avoid
|
|
|
|
// reading the entirety of the file into memory.
|
|
|
|
if _, err := f.Seek(-len, io.SeekEnd); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
b := make([]byte, len)
|
|
|
|
|
|
|
|
if _, err := f.Read(b); err != nil && err != io.EOF {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return d.parseLogToStrings(b)
|
|
|
|
}
|
|
|
|
|
|
|
|
// dockerLogLine represents one line of Docker's JSON-file log driver output. Only the
// "log" field (the raw text emitted by the container) is decoded here; any other keys
// present on the JSON line are ignored by encoding/json during unmarshaling.
type dockerLogLine struct {
	Log string `json:"log"`
}
|
|
|
|
|
|
|
|
// Docker stores the logs for server output in a JSON format. This function will iterate over the JSON
|
|
|
|
// that was read from the log file and parse it into a more human readable format.
|
|
|
|
func (d *DockerEnvironment) parseLogToStrings(b []byte) ([]string, error) {
|
|
|
|
var hasError = false
|
|
|
|
var out []string
|
|
|
|
|
|
|
|
scanner := bufio.NewScanner(bytes.NewReader(b))
|
|
|
|
for scanner.Scan() {
|
|
|
|
var l dockerLogLine
|
|
|
|
// Unmarshal the contents and allow up to a single error before bailing out of the process. We
|
|
|
|
// do this because if you're arbitrarily reading a length of the file you'll likely end up
|
|
|
|
// with the first line in the output being improperly formatted JSON. In those cases we want to
|
|
|
|
// just skip over it. However if we see another error we're going to bail out because that is an
|
|
|
|
// abnormal situation.
|
|
|
|
if err := json.Unmarshal([]byte(scanner.Text()), &l); err != nil {
|
|
|
|
if hasError {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
hasError = true
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
out = append(out, l.Log)
|
|
|
|
}
|
|
|
|
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2019-04-04 06:09:15 +00:00
|
|
|
// Converts the server allocation mappings into a format that can be understood
|
|
|
|
// by Docker.
|
|
|
|
func (d *DockerEnvironment) portBindings() nat.PortMap {
|
|
|
|
var out = nat.PortMap{}
|
|
|
|
|
2020-07-19 23:27:55 +00:00
|
|
|
for ip, ports := range d.Server.Config().Allocations.Mappings {
|
2019-04-04 06:09:15 +00:00
|
|
|
for _, port := range ports {
|
|
|
|
// Skip over invalid ports.
|
2020-07-31 22:19:09 +00:00
|
|
|
if port < 1 || port > 65535 {
|
2019-04-04 06:09:15 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
binding := []nat.PortBinding{
|
|
|
|
{
|
|
|
|
HostIP: ip,
|
2019-04-04 06:54:38 +00:00
|
|
|
HostPort: strconv.Itoa(port),
|
2019-04-04 06:09:15 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2019-04-04 06:45:15 +00:00
|
|
|
out[nat.Port(fmt.Sprintf("%d/tcp", port))] = binding
|
|
|
|
out[nat.Port(fmt.Sprintf("%d/udp", port))] = binding
|
2019-04-04 06:09:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
|
|
|
// Converts the server allocation mappings into a PortSet that can be understood
|
|
|
|
// by Docker. This formatting is slightly different than portBindings as it should
|
|
|
|
// return an empty struct rather than a binding.
|
|
|
|
//
|
|
|
|
// To accomplish this, we'll just get the values from portBindings and then set them
|
|
|
|
// to empty structs. Because why not.
|
|
|
|
func (d *DockerEnvironment) exposedPorts() nat.PortSet {
|
|
|
|
var out = nat.PortSet{}
|
|
|
|
|
|
|
|
for port := range d.portBindings() {
|
|
|
|
out[port] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
return out
|
2019-08-17 23:10:48 +00:00
|
|
|
}
|
2019-11-24 23:08:38 +00:00
|
|
|
|
|
|
|
// Formats the resources available to a server instance in such as way that Docker will
|
|
|
|
// generate a matching environment in the container.
|
2020-05-09 03:57:00 +00:00
|
|
|
//
|
|
|
|
// This will set the actual memory limit on the container using the multiplier which is the
|
|
|
|
// hard limit for the container (after which will result in a crash). We then set the
|
|
|
|
// reservation to be the expected memory limit based on simply multiplication.
|
|
|
|
//
|
|
|
|
// The swap value is either -1 to disable it, or set to the value of the hard memory limit
|
|
|
|
// plus the additional swap assigned to the server since Docker expects this value to be
|
|
|
|
// the same or higher than the memory limit.
|
2019-11-24 23:08:38 +00:00
|
|
|
func (d *DockerEnvironment) getResourcesForServer() container.Resources {
|
|
|
|
return container.Resources{
|
2020-07-19 23:27:55 +00:00
|
|
|
Memory: d.Server.Build().BoundedMemoryLimit(),
|
|
|
|
MemoryReservation: d.Server.Build().MemoryLimit * 1_000_000,
|
|
|
|
MemorySwap: d.Server.Build().ConvertedSwap(),
|
|
|
|
CPUQuota: d.Server.Build().ConvertedCpuLimit(),
|
2020-05-09 03:57:00 +00:00
|
|
|
CPUPeriod: 100_000,
|
2019-11-25 03:57:20 +00:00
|
|
|
CPUShares: 1024,
|
2020-07-19 23:27:55 +00:00
|
|
|
BlkioWeight: d.Server.Build().IoWeight,
|
|
|
|
OomKillDisable: &d.Server.Config().Container.OomDisabled,
|
|
|
|
CpusetCpus: d.Server.Build().Threads,
|
2019-11-24 23:08:38 +00:00
|
|
|
}
|
|
|
|
}
|