package docker

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"strconv"
	"strings"
	"time"

	"emperror.dev/errors"
	"github.com/apex/log"
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/client"
	"github.com/docker/docker/daemon/logger/jsonfilelog"

	"github.com/pterodactyl/wings/config"
	"github.com/pterodactyl/wings/environment"
	"github.com/pterodactyl/wings/system"
)

// imagePullStatus is the structure of a single JSON status line emitted by the
// Docker daemon while an image is being pulled.
type imagePullStatus struct {
	Status   string `json:"status"`
	Progress string `json:"progress"`
}
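
// For reference, each line streamed back by the daemon during a pull is a JSON
// document; a typical line (values illustrative) looks like:
//
//	{"status":"Downloading","progressDetail":{"current":1536,"total":4096},"progress":"[=====>    ] 1.5kB/4.1kB","id":"a3ed95caeb02"}
//
// Only the status and progress fields are decoded here; everything else in the
// document is ignored.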

// Attach attaches to the docker container itself and ensures that we can pipe
// data in and out of the process stream. This should not be used for reading
// console data as you *will* miss important output at the beginning because of
// the time delay with attaching to the output.
func (e *Environment) Attach() error {
	if e.IsAttached() {
		return nil
	}

	if err := e.followOutput(); err != nil {
		return err
	}

	opts := types.ContainerAttachOptions{
		Stdin:  true,
		Stdout: true,
		Stderr: true,
		Stream: true,
	}

	// Set the stream again with the container.
	if st, err := e.client.ContainerAttach(context.Background(), e.Id, opts); err != nil {
		return err
	} else {
		e.SetStream(&st)
	}

	c := new(Console)
	go func(console *Console) {
		ctx, cancel := context.WithCancel(context.Background())

		defer cancel()
		defer e.stream.Close()
		defer func() {
			e.SetState(environment.ProcessOfflineState)
			e.SetStream(nil)
		}()

		// Poll resources in a separate goroutine since it would otherwise block the
		// copy call below from ever being reached. We still want polling to stop once
		// the copy operation finishes, since that indicates the container is no
		// longer running.
		go func(ctx context.Context) {
			if err := e.pollResources(ctx); err != nil {
				l := log.WithField("environment_id", e.Id)
				if !errors.Is(err, context.Canceled) {
					l.WithField("error", err).Error("error during environment resource polling")
				} else {
					l.Warn("stopping server resource polling: context canceled")
				}
			}
		}(ctx)

		// Stream the reader output to the console which will then fire off events and
		// handle console throttling and sending the output to the user.
		if _, err := io.Copy(console, e.stream.Reader); err != nil {
			log.WithField("environment_id", e.Id).WithField("error", err).Error("error while copying environment output to console")
		}
	}(c)

	return nil
}

// resources builds the container resource limits from the server's configured
// limits.
func (e *Environment) resources() container.Resources {
	l := e.Configuration.Limits()

	return container.Resources{
		Memory:            l.BoundedMemoryLimit(),
		MemoryReservation: l.MemoryLimit * 1_000_000,
		MemorySwap:        l.ConvertedSwap(),
		CPUQuota:          l.ConvertedCpuLimit(),
		CPUPeriod:         100_000,
		CPUShares:         1024,
		BlkioWeight:       l.IoWeight,
		OomKillDisable:    &l.OOMDisabled,
		CpusetCpus:        l.Threads,
	}
}
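
// A worked example of the CPU fields above, assuming ConvertedCpuLimit maps a
// configured percentage to quota microseconds (an assumption about that helper,
// not something guaranteed here): Docker enforces CPU limits as a quota/period
// pair, so with CPUPeriod fixed at 100_000 a quota of 150_000 allows the
// container 150% of a single core's time in each scheduling period.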

// InSituUpdate performs an in-place update of the Docker container's resource
// limits without actually making any changes to the operational state of the
// container. This allows memory, cpu, and IO limitations to be adjusted on the
// fly for individual instances.
func (e *Environment) InSituUpdate() error {
	if _, err := e.client.ContainerInspect(context.Background(), e.Id); err != nil {
		// If the container doesn't exist for some reason there really isn't anything
		// we can do to fix that in this process (it doesn't make sense at least). In
		// those cases just return without doing anything since we still want to save
		// the configuration to the disk.
		//
		// We'll let a boot process make modifications to the container if needed at
		// this point.
		if client.IsErrNotFound(err) {
			return nil
		}

		return err
	}

	u := container.UpdateConfig{
		Resources: e.resources(),
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
	defer cancel()
	if _, err := e.client.ContainerUpdate(ctx, e.Id, u); err != nil {
		return err
	}

	return nil
}
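
// For context, ContainerUpdate is the same API the Docker CLI uses for
// "docker update"; the call above is roughly equivalent to running something
// like (values illustrative):
//
//	docker update --memory 1073741824 --cpu-quota 150000 <container-id>
//
// which is why the container keeps running while its limits change underneath it.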

// Create creates a new container for the server using all of the data that is
// currently available for it. If the container already exists it will be
// returned.
func (e *Environment) Create() error {
	// If the container already exists don't hit the user with an error, just return
	// the current information about it which is what we would do when creating the
	// container anyways.
	if _, err := e.client.ContainerInspect(context.Background(), e.Id); err == nil {
		return nil
	} else if !client.IsErrNotFound(err) {
		return err
	}

	// Try to pull the requested image before creating the container.
	if err := e.ensureImageExists(e.meta.Image); err != nil {
		return err
	}

	a := e.Configuration.Allocations()

	evs := e.Configuration.EnvironmentVariables()
	for i, v := range evs {
		// Convert 127.0.0.1 to the pterodactyl0 network interface if the environment
		// is Docker so that the server operates as expected.
		if v == "SERVER_IP=127.0.0.1" {
			evs[i] = "SERVER_IP=" + config.Get().Docker.Network.Interface
		}
	}

	conf := &container.Config{
		Hostname:     e.Id,
		Domainname:   config.Get().Docker.Domainname,
		User:         strconv.Itoa(config.Get().System.User.Uid),
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		OpenStdin:    true,
		Tty:          true,
		ExposedPorts: a.Exposed(),
		Image:        e.meta.Image,
		Env:          evs,
		Labels: map[string]string{
			"Service":       "Pterodactyl",
			"ContainerType": "server_process",
		},
	}

	tmpfsSize := strconv.Itoa(int(config.Get().Docker.TmpfsSize))

	hostConf := &container.HostConfig{
		PortBindings: a.DockerBindings(),

		// Configure the mounts for this container. First mount the server data
		// directory into the container as a r/w bind.
		Mounts: e.convertMounts(),

		// Configure the /tmp folder mapping in containers. This is necessary for some
		// games that need to make use of it for downloads and other installation
		// processes.
		Tmpfs: map[string]string{
			"/tmp": "rw,exec,nosuid,size=" + tmpfsSize + "M",
		},

		// Define resource limits for the container based on the data passed through
		// from the Panel.
		Resources: e.resources(),

		DNS: config.Get().Docker.Network.Dns,

		// Configure logging for the container to make it easier on the Daemon to grab
		// the server output. Ensure that we don't use too much space on the host
		// machine since we only need it for the last few hundred lines of output and
		// don't care about anything else in it.
		LogConfig: container.LogConfig{
			Type: jsonfilelog.Name,
			Config: map[string]string{
				"max-size": "5m",
				"max-file": "1",
				"compress": "false",
			},
		},

		SecurityOpt:    []string{"no-new-privileges"},
		ReadonlyRootfs: true,
		CapDrop: []string{
			"setpcap", "mknod", "audit_write", "net_raw", "dac_override",
			"fowner", "fsetid", "net_bind_service", "sys_chroot", "setfcap",
		},
		NetworkMode: container.NetworkMode(config.Get().Docker.Network.Mode),
	}

	if _, err := e.client.ContainerCreate(context.Background(), conf, hostConf, nil, nil, e.Id); err != nil {
		return err
	}

	return nil
}
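
// To make the tmpfs mapping above concrete: with a configured TmpfsSize of 100
// (an illustrative value), the container is created with
//
//	"/tmp": "rw,exec,nosuid,size=100M"
//
// i.e. a writable, executable, 100 megabyte in-memory filesystem that remains
// usable despite the read-only root filesystem.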

// convertMounts converts the mounts configured for the server into the mount
// structs expected by the Docker API.
func (e *Environment) convertMounts() []mount.Mount {
	var out []mount.Mount

	for _, m := range e.Configuration.Mounts() {
		out = append(out, mount.Mount{
			Type:     mount.TypeBind,
			Source:   m.Source,
			Target:   m.Target,
			ReadOnly: m.ReadOnly,
		})
	}

	return out
}
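
// As an example, a configured mount with Source /var/lib/pterodactyl/volumes/<id>
// (path illustrative of a default install) and Target /home/container becomes a
// read-write bind mount at /home/container inside the container, alongside any
// additional user-defined mounts.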

// Destroy removes the Docker container from the machine. If the container is
// currently running it will be forcibly stopped by Docker.
func (e *Environment) Destroy() error {
	// Set the state to stopping first, rather than going straight to offline, to
	// prevent crash detection from being triggered.
	e.SetState(environment.ProcessStoppingState)

	err := e.client.ContainerRemove(context.Background(), e.Id, types.ContainerRemoveOptions{
		RemoveVolumes: true,
		RemoveLinks:   false,
		Force:         true,
	})

	// Don't trigger a destroy failure if we try to delete a container that does not
	// exist on the system. We're just a step ahead of ourselves in that case.
	//
	// @see https://github.com/pterodactyl/panel/issues/2001
	if err != nil && client.IsErrNotFound(err) {
		return nil
	}

	e.SetState(environment.ProcessOfflineState)

	return err
}

// followOutput attaches to the log for the container. This avoids us missing
// crucial output that happens in the split seconds before the code moves from
// 'Starting' to 'Attaching' on the process.
func (e *Environment) followOutput() error {
	if exists, err := e.Exists(); !exists {
		if err != nil {
			return err
		}

		return errors.New("no such container: " + e.Id)
	}

	opts := types.ContainerLogsOptions{
		ShowStderr: true,
		ShowStdout: true,
		Follow:     true,
		Since:      time.Now().Format(time.RFC3339),
	}

	reader, err := e.client.ContainerLogs(context.Background(), e.Id, opts)
	if err != nil {
		return err
	}

	go func(reader io.ReadCloser) {
		defer reader.Close()
		evts := e.Events()
		err := system.ScanReader(reader, func(line string) {
			evts.Publish(environment.ConsoleOutputEvent, line)
		})
		if err != nil && err != io.EOF {
			log.WithField("error", err).WithField("container_id", e.Id).Warn("error processing scanner line in console output")
		}
	}(reader)

	return nil
}
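
// A note on the Since option above: Docker accepts an RFC3339 timestamp here,
// e.g. "2020-08-11T04:38:42Z", so passing the current time streams only output
// generated from this point forward rather than replaying the container's full
// log history.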

// ensureImageExists pulls the image from Docker. If there is an error while
// pulling the image from the source but the image already exists locally, we
// will report that error to the logger but continue with the process.
//
// The reasoning behind this is that Quay has had some serious outages as of
// late, and we don't need to block all of the servers from booting just because
// of that. I'd imagine in a lot of cases an outage shouldn't affect users too
// badly. It'll at least keep existing servers working correctly if anything.
func (e *Environment) ensureImageExists(image string) error {
	e.Events().Publish(environment.DockerImagePullStarted, "")
	defer e.Events().Publish(environment.DockerImagePullCompleted, "")

	// Images prefixed with a ~ are local images that we do not need to try and pull.
	if strings.HasPrefix(image, "~") {
		return nil
	}

	// Give it up to 15 minutes to pull the image. I think this should cover 99.8% of
	// cases where an image pull might fail. I can't imagine it will ever take more
	// than 15 minutes to fully pull an image. Let me know when I am inevitably
	// wrong here...
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
	defer cancel()

	// Get a registry auth configuration from the config.
	var registryAuth *config.RegistryConfiguration
	for registry, c := range config.Get().Docker.Registries {
		if !strings.HasPrefix(image, registry) {
			continue
		}

		log.WithField("registry", registry).Debug("using authentication for registry")
		c := c // copy the loop variable so the pointer below doesn't alias it
		registryAuth = &c
		break
	}

	// Get the ImagePullOptions.
	imagePullOptions := types.ImagePullOptions{All: false}
	if registryAuth != nil {
		b64, err := registryAuth.Base64()
		if err != nil {
			log.WithError(err).Error("failed to get registry auth credentials")
		}

		// b64 is a string so if there is an error it will just be empty, not nil.
		imagePullOptions.RegistryAuth = b64
	}

	out, err := e.client.ImagePull(ctx, image, imagePullOptions)
	if err != nil {
		images, ierr := e.client.ImageList(ctx, types.ImageListOptions{})
		if ierr != nil {
			// Well damn, something has gone really wrong here, just go ahead and abort;
			// there isn't much of anything we can do to try and self-recover from this.
			return ierr
		}

		for _, img := range images {
			for _, t := range img.RepoTags {
				if t != image {
					continue
				}

				log.WithFields(log.Fields{
					"image":        image,
					"container_id": e.Id,
					"err":          err.Error(),
				}).Warn("unable to pull requested image from remote source, however the image exists locally")

				// Okay, we found a matching container image, in that case just go ahead
				// and return from this function, since there is nothing else we need to
				// do here.
				return nil
			}
		}

		return err
	}
	defer out.Close()

	log.WithField("image", image).Debug("pulling docker image... this could take a bit of time")

	// I'm not sure what the best approach here is, but this will block execution
	// until the image is done being pulled, which is what we need.
	scanner := bufio.NewScanner(out)
	for scanner.Scan() {
		s := imagePullStatus{}
		fmt.Println(scanner.Text())
		if err := json.Unmarshal(scanner.Bytes(), &s); err == nil {
			e.Events().Publish(environment.DockerImagePullStatus, s.Status+" "+s.Progress)
		}
	}

	if err := scanner.Err(); err != nil {
		return err
	}

	log.WithField("image", image).Debug("completed docker image pull")

	return nil
}
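
// For reference, the registry credentials consulted above come from the wings
// configuration file. A minimal sketch of that section, assuming the YAML keys
// mirror the config structs (names and values here are illustrative, not
// authoritative):
//
//	docker:
//	  registries:
//	    registry.example.com:
//	      username: someuser
//	      password: somepassword
//
// The loop matches on the image's registry prefix, so the map key should match
// the start of the image reference being pulled.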