re-refactor code
@@ -1,77 +0,0 @@
-package server
-
-import "sync"
-
-type Collection struct {
-    items []*Server
-    sync.RWMutex
-}
-
-// Create a new collection from a slice of servers.
-func NewCollection(servers []*Server) *Collection {
-    return &Collection{
-        items: servers,
-    }
-}
-
-// Return all of the items in the collection.
-func (c *Collection) All() []*Server {
-    c.RLock()
-    defer c.RUnlock()
-
-    return c.items
-}
-
-// Adds an item to the collection store.
-func (c *Collection) Add(s *Server) {
-    c.Lock()
-    c.items = append(c.items, s)
-    c.Unlock()
-}
-
-// Returns only those items matching the filter criteria.
-func (c *Collection) Filter(filter func(*Server) bool) []*Server {
-    c.RLock()
-    defer c.RUnlock()
-
-    r := make([]*Server, 0)
-    for _, v := range c.items {
-        if filter(v) {
-            r = append(r, v)
-        }
-    }
-
-    return r
-}
-
-// Returns a single element from the collection matching the filter. If nothing is
-// found a nil result is returned.
-func (c *Collection) Find(filter func(*Server) bool) *Server {
-    c.RLock()
-    defer c.RUnlock()
-
-    for _, v := range c.items {
-        if filter(v) {
-            return v
-        }
-    }
-
-    return nil
-}
-
-// Removes all items from the collection that match the filter function.
-//
-// TODO: cancel the context?
-func (c *Collection) Remove(filter func(*Server) bool) {
-    c.Lock()
-    defer c.Unlock()
-
-    r := make([]*Server, 0)
-    for _, v := range c.items {
-        if !filter(v) {
-            r = append(r, v)
-        }
-    }
-
-    c.items = r
-}
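Below is a hedged, in-package usage sketch — not part of this commit, and the helper name exampleCollectionUsage is made up — showing how callers typically drove this collection through its filter callbacks before the refactor moved the same operations onto Manager.

// Hypothetical usage sketch of the pre-refactor Collection; only identifiers
// that appear in this diff (NewCollection, Find, Filter, Id, IsRunning) are used.
package server

func exampleCollectionUsage(loaded []*Server, uuid string) (*Server, []*Server) {
    c := NewCollection(loaded)

    // Find returns the first server the callback approves of, or nil.
    match := c.Find(func(s *Server) bool {
        return s.Id() == uuid
    })

    // Filter returns every server the callback approves of.
    running := c.Filter(func(s *Server) bool {
        return s.IsRunning()
    })

    return match, running
}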
server/loader.go
@@ -1,128 +0,0 @@
-package server
-
-import (
-    "context"
-    "encoding/json"
-    "fmt"
-    "os"
-    "path/filepath"
-    "runtime"
-    "time"
-
-    "emperror.dev/errors"
-    "github.com/apex/log"
-    "github.com/gammazero/workerpool"
-
-    "github.com/pterodactyl/wings/api"
-    "github.com/pterodactyl/wings/config"
-    "github.com/pterodactyl/wings/environment"
-    "github.com/pterodactyl/wings/environment/docker"
-    "github.com/pterodactyl/wings/remote"
-    "github.com/pterodactyl/wings/server/filesystem"
-)
-
-// Iterates over a given directory and loads all of the servers listed before returning
-// them to the calling function.
-func (m *manager) Initialize(serversPerPage int) error {
-    if len(m.servers.items) != 0 {
-        return errors.New("cannot call LoadDirectory with a non-nil collection")
-    }
-
-    log.Info("fetching list of servers from API")
-    assignedServers, err := m.panelClient.GetServers(context.TODO(), serversPerPage)
-    if err != nil {
-        if !remote.IsRequestError(err) {
-            return err
-        }
-
-        return errors.New(err.Error())
-    }
-
-    start := time.Now()
-    log.WithField("total_configs", len(assignedServers)).Info("processing servers returned by the API")
-
-    pool := workerpool.New(runtime.NumCPU())
-    log.Debugf("using %d workerpools to instantiate server instances", runtime.NumCPU())
-    for _, data := range assignedServers {
-        pool.Submit(func() {
-            // Parse the json.RawMessage into an expected struct value. We do this here so that a single broken
-            // server does not cause the entire boot process to hang, and allows us to show more useful error
-            // messaging in the output.
-            d := api.ServerConfigurationResponse{
-                Settings: data.Settings,
-            }
-
-            log.WithField("server", data.Uuid).Info("creating new server object from API response")
-            if err := json.Unmarshal(data.ProcessConfiguration, &d.ProcessConfiguration); err != nil {
-                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
-                return
-            }
-
-            s, err := FromConfiguration(d)
-            if err != nil {
-                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
-                return
-            }
-
-            m.Add(s)
-        })
-    }
-
-    // Wait until we've processed all of the configuration files in the directory
-    // before continuing.
-    pool.StopWait()
-
-    diff := time.Now().Sub(start)
-    log.WithField("duration", fmt.Sprintf("%s", diff)).Info("finished processing server configurations")
-
-    return nil
-}
-
-// Initializes a server using a data byte array. This will be marshaled into the
-// given struct using a YAML marshaler. This will also configure the given environment
-// for a server.
-func FromConfiguration(data api.ServerConfigurationResponse) (*Server, error) {
-    s, err := New()
-    if err != nil {
-        return nil, errors.WithMessage(err, "loader: failed to instantiate empty server struct")
-    }
-    if err := s.UpdateDataStructure(data.Settings); err != nil {
-        return nil, err
-    }
-
-    s.Archiver = Archiver{Server: s}
-    s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace(), s.Config().Egg.FileDenylist)
-
-    // Right now we only support a Docker based environment, so I'm going to hard code
-    // this logic in. When we're ready to support other environment we'll need to make
-    // some modifications here obviously.
-    settings := environment.Settings{
-        Mounts:      s.Mounts(),
-        Allocations: s.cfg.Allocations,
-        Limits:      s.cfg.Build,
-    }
-
-    envCfg := environment.NewConfiguration(settings, s.GetEnvironmentVariables())
-    meta := docker.Metadata{
-        Image: s.Config().Container.Image,
-    }
-
-    if env, err := docker.New(s.Id(), &meta, envCfg); err != nil {
-        return nil, err
-    } else {
-        s.Environment = env
-        s.StartEventListeners()
-        s.Throttler().StartTimer(s.Context())
-    }
-
-    // Forces the configuration to be synced with the panel.
-    if err := s.SyncWithConfiguration(data); err != nil {
-        return nil, err
-    }
-
-    // If the server's data directory exists, force disk usage calculation.
-    if _, err := os.Stat(s.Filesystem().Path()); err == nil {
-        s.Filesystem().HasSpaceAvailable(true)
-    }
-
-    return s, nil
-}
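Both the Initialize path above and the Manager.initializeFromRemoteSource that replaces it fan server construction out over a gammazero/workerpool sized to runtime.NumCPU(). A minimal sketch of that pattern in isolation follows; doWork and items are placeholder names, and the item := item shadow is only needed on Go versions before 1.22, where closures submitted in a loop would otherwise share the loop variable.

// Standalone sketch of the worker-pool fan-out used during boot. Only the
// workerpool calls (New, Submit, StopWait) come from the diff above.
package main

import (
    "fmt"
    "runtime"

    "github.com/gammazero/workerpool"
)

func doWork(item int) { fmt.Println("processed", item) }

func main() {
    items := []int{1, 2, 3, 4}

    pool := workerpool.New(runtime.NumCPU())
    for _, item := range items {
        item := item // shadow the loop variable so each task sees its own value (pre-Go 1.22)
        pool.Submit(func() {
            doWork(item)
        })
    }
    // Block until every submitted task has finished, mirroring pool.StopWait() above.
    pool.StopWait()
}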
@@ -1,46 +1,203 @@
 package server
 
 import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "os"
+    "runtime"
+    "sync"
+    "time"
+
+    "emperror.dev/errors"
+    "github.com/apex/log"
+    "github.com/gammazero/workerpool"
+
+    "github.com/pterodactyl/wings/api"
+    "github.com/pterodactyl/wings/config"
     "github.com/pterodactyl/wings/remote"
 )
 
-type Manager interface {
-    // Initialize fetches all servers assigned to this node from the API.
-    Initialize(serversPerPage int) error
-    GetAll() []*Server
-    Get(uuid string) *Server
-    Add(s *Server)
-    Remove(s *Server)
+type Manager struct {
+    mu      sync.RWMutex
+    servers []*Server
 }
 
-type manager struct {
-    servers Collection
-
-    panelClient remote.Client
+// NewManager returns a new server manager instance. This will boot up all of
+// the servers that are currently present on the filesystem and set them into
+// the manager.
+func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
+    c := NewEmptyManager()
+    if err := c.initializeFromRemoteSource(ctx, client); err != nil {
+        return nil, err
+    }
+    return c, nil
 }
 
-// NewManager creates a new server manager.
-func NewManager(panelClient remote.Client) Manager {
-    return &manager{panelClient: panelClient}
+// NewEmptyManager returns a new empty manager collection without actually
+// loading any of the servers from the disk. This allows the caller to set their
+// own servers into the collection as needed.
+func NewEmptyManager() *Manager {
+    return &Manager{}
 }
 
-func (m *manager) GetAll() []*Server {
-    return m.servers.items
+// initializeFromRemoteSource iterates over a given directory and loads all of
+// the servers listed before returning them to the calling function.
+func (m *Manager) initializeFromRemoteSource(ctx context.Context, client remote.Client) error {
+    log.Info("fetching list of servers from API")
+    servers, err := client.GetServers(ctx, config.Get().RemoteQuery.BootServersPerPage)
+    if err != nil {
+        if !remote.IsRequestError(err) {
+            return errors.WithStackIf(err)
+        }
+        return errors.New(err.Error())
+    }
+
+    start := time.Now()
+    log.WithField("total_configs", len(servers)).Info("processing servers returned by the API")
+
+    pool := workerpool.New(runtime.NumCPU())
+    log.Debugf("using %d workerpools to instantiate server instances", runtime.NumCPU())
+    for _, data := range servers {
+        pool.Submit(func() {
+            // Parse the json.RawMessage into an expected struct value. We do this here so that a single broken
+            // server does not cause the entire boot process to hang, and allows us to show more useful error
+            // messaging in the output.
+            d := api.ServerConfigurationResponse{
+                Settings: data.Settings,
+            }
+            log.WithField("server", data.Uuid).Info("creating new server object from API response")
+            if err := json.Unmarshal(data.ProcessConfiguration, &d.ProcessConfiguration); err != nil {
+                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to parse server configuration from API response, skipping...")
+                return
+            }
+            s, err := FromConfiguration(d)
+            if err != nil {
+                log.WithField("server", data.Uuid).WithField("error", err).Error("failed to load server, skipping...")
+                return
+            }
+            m.Add(s)
+        })
+    }
+
+    // Wait until we've processed all of the configuration files in the directory
+    // before continuing.
+    pool.StopWait()
+
+    diff := time.Now().Sub(start)
+    log.WithField("duration", fmt.Sprintf("%s", diff)).Info("finished processing server configurations")
+
+    return nil
 }
 
-func (m *manager) Get(uuid string) *Server {
-    return m.servers.Find(func(s *Server) bool {
-        return s.Id() == uuid
+// Put replaces all of the current values in the collection with the value that
+// is passed through.
+func (m *Manager) Put(s []*Server) {
+    m.mu.Lock()
+    m.servers = s
+    m.mu.Unlock()
+}
+
+// All returns all of the items in the collection.
+func (m *Manager) All() []*Server {
+    m.mu.RLock()
+    defer m.mu.RUnlock()
+    return m.servers
+}
+
+// Add adds an item to the collection store.
+func (m *Manager) Add(s *Server) {
+    m.mu.Lock()
+    m.servers = append(m.servers, s)
+    m.mu.Unlock()
+}
+
+// Get returns a single server instance and a boolean value indicating if it was
+// found in the global collection or not.
+func (m *Manager) Get(uuid string) (*Server, bool) {
+    match := m.Find(func(server *Server) bool {
+        return server.Id() == uuid
     })
+    return match, match != nil
 }
 
-func (m *manager) Add(s *Server) {
-    s.manager = m
-    m.servers.Add(s)
+// Filter returns only those items matching the filter criteria.
+func (m *Manager) Filter(filter func(match *Server) bool) []*Server {
+    m.mu.RLock()
+    defer m.mu.RUnlock()
+    r := make([]*Server, 0)
+    for _, v := range m.servers {
+        if filter(v) {
+            r = append(r, v)
+        }
+    }
+    return r
 }
 
-func (m *manager) Remove(s *Server) {
-    m.servers.Remove(func(sf *Server) bool {
-        return sf.Id() == s.Id()
-    })
+// Find returns a single element from the collection matching the filter. If
+// nothing is found a nil result is returned.
+func (m *Manager) Find(filter func(match *Server) bool) *Server {
+    m.mu.RLock()
+    defer m.mu.RUnlock()
+    for _, v := range m.servers {
+        if filter(v) {
+            return v
+        }
+    }
+    return nil
+}
+
+// Remove removes all items from the collection that match the filter function.
+func (m *Manager) Remove(filter func(match *Server) bool) {
+    m.mu.Lock()
+    defer m.mu.Unlock()
+    r := make([]*Server, 0)
+    for _, v := range m.servers {
+        if !filter(v) {
+            r = append(r, v)
+        }
+    }
+    m.servers = r
+}
+
+// PersistStates writes the current environment states to the disk for each
+// server. This is generally called at a specific interval defined in the root
+// runner command to avoid hammering disk I/O when tons of server switch states
+// at once. It is fine if this file falls slightly out of sync, it is just here
+// to make recovering from an unexpected system reboot a little easier.
+func (m *Manager) PersistStates() error {
+    states := map[string]string{}
+    for _, s := range m.All() {
+        states[s.Id()] = s.Environment.State()
+    }
+    data, err := json.Marshal(states)
+    if err != nil {
+        return errors.WithStack(err)
+    }
+    if err := ioutil.WriteFile(config.Get().System.GetStatesPath(), data, 0644); err != nil {
+        return errors.WithStack(err)
+    }
+    return nil
+}
+
+// ReadStates returns the state of the servers.
+func (m *Manager) ReadStates() (map[string]string, error) {
+    f, err := os.OpenFile(config.Get().System.GetStatesPath(), os.O_RDONLY|os.O_CREATE, 0644)
+    if err != nil {
+        return nil, errors.WithStack(err)
+    }
+    defer f.Close()
+    var states map[string]string
+    if err := json.NewDecoder(f).Decode(&states); err != nil && err != io.EOF {
+        return nil, errors.WithStack(err)
+    }
+    out := make(map[string]string, 0)
+    // Only return states for servers that we're currently tracking in the system.
+    for id, state := range states {
+        if _, ok := m.Get(id); ok {
+            out[id] = state
+        }
+    }
+    return out, nil
+}
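The hunk above collapses the old Manager interface, manager struct, and Collection into a single concrete, mutex-guarded Manager. A hedged wiring sketch follows — not part of this commit; the package name, one-minute interval, and logging are placeholders — showing how a caller might construct the manager and then flush states on an interval, as the PersistStates comment describes the root runner command doing.

// Hypothetical wiring for the new Manager API shown in the hunk above.
package boot

import (
    "context"
    "log"
    "time"

    "github.com/pterodactyl/wings/remote"
    "github.com/pterodactyl/wings/server"
)

func run(ctx context.Context, client remote.Client) error {
    // Fetch all assigned servers from the Panel and build the manager.
    m, err := server.NewManager(ctx, client)
    if err != nil {
        return err
    }

    // Restore any states cached on disk from a previous run.
    if states, err := m.ReadStates(); err == nil {
        log.Printf("recovered %d cached server states", len(states))
    }

    // Periodically flush the tracked states back to disk. The interval is a placeholder.
    ticker := time.NewTicker(time.Minute)
    defer ticker.Stop()
    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-ticker.C:
            if err := m.PersistStates(); err != nil {
                log.Printf("failed to persist server states: %v", err)
            }
        }
    }
}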
server/server.go
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "os"
+    "path/filepath"
     "strings"
     "sync"
 
@@ -20,7 +21,8 @@ import (
     "golang.org/x/sync/semaphore"
 )
 
-// High level definition for a server instance being controlled by Wings.
+// Server is the high level definition for a server instance being controlled
+// by Wings.
 type Server struct {
     // Internal mutex used to block actions that need to occur sequentially, such as
     // writing the configuration to the disk.
@@ -28,9 +30,6 @@ type Server struct {
     ctx       context.Context
     ctxCancel *context.CancelFunc
 
-    // manager holds a reference to the manager responsible for the server
-    manager *manager
-
     emitterLock  sync.Mutex
     powerLock    *semaphore.Weighted
     throttleOnce sync.Once
@@ -248,4 +247,109 @@ func (s *Server) EnsureDataDirectoryExists() error {
         }
     }
     return nil
 }
+
+// Sets the state of the server internally. This function handles crash detection as
+// well as reporting to event listeners for the server.
+func (s *Server) OnStateChange() {
+    prevState := s.resources.State.Load()
+
+    st := s.Environment.State()
+    // Update the currently tracked state for the server.
+    s.resources.State.Store(st)
+
+    // Emit the event to any listeners that are currently registered.
+    if prevState != s.Environment.State() {
+        s.Log().WithField("status", st).Debug("saw server status change event")
+        s.Events().Publish(StatusEvent, st)
+    }
+
+    // Reset the resource usage to 0 when the process fully stops so that all of the UI
+    // views in the Panel correctly display 0.
+    if st == environment.ProcessOfflineState {
+        s.resources.Reset()
+        s.emitProcUsage()
+    }
+
+    // If server was in an online state, and is now in an offline state we should handle
+    // that as a crash event. In that scenario, check the last crash time, and the crash
+    // counter.
+    //
+    // In the event that we have passed the thresholds, don't do anything, otherwise
+    // automatically attempt to start the process back up for the user. This is done in a
+    // separate thread as to not block any actions currently taking place in the flow
+    // that called this function.
+    if (prevState == environment.ProcessStartingState || prevState == environment.ProcessRunningState) && s.Environment.State() == environment.ProcessOfflineState {
+        s.Log().Info("detected server as entering a crashed state; running crash handler")
+
+        go func(server *Server) {
+            if err := server.handleServerCrash(); err != nil {
+                if IsTooFrequentCrashError(err) {
+                    server.Log().Info("did not restart server after crash; occurred too soon after the last")
+                } else {
+                    s.PublishConsoleOutputFromDaemon("Server crash was detected but an error occurred while handling it.")
+                    server.Log().WithField("error", err).Error("failed to handle server crash")
+                }
+            }
+        }(s)
+    }
+}
+
+// Determines if the server state is running or not. This is different than the
+// environment state, it is simply the tracked state from this daemon instance, and
+// not the response from Docker.
+func (s *Server) IsRunning() bool {
+    st := s.Environment.State()
+
+    return st == environment.ProcessRunningState || st == environment.ProcessStartingState
+}
+
+// FromConfiguration initializes a server using a data byte array. This will be
+// marshaled into the given struct using a YAML marshaler. This will also
+// configure the given environment for a server.
+func FromConfiguration(data api.ServerConfigurationResponse) (*Server, error) {
+    s, err := New()
+    if err != nil {
+        return nil, errors.WithMessage(err, "loader: failed to instantiate empty server struct")
+    }
+    if err := s.UpdateDataStructure(data.Settings); err != nil {
+        return nil, err
+    }
+
+    s.Archiver = Archiver{Server: s}
+    s.fs = filesystem.New(filepath.Join(config.Get().System.Data, s.Id()), s.DiskSpace(), s.Config().Egg.FileDenylist)
+
+    // Right now we only support a Docker based environment, so I'm going to hard code
+    // this logic in. When we're ready to support other environment we'll need to make
+    // some modifications here obviously.
+    settings := environment.Settings{
+        Mounts:      s.Mounts(),
+        Allocations: s.cfg.Allocations,
+        Limits:      s.cfg.Build,
+    }
+
+    envCfg := environment.NewConfiguration(settings, s.GetEnvironmentVariables())
+    meta := docker.Metadata{
+        Image: s.Config().Container.Image,
+    }
+
+    if env, err := docker.New(s.Id(), &meta, envCfg); err != nil {
+        return nil, err
+    } else {
+        s.Environment = env
+        s.StartEventListeners()
+        s.Throttler().StartTimer(s.Context())
+    }
+
+    // Forces the configuration to be synced with the panel.
+    if err := s.SyncWithConfiguration(data); err != nil {
+        return nil, err
+    }
+
+    // If the server's data directory exists, force disk usage calculation.
+    if _, err := os.Stat(s.Filesystem().Path()); err == nil {
+        s.Filesystem().HasSpaceAvailable(true)
+    }
+
+    return s, nil
+}
server/state.go
@@ -1,137 +0,0 @@
-package server
-
-import (
-    "encoding/json"
-    "io"
-    "io/ioutil"
-    "os"
-    "sync"
-
-    "github.com/pterodactyl/wings/config"
-    "github.com/pterodactyl/wings/environment"
-)
-
-var stateMutex sync.Mutex
-
-// Returns the state of the servers.
-func CachedServerStates() (map[string]string, error) {
-    // Request a lock after we check if the file exists.
-    stateMutex.Lock()
-    defer stateMutex.Unlock()
-
-    // Open the states file.
-    f, err := os.OpenFile(config.Get().System.GetStatesPath(), os.O_RDONLY|os.O_CREATE, 0644)
-    if err != nil {
-        return nil, err
-    }
-    defer f.Close()
-
-    // Convert the json object to a map.
-    states := map[string]string{}
-    if err := json.NewDecoder(f).Decode(&states); err != nil && err != io.EOF {
-        return nil, err
-    }
-
-    return states, nil
-}
-
-// saveServerStates .
-func (m *manager) saveServerStates() error {
-    // Get the states of all servers on the daemon.
-    states := map[string]string{}
-    for _, s := range m.GetAll() {
-        states[s.Id()] = s.Environment.State()
-    }
-
-    // Convert the map to a json object.
-    data, err := json.Marshal(states)
-    if err != nil {
-        return err
-    }
-
-    stateMutex.Lock()
-    defer stateMutex.Unlock()
-
-    // Write the data to the file
-    if err := ioutil.WriteFile(config.Get().System.GetStatesPath(), data, 0644); err != nil {
-        return err
-    }
-
-    return nil
-}
-
-// Sets the state of the server internally. This function handles crash detection as
-// well as reporting to event listeners for the server.
-func (s *Server) OnStateChange() {
-    prevState := s.resources.State.Load()
-
-    st := s.Environment.State()
-    // Update the currently tracked state for the server.
-    s.resources.State.Store(st)
-
-    // Emit the event to any listeners that are currently registered.
-    if prevState != s.Environment.State() {
-        s.Log().WithField("status", st).Debug("saw server status change event")
-        s.Events().Publish(StatusEvent, st)
-    }
-
-    // Persist this change to the disk immediately so that should the Daemon be stopped or
-    // crash we can immediately restore the server state.
-    //
-    // This really only makes a difference if all of the Docker containers are also stopped,
-    // but this was a highly requested feature and isn't hard to work with, so lets do it.
-    //
-    // We also get the benefit of server status changes always propagating corrected configurations
-    // to the disk should we forget to do it elsewhere.
-    go func() {
-        if err := s.manager.saveServerStates(); err != nil {
-            s.Log().WithField("error", err).Warn("failed to write server states to disk")
-        }
-    }()
-
-    // Reset the resource usage to 0 when the process fully stops so that all of the UI
-    // views in the Panel correctly display 0.
-    if st == environment.ProcessOfflineState {
-        s.resources.Reset()
-        s.emitProcUsage()
-    }
-
-    // If server was in an online state, and is now in an offline state we should handle
-    // that as a crash event. In that scenario, check the last crash time, and the crash
-    // counter.
-    //
-    // In the event that we have passed the thresholds, don't do anything, otherwise
-    // automatically attempt to start the process back up for the user. This is done in a
-    // separate thread as to not block any actions currently taking place in the flow
-    // that called this function.
-    if (prevState == environment.ProcessStartingState || prevState == environment.ProcessRunningState) && s.Environment.State() == environment.ProcessOfflineState {
-        s.Log().Info("detected server as entering a crashed state; running crash handler")
-
-        go func(server *Server) {
-            if err := server.handleServerCrash(); err != nil {
-                if IsTooFrequentCrashError(err) {
-                    server.Log().Info("did not restart server after crash; occurred too soon after the last")
-                } else {
-                    s.PublishConsoleOutputFromDaemon("Server crash was detected but an error occurred while handling it.")
-                    server.Log().WithField("error", err).Error("failed to handle server crash")
-                }
-            }
-        }(s)
-    }
-}
-
-// Returns the current state of the server in a race-safe manner.
-// Deprecated
-// use Environment.State()
-func (s *Server) GetState() string {
-    return s.Environment.State()
-}
-
-// Determines if the server state is running or not. This is different than the
-// environment state, it is simply the tracked state from this daemon instance, and
-// not the response from Docker.
-func (s *Server) IsRunning() bool {
-    st := s.Environment.State()
-
-    return st == environment.ProcessRunningState || st == environment.ProcessStartingState
-}