Better tracking of SFTP events
This commit is contained in:
@@ -12,16 +12,16 @@ import (
|
||||
)
|
||||
|
||||
var key = []byte("events")
|
||||
var processing system.AtomicBool
|
||||
var activityCron system.AtomicBool
|
||||
|
||||
func processActivityLogs(m *server.Manager, c int64) error {
|
||||
// Don't execute this cron if there is currently one running. Once this task is completed
|
||||
// go ahead and mark it as no longer running.
|
||||
if !processing.SwapIf(true) {
|
||||
log.WithField("subsystem", "cron").Warn("cron: process overlap detected, skipping this run")
|
||||
if !activityCron.SwapIf(true) {
|
||||
log.WithField("subsystem", "cron").WithField("cron", "activity_logs").Warn("cron: process overlap detected, skipping this run")
|
||||
return nil
|
||||
}
|
||||
defer processing.Store(false)
|
||||
defer activityCron.Store(false)
|
||||
|
||||
var list [][]byte
|
||||
err := database.DB().View(func(tx *nutsdb.Tx) error {
|
||||
@@ -30,6 +30,9 @@ func processActivityLogs(m *server.Manager, c int64) error {
|
||||
// release the lock on this process.
|
||||
end := int(c)
|
||||
if s, err := tx.LSize(database.ServerActivityBucket, key); err != nil {
|
||||
if errors.Is(err, nutsdb.ErrBucket) {
|
||||
return nil
|
||||
}
|
||||
return errors.WithStackIf(err)
|
||||
} else if s < end || s == 0 {
|
||||
if s == 0 {
|
||||
|
||||
@@ -10,16 +10,17 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
const ErrCronRunning = errors.Sentinel("cron: job already running")
|
||||
|
||||
var o system.AtomicBool
|
||||
|
||||
// Scheduler configures the internal cronjob system for Wings and returns the scheduler
|
||||
// instance to the caller. This should only be called once per application lifecycle, additional
|
||||
// calls will result in an error being returned.
|
||||
func Scheduler(m *server.Manager) (*gocron.Scheduler, error) {
|
||||
if o.Load() {
|
||||
if !o.SwapIf(true) {
|
||||
return nil, errors.New("cron: cannot call scheduler more than once in application lifecycle")
|
||||
}
|
||||
o.Store(true)
|
||||
l, err := time.LoadLocation(config.Get().System.Timezone)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "cron: failed to parse configured system timezone")
|
||||
@@ -32,5 +33,16 @@ func Scheduler(m *server.Manager) (*gocron.Scheduler, error) {
|
||||
}
|
||||
})
|
||||
|
||||
_, _ = s.Tag("sftp").Every(20).Seconds().Do(func() {
|
||||
runner := sftpEventProcessor{mu: system.NewAtomicBool(false), manager: m}
|
||||
if err := runner.Run(); err != nil {
|
||||
if errors.Is(err, ErrCronRunning) {
|
||||
log.WithField("cron", "sftp_events").Warn("cron: job already running, skipping...")
|
||||
} else {
|
||||
log.WithField("error", err).Error("cron: failed to process sftp events")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
174
internal/cron/sftp_cron.go
Normal file
174
internal/cron/sftp_cron.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"emperror.dev/errors"
|
||||
"encoding/gob"
|
||||
"github.com/pterodactyl/wings/internal/database"
|
||||
"github.com/pterodactyl/wings/server"
|
||||
"github.com/pterodactyl/wings/sftp"
|
||||
"github.com/pterodactyl/wings/system"
|
||||
"github.com/xujiajun/nutsdb"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// UserDetail uniquely identifies the actor behind a batch of SFTP events:
// the user's UUID plus the IP address the connection originated from. It is
// used as a map key (see Users), so both fields must remain comparable.
type UserDetail struct {
	UUID string
	IP   string
}
|
||||
|
||||
// Users groups raw SFTP event records by the user (UUID + IP) that produced them.
type Users map[UserDetail][]sftp.EventRecord

// Events groups those per-user collections by the type of SFTP event that occurred.
type Events map[sftp.Event]Users
|
||||
|
||||
// sftpEventProcessor handles the periodic consolidation of raw SFTP activity
// records into normal server activity log entries.
type sftpEventProcessor struct {
	// mu guards against overlapping cron executions; see Run.
	mu *system.AtomicBool
	// manager provides the set of server keys whose event buckets are processed.
	manager *server.Manager
}
|
||||
|
||||
// Run executes the cronjob and processes sftp activities into normal activity log entries
|
||||
// by merging together similar records. This helps to reduce the sheer amount of data that
|
||||
// gets passed back to the Panel and provides simpler activity logging.
|
||||
func (sep *sftpEventProcessor) Run() error {
|
||||
if !sep.mu.SwapIf(true) {
|
||||
return errors.WithStack(ErrCronRunning)
|
||||
}
|
||||
defer sep.mu.Store(false)
|
||||
|
||||
set, err := sep.Events()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for s, el := range set {
|
||||
events := make(Events)
|
||||
// Take all of the events that we've pulled out of the system for every server and then
|
||||
// parse them into a more usable format in order to create activity log entries for each
|
||||
// user, ip, and server instance.
|
||||
for _, e := range el {
|
||||
u := UserDetail{UUID: e.User, IP: e.IP}
|
||||
if _, ok := events[e.Event]; !ok {
|
||||
events[e.Event] = make(Users)
|
||||
}
|
||||
if _, ok := events[e.Event][u]; !ok {
|
||||
events[e.Event][u] = []sftp.EventRecord{}
|
||||
}
|
||||
events[e.Event][u] = append(events[e.Event][u], e)
|
||||
}
|
||||
|
||||
// Now that we have all of the events, go ahead and create a normal activity log entry
|
||||
// for each instance grouped by user & IP for easier Panel reporting.
|
||||
for k, v := range events {
|
||||
for u, records := range v {
|
||||
files := make([]interface{}, len(records))
|
||||
for i, r := range records {
|
||||
if r.Action.Target != "" {
|
||||
files[i] = map[string]string{
|
||||
"from": filepath.Clean(r.Action.Entity),
|
||||
"to": filepath.Clean(r.Action.Target),
|
||||
}
|
||||
} else {
|
||||
files[i] = filepath.Clean(r.Action.Entity)
|
||||
}
|
||||
}
|
||||
|
||||
entry := server.Activity{
|
||||
Server: s,
|
||||
User: u.UUID,
|
||||
Event: server.Event("server:sftp." + string(k)),
|
||||
Metadata: server.ActivityMeta{"files": files},
|
||||
IP: u.IP,
|
||||
// Just assume that the first record in the set is the oldest and the most relevant
|
||||
// of the timestamps to use.
|
||||
Timestamp: records[0].Timestamp,
|
||||
}
|
||||
|
||||
if err := entry.Save(); err != nil {
|
||||
return errors.Wrap(err, "cron: failed to save new event for server")
|
||||
}
|
||||
|
||||
if err := sep.Cleanup([]byte(s)); err != nil {
|
||||
return errors.Wrap(err, "cron: failed to cleanup events")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup runs through all of the events we have currently tracked in the bucket and removes
|
||||
// them once we've managed to process them and created the associated server activity events.
|
||||
func (sep *sftpEventProcessor) Cleanup(key []byte) error {
|
||||
return database.DB().Update(func(tx *nutsdb.Tx) error {
|
||||
s, err := sep.sizeOf(tx, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s == 0 {
|
||||
return nil
|
||||
} else if s < sep.limit() {
|
||||
for i := 0; i < s; i++ {
|
||||
if _, err := tx.LPop(database.SftpActivityBucket, key); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := tx.LTrim(database.ServerActivityBucket, key, sep.limit()-1, -1); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Events pulls all of the events in the SFTP event bucket and parses them into an iterable
|
||||
// set allowing Wings to process the events and send them back to the Panel instance.
|
||||
func (sep *sftpEventProcessor) Events() (map[string][]sftp.EventRecord, error) {
|
||||
set := make(map[string][]sftp.EventRecord, len(sep.manager.Keys()))
|
||||
err := database.DB().View(func(tx *nutsdb.Tx) error {
|
||||
for _, k := range sep.manager.Keys() {
|
||||
lim := sep.limit()
|
||||
if s, err := sep.sizeOf(tx, []byte(k)); err != nil {
|
||||
return err
|
||||
} else if s == 0 {
|
||||
continue
|
||||
} else if s < lim {
|
||||
lim = -1
|
||||
}
|
||||
list, err := tx.LRange(database.SftpActivityBucket, []byte(k), 0, lim)
|
||||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
set[k] = make([]sftp.EventRecord, len(list))
|
||||
for i, l := range list {
|
||||
if err := gob.NewDecoder(bytes.NewBuffer(l)).Decode(&set[k][i]); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return set, err
|
||||
}
|
||||
|
||||
// sizeOf is a wrapper around a nutsdb transaction to get the size of a key in the
|
||||
// bucket while also accounting for some expected error conditions and handling those
|
||||
// automatically.
|
||||
func (sep *sftpEventProcessor) sizeOf(tx *nutsdb.Tx, key []byte) (int, error) {
|
||||
s, err := tx.LSize(database.SftpActivityBucket, key)
|
||||
if err != nil {
|
||||
if errors.Is(err, nutsdb.ErrBucket) {
|
||||
return 0, nil
|
||||
}
|
||||
return 0, errors.WithStack(err)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// limit returns the number of records that are processed for each server at
|
||||
// once. This will then be translated into a variable number of activity log
|
||||
// events, with the worst case being a single event with "n" associated files.
|
||||
func (sep *sftpEventProcessor) limit() int {
|
||||
return 500
|
||||
}
|
||||
Reference in New Issue
Block a user