Switch to SQLite for activity tracking
parent e1e7916790
commit 7bd11c1c28
@@ -6,6 +6,7 @@ import (
    "errors"
    "fmt"
    "github.com/pterodactyl/wings/internal/cron"
    "github.com/pterodactyl/wings/internal/sqlite"
    log2 "log"
    "net/http"
    _ "net/http/pprof"
@@ -131,6 +132,10 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
        }),
    )

    if err := sqlite.Initialize(cmd.Context()); err != nil {
        log.WithField("error", err).Fatal("failed to initialize database")
    }

    manager, err := server.NewManager(cmd.Context(), pclient)
    if err != nil {
        log.WithField("error", err).Fatal("failed to load server configurations")
@@ -260,7 +265,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
        }
    }()

    if s, err := cron.Scheduler(manager); err != nil {
    if s, err := cron.Scheduler(cmd.Context(), manager); err != nil {
        log.WithField("error", err).Fatal("failed to initialize cron system")
    } else {
        log.WithField("subsystem", "cron").Info("starting cron processes")
go.mod | 25

@@ -44,7 +44,12 @@ require (
    gopkg.in/yaml.v2 v2.4.0
)

require github.com/goccy/go-json v0.9.6
require (
    github.com/go-co-op/gocron v1.15.0
    github.com/goccy/go-json v0.9.6
    github.com/klauspost/compress v1.15.1
    modernc.org/sqlite v1.17.3
)

require golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e // indirect
@@ -54,7 +59,6 @@ require (
    github.com/Microsoft/hcsshim v0.9.2 // indirect
    github.com/andybalholm/brotli v1.0.4 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/bwmarrin/snowflake v0.3.0 // indirect
    github.com/cespare/xxhash/v2 v2.1.2 // indirect
    github.com/containerd/containerd v1.6.2 // indirect
    github.com/containerd/fifo v1.0.0 // indirect
@@ -66,7 +70,6 @@ require (
    github.com/fsnotify/fsnotify v1.5.1 // indirect
    github.com/gammazero/deque v0.1.1 // indirect
    github.com/gin-contrib/sse v0.1.0 // indirect
    github.com/go-co-op/gocron v1.15.0 // indirect
    github.com/go-playground/locales v0.14.0 // indirect
    github.com/go-playground/universal-translator v0.18.0 // indirect
    github.com/go-playground/validator/v10 v10.10.1 // indirect
@@ -77,7 +80,6 @@ require (
    github.com/inconshreveable/mousetrap v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
    github.com/klauspost/compress v1.15.1 // indirect
    github.com/kr/fs v0.1.0 // indirect
    github.com/leodido/go-urn v1.2.1 // indirect
    github.com/magefile/mage v1.13.0 // indirect
@@ -98,24 +100,33 @@ require (
    github.com/prometheus/client_model v0.2.0 // indirect
    github.com/prometheus/common v0.32.1 // indirect
    github.com/prometheus/procfs v0.7.3 // indirect
    github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
    github.com/robfig/cron/v3 v3.0.1 // indirect
    github.com/sirupsen/logrus v1.8.1 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/ugorji/go/codec v1.2.7 // indirect
    github.com/ulikunitz/xz v0.5.10 // indirect
    github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
    github.com/xujiajun/mmap-go v1.0.1 // indirect
    github.com/xujiajun/nutsdb v0.9.0 // indirect
    github.com/xujiajun/utils v0.0.0-20190123093513-8bf096c4f53b // indirect
    go.uber.org/atomic v1.9.0 // indirect
    go.uber.org/multierr v1.8.0 // indirect
    golang.org/x/mod v0.4.2 // indirect
    golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
    golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
    golang.org/x/text v0.3.7 // indirect
    golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
    golang.org/x/tools v0.1.1 // indirect
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
    google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb // indirect
    google.golang.org/grpc v1.45.0 // indirect
    google.golang.org/protobuf v1.28.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    lukechampine.com/uint128 v1.1.1 // indirect
    modernc.org/cc/v3 v3.36.0 // indirect
    modernc.org/ccgo/v3 v3.16.6 // indirect
    modernc.org/libc v1.16.7 // indirect
    modernc.org/mathutil v1.4.1 // indirect
    modernc.org/memory v1.1.1 // indirect
    modernc.org/opt v0.1.1 // indirect
    modernc.org/strutil v1.1.1 // indirect
    modernc.org/token v1.0.0 // indirect
)
@@ -1,102 +1,102 @@
package cron

import (
    "bytes"
    "context"
    "database/sql"
    "emperror.dev/errors"
    "github.com/apex/log"
    "github.com/goccy/go-json"
    "github.com/pterodactyl/wings/internal/database"
    "encoding/gob"
    "github.com/pterodactyl/wings/internal/sqlite"
    "github.com/pterodactyl/wings/server"
    "github.com/pterodactyl/wings/system"
    "github.com/xujiajun/nutsdb"
    "strings"
)

var key = []byte("events")
var activityCron system.AtomicBool
type activityCron struct {
    mu      *system.AtomicBool
    manager *server.Manager
    max     int64
}

func processActivityLogs(m *server.Manager, c int64) error {
const queryRegularActivity = `
    SELECT id, event, user_uuid, server_uuid, metadata, ip, timestamp FROM activity_logs
    WHERE event NOT LIKE 'server:sftp.%'
    ORDER BY timestamp
    LIMIT ?
`

type QueriedActivity struct {
    id int
    b  []byte
    server.Activity
}

// Parse parses the internal query results into the QueriedActivity type and then properly
// sets the Metadata onto it. This also sets the ID that was returned to ensure we're able
// to then delete all of the matching rows in the database after we're done.
func (qa *QueriedActivity) Parse(r *sql.Rows) error {
    if err := r.Scan(&qa.id, &qa.Event, &qa.User, &qa.Server, &qa.b, &qa.IP, &qa.Timestamp); err != nil {
        return errors.Wrap(err, "cron: failed to parse activity log")
    }
    if err := gob.NewDecoder(bytes.NewBuffer(qa.b)).Decode(&qa.Metadata); err != nil {
        return errors.WithStack(err)
    }
    return nil
}

// Run executes the cronjob and ensures we fetch and send all of the stored activity to the
// Panel instance. Once activity is sent it is deleted from the local database instance. Any
// SFTP specific events are not handled in this cron, they're handled separately to account
// for de-duplication and event merging.
func (ac *activityCron) Run(ctx context.Context) error {
    // Don't execute this cron if there is currently one running. Once this task is completed
    // go ahead and mark it as no longer running.
    if !activityCron.SwapIf(true) {
        log.WithField("subsystem", "cron").WithField("cron", "activity_logs").Warn("cron: process overlap detected, skipping this run")
    if !ac.mu.SwapIf(true) {
        return errors.WithStack(ErrCronRunning)
    }
    defer ac.mu.Store(false)

    rows, err := sqlite.Instance().QueryContext(ctx, queryRegularActivity, ac.max)
    if err != nil {
        return errors.Wrap(err, "cron: failed to query activity logs")
    }
    defer rows.Close()

    var logs []server.Activity
    var ids []int
    for rows.Next() {
        var qa QueriedActivity
        if err := qa.Parse(rows); err != nil {
            return err
        }
        ids = append(ids, qa.id)
        logs = append(logs, qa.Activity)
    }

    if err := rows.Err(); err != nil {
        return errors.WithStack(err)
    }
    if len(logs) == 0 {
        return nil
    }
    defer activityCron.Store(false)

    var list [][]byte
    err := database.DB().View(func(tx *nutsdb.Tx) error {
        // Grab the oldest 100 activity events that have been logged and send them back to the
        // Panel for processing. Once completed, delete those events from the database and then
        // release the lock on this process.
        end := int(c)
        if s, err := tx.LSize(database.ServerActivityBucket, key); err != nil {
            if errors.Is(err, nutsdb.ErrBucket) {
                return nil
            }
            return errors.WithStackIf(err)
        } else if s < end || s == 0 {
            if s == 0 {
                return nil
            }
            end = s
        }
        l, err := tx.LRange(database.ServerActivityBucket, key, 0, end)
        if err != nil {
            // This error is returned when the bucket doesn't exist, which is likely on the
            // first invocations of Wings since we haven't yet logged any data. There is nothing
            // that needs to be done if this error occurs.
            if errors.Is(err, nutsdb.ErrBucket) {
                return nil
            }
            return errors.WithStackIf(err)
        }
        list = l
        return nil
    })

    if err != nil || len(list) == 0 {
        return errors.WithStackIf(err)
    }

    var processed []json.RawMessage
    for _, l := range list {
        var v json.RawMessage
        if err := json.Unmarshal(l, &v); err != nil {
            log.WithField("error", errors.WithStack(err)).Warn("failed to parse activity event json, skipping entry")
            continue
        }
        processed = append(processed, v)
    }

    if err := m.Client().SendActivityLogs(context.Background(), processed); err != nil {
    if err := ac.manager.Client().SendActivityLogs(context.Background(), logs); err != nil {
        return errors.WrapIf(err, "cron: failed to send activity events to Panel")
    }

    return database.DB().Update(func(tx *nutsdb.Tx) error {
        if m, err := tx.LSize(database.ServerActivityBucket, key); err != nil {
            return errors.WithStack(err)
        } else if m > len(list) {
            // As long as there are more elements than we have in the length of our list
            // we can just use the existing `LTrim` functionality of nutsdb. This will remove
            // all of the values we've already pulled and sent to the API.
            return errors.WithStack(tx.LTrim(database.ServerActivityBucket, key, len(list), -1))
        } else {
            i := 0
            // This is the only way I can figure out to actually empty the items out of the list
            // because you cannot use `LTrim` (or I cannot for the life of me figure out how) to
            // trim the slice down to 0 items without it triggering an internal logic error. Perhaps
            // in a future release they'll have a function to do this (based on my skimming of issues
            // on GitHub that I cannot read due to translation barriers).
            for {
                if i >= m {
                    break
                }
                if _, err := tx.LPop(database.ServerActivityBucket, key); err != nil {
                    return errors.WithStack(err)
                }
                i++
            }
    if tx, err := sqlite.Instance().Begin(); err != nil {
        return err
    } else {
        t := make([]string, len(ids))
        params := make([]interface{}, len(ids))
        for i := 0; i < len(ids); i++ {
            t[i] = "?"
            params[i] = ids[i]
        }
            return nil
        })
        q := strings.Join(t, ",")
        _, err := tx.Exec(`DELETE FROM activity_logs WHERE id IN(`+q+`)`, params...)
        if err != nil {
            return errors.Combine(errors.WithStack(err), tx.Rollback())
        }
        return errors.WithStack(tx.Commit())
    }
}
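For context (not part of the commit): database/sql has no built-in expansion for IN (...) lists, which is why the cron above builds one "?" placeholder per id before deleting the rows it has already shipped to the Panel. A minimal standalone sketch of that pattern, with a hypothetical helper name:

package sketch

import (
    "database/sql"
    "strings"
)

// deleteActivityByID mirrors the placeholder-expansion pattern used by the cron:
// one "?" per id, joined into the query, with the ids passed as variadic args.
func deleteActivityByID(db *sql.DB, ids []int) error {
    if len(ids) == 0 {
        return nil
    }
    placeholders := make([]string, len(ids))
    params := make([]interface{}, len(ids))
    for i, id := range ids {
        placeholders[i] = "?"
        params[i] = id
    }
    q := `DELETE FROM activity_logs WHERE id IN (` + strings.Join(placeholders, ",") + `)`
    _, err := db.Exec(q, params...)
    return err
}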
@@ -1,6 +1,7 @@
package cron

import (
    "context"
    "emperror.dev/errors"
    "github.com/apex/log"
    "github.com/go-co-op/gocron"
@@ -17,7 +18,7 @@ var o system.AtomicBool
// Scheduler configures the internal cronjob system for Wings and returns the scheduler
// instance to the caller. This should only be called once per application lifecycle, additional
// calls will result in an error being returned.
func Scheduler(m *server.Manager) (*gocron.Scheduler, error) {
func Scheduler(ctx context.Context, m *server.Manager) (*gocron.Scheduler, error) {
    if !o.SwapIf(true) {
        return nil, errors.New("cron: cannot call scheduler more than once in application lifecycle")
    }
@@ -26,20 +27,20 @@ func Scheduler(m *server.Manager) (*gocron.Scheduler, error) {
        return nil, errors.Wrap(err, "cron: failed to parse configured system timezone")
    }

    s := gocron.NewScheduler(l)
    _, _ = s.Tag("activity").Every(int(config.Get().System.ActivitySendInterval)).Seconds().Do(func() {
        if err := processActivityLogs(m, config.Get().System.ActivitySendCount); err != nil {
            log.WithField("error", err).Error("cron: failed to process activity events")
        }
    })
    activity := activityCron{
        mu:      system.NewAtomicBool(false),
        manager: m,
        max:     config.Get().System.ActivitySendCount,
    }

    _, _ = s.Tag("sftp").Every(20).Seconds().Do(func() {
        runner := sftpEventProcessor{mu: system.NewAtomicBool(false), manager: m}
        if err := runner.Run(); err != nil {
    s := gocron.NewScheduler(l)
    // int(config.Get().System.ActivitySendInterval)
    _, _ = s.Tag("activity").Every(5).Seconds().Do(func() {
        if err := activity.Run(ctx); err != nil {
            if errors.Is(err, ErrCronRunning) {
                log.WithField("cron", "sftp_events").Warn("cron: job already running, skipping...")
                log.WithField("cron", "activity").Warn("cron: process is already running, skipping...")
            } else {
                log.WithField("error", err).Error("cron: failed to process sftp events")
                log.WithField("error", err).Error("cron: failed to process activity events")
            }
        }
    })
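For context (not part of the commit): the scheduler built here uses the gocron v1 API, where Tag applies to the next registered job and the caller is responsible for actually starting the scheduler. A standalone sketch of that usage; the job body and interval are illustrative only:

package main

import (
    "fmt"
    "time"

    "github.com/go-co-op/gocron"
)

func main() {
    s := gocron.NewScheduler(time.UTC)
    // Register a tagged job on a fixed interval, mirroring the activity job above.
    _, _ = s.Tag("example").Every(5).Seconds().Do(func() {
        fmt.Println("tick", time.Now().UTC())
    })
    s.StartAsync() // non-blocking start; the caller decides when to Stop()
    time.Sleep(12 * time.Second)
    s.Stop()
}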
@@ -1,188 +0,0 @@
package cron

import (
    "bytes"
    "emperror.dev/errors"
    "encoding/gob"
    "github.com/pterodactyl/wings/internal/database"
    "github.com/pterodactyl/wings/server"
    "github.com/pterodactyl/wings/sftp"
    "github.com/pterodactyl/wings/system"
    "github.com/xujiajun/nutsdb"
    "path/filepath"
)

type UserDetail struct {
    UUID string
    IP   string
}

type Users map[UserDetail][]sftp.EventRecord
type Events map[sftp.Event]Users

type sftpEventProcessor struct {
    mu      *system.AtomicBool
    manager *server.Manager
}

// Run executes the cronjob and processes sftp activities into normal activity log entries
// by merging together similar records. This helps to reduce the sheer amount of data that
// gets passed back to the Panel and provides simpler activity logging.
func (sep *sftpEventProcessor) Run() error {
    if !sep.mu.SwapIf(true) {
        return errors.WithStack(ErrCronRunning)
    }
    defer sep.mu.Store(false)

    set, err := sep.Events()
    if err != nil {
        return err
    }

    for s, el := range set {
        events := make(Events)
        // Take all of the events that we've pulled out of the system for every server and then
        // parse them into a more usable format in order to create activity log entries for each
        // user, ip, and server instance.
        for _, e := range el {
            u := UserDetail{UUID: e.User, IP: e.IP}
            if _, ok := events[e.Event]; !ok {
                events[e.Event] = make(Users)
            }
            if _, ok := events[e.Event][u]; !ok {
                events[e.Event][u] = []sftp.EventRecord{}
            }
            events[e.Event][u] = append(events[e.Event][u], e)
        }

        // Now that we have all of the events, go ahead and create a normal activity log entry
        // for each instance grouped by user & IP for easier Panel reporting.
        for k, v := range events {
            for u, records := range v {
                files := make([]interface{}, len(records))
                for i, r := range records {
                    if r.Action.Target != "" {
                        files[i] = map[string]string{
                            "from": filepath.Clean(r.Action.Entity),
                            "to":   filepath.Clean(r.Action.Target),
                        }
                    } else {
                        files[i] = filepath.Clean(r.Action.Entity)
                    }
                }

                entry := server.Activity{
                    Server:   s,
                    User:     u.UUID,
                    Event:    server.Event("server:sftp." + string(k)),
                    Metadata: server.ActivityMeta{"files": files},
                    IP:       u.IP,
                    // Just assume that the first record in the set is the oldest and the most relevant
                    // of the timestamps to use.
                    Timestamp: records[0].Timestamp,
                }

                if err := entry.Save(); err != nil {
                    return errors.Wrap(err, "cron: failed to save new event for server")
                }

                if err := sep.Cleanup([]byte(s)); err != nil {
                    return errors.Wrap(err, "cron: failed to cleanup events")
                }
            }
        }
    }

    return nil
}

// Cleanup runs through all of the events we have currently tracked in the bucket and removes
// them once we've managed to process them and created the associated server activity events.
func (sep *sftpEventProcessor) Cleanup(key []byte) error {
    err := database.DB().Update(func(tx *nutsdb.Tx) error {
        s, err := sep.sizeOf(tx, key)
        if err != nil {
            return err
        }
        if s == 0 {
            return nil
        } else if s < sep.limit() {
            for i := 0; i < s; i++ {
                if _, err := tx.LPop(database.SftpActivityBucket, key); err != nil {
                    return errors.WithStack(err)
                }
            }
        } else {
            if err := tx.LTrim(database.ServerActivityBucket, key, sep.limit()-1, -1); err != nil {
                return errors.WithStack(err)
            }
        }
        return nil
    })

    // Sometimes the key will end up not being found depending on the order of operations for
    // different events that are happening on the system. Make sure to account for that here,
    // if the key isn't found we can just safely assume it is a non issue and move on with our
    // day since there is nothing to clean up.
    if err != nil && errors.Is(err, nutsdb.ErrKeyNotFound) {
        return nil
    }
    return err
}

// Events pulls all of the events in the SFTP event bucket and parses them into an iterable
// set allowing Wings to process the events and send them back to the Panel instance.
func (sep *sftpEventProcessor) Events() (map[string][]sftp.EventRecord, error) {
    set := make(map[string][]sftp.EventRecord, len(sep.manager.Keys()))
    err := database.DB().View(func(tx *nutsdb.Tx) error {
        for _, k := range sep.manager.Keys() {
            lim := sep.limit()
            if s, err := sep.sizeOf(tx, []byte(k)); err != nil {
                // Not every server instance will have events tracked, so don't treat this
                // as a true error.
                if errors.Is(err, nutsdb.ErrKeyNotFound) {
                    continue
                }
                return err
            } else if s == 0 {
                continue
            } else if s < lim {
                lim = -1
            }
            list, err := tx.LRange(database.SftpActivityBucket, []byte(k), 0, lim)
            if err != nil {
                return errors.WithStack(err)
            }
            set[k] = make([]sftp.EventRecord, len(list))
            for i, l := range list {
                if err := gob.NewDecoder(bytes.NewBuffer(l)).Decode(&set[k][i]); err != nil {
                    return errors.WithStack(err)
                }
            }
        }
        return nil
    })

    return set, err
}

// sizeOf is a wrapper around a nutsdb transaction to get the size of a key in the
// bucket while also accounting for some expected error conditions and handling those
// automatically.
func (sep *sftpEventProcessor) sizeOf(tx *nutsdb.Tx, key []byte) (int, error) {
    s, err := tx.LSize(database.SftpActivityBucket, key)
    if err != nil {
        if errors.Is(err, nutsdb.ErrBucket) {
            return 0, nil
        }
        return 0, errors.WithStack(err)
    }
    return s, nil
}

// limit returns the number of records that are processed for each server at
// once. This will then be translated into a variable number of activity log
// events, with the worst case being a single event with "n" associated files.
func (sep *sftpEventProcessor) limit() int {
    return 500
}
@@ -1,39 +0,0 @@
package database

import (
    "emperror.dev/errors"
    "github.com/apex/log"
    "github.com/pterodactyl/wings/config"
    "github.com/xujiajun/nutsdb"
    "path/filepath"
    "sync"
)

var db *nutsdb.DB
var syncer sync.Once

const (
    ServerActivityBucket = "server_activity"
    SftpActivityBucket   = "sftp_activity"
)

func initialize() error {
    opt := nutsdb.DefaultOptions
    opt.Dir = filepath.Join(config.Get().System.RootDirectory, "db")

    instance, err := nutsdb.Open(opt)
    if err != nil {
        return errors.WithStack(err)
    }
    db = instance
    return nil
}

func DB() *nutsdb.DB {
    syncer.Do(func() {
        if err := initialize(); err != nil {
            log.WithField("error", err).Fatal("database: failed to initialize instance, this is an unrecoverable error")
        }
    })
    return db
}
internal/sqlite/database.go | 56 (new file)

@@ -0,0 +1,56 @@
package sqlite

import (
    "context"
    "database/sql"
    "emperror.dev/errors"
    "github.com/apex/log"
    "github.com/pterodactyl/wings/config"
    "github.com/pterodactyl/wings/system"
    _ "modernc.org/sqlite"
    "path/filepath"
)

var o system.AtomicBool
var db *sql.DB

const schema = `
CREATE TABLE IF NOT EXISTS "activity_logs" (
    "id" integer,
    "event" varchar NOT NULL,
    "user_uuid" varchar,
    "server_uuid" varchar NOT NULL,
    "metadata" blob,
    "ip" varchar,
    "timestamp" datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (id)
);

-- Add an index otherwise we're gonna end up with performance issues over time especially
-- on huge Wings instances where we'll have a large number of activity logs to parse through.
CREATE INDEX IF NOT EXISTS idx_event ON activity_logs(event);
`

func Initialize(ctx context.Context) error {
    if !o.SwapIf(true) {
        panic("database: attempt to initialize more than once during application lifecycle")
    }
    p := filepath.Join(config.Get().System.RootDirectory, "wings.db")
    log.WithField("subsystem", "sqlite").WithField("path", p).Info("initializing local database")
    database, err := sql.Open("sqlite", p)
    if err != nil {
        return errors.Wrap(err, "database: could not open database file")
    }
    db = database
    if _, err := db.ExecContext(ctx, schema); err != nil {
        return errors.Wrap(err, "database: failed to initialize base schema")
    }
    return nil
}

func Instance() *sql.DB {
    if db == nil {
        panic("database: attempt to access instance before initialized")
    }
    return db
}
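For context (not part of the commit): a standalone sketch exercising the same table shape with the pure-Go modernc.org/sqlite driver used above. The file path, inserted values, and query are illustrative only:

package main

import (
    "database/sql"
    "fmt"

    _ "modernc.org/sqlite"
)

func main() {
    db, err := sql.Open("sqlite", "/tmp/wings-example.db")
    if err != nil {
        panic(err)
    }
    defer db.Close()

    // Same column layout as the schema constant above.
    if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS activity_logs (
        id integer PRIMARY KEY,
        event varchar NOT NULL,
        user_uuid varchar,
        server_uuid varchar NOT NULL,
        metadata blob,
        ip varchar,
        timestamp datetime NOT NULL DEFAULT CURRENT_TIMESTAMP
    )`); err != nil {
        panic(err)
    }

    if _, err := db.Exec(`INSERT INTO activity_logs(event, server_uuid) VALUES(?, ?)`, "server:console.command", "abc-123"); err != nil {
        panic(err)
    }

    // The idx_event index in the schema exists to keep filters like this cheap as the table grows.
    var n int
    if err := db.QueryRow(`SELECT COUNT(*) FROM activity_logs WHERE event NOT LIKE 'server:sftp.%'`).Scan(&n); err != nil {
        panic(err)
    }
    fmt.Println("pending non-sftp events:", n)
}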
@@ -30,7 +30,7 @@ type Client interface {
    SetInstallationStatus(ctx context.Context, uuid string, successful bool) error
    SetTransferStatus(ctx context.Context, uuid string, successful bool) error
    ValidateSftpCredentials(ctx context.Context, request SftpAuthRequest) (SftpAuthResponse, error)
    SendActivityLogs(ctx context.Context, activity []json.RawMessage) error
    SendActivityLogs(ctx context.Context, activity interface{}) error
}

type client struct {
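For context (not part of the commit): relaxing the signature to interface{} lets callers pass either pre-marshalled []json.RawMessage or typed structs; the client just JSON-encodes whatever it receives under a "data" key (see the Post call below). A standalone sketch of the resulting request body; the struct and its fields are illustrative, not the real server.Activity definition:

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// activity is a stand-in for whatever typed value a caller passes in.
type activity struct {
    Event     string    `json:"event"`
    User      string    `json:"user"`
    Timestamp time.Time `json:"timestamp"`
}

func main() {
    payload := map[string]interface{}{
        "data": []activity{{Event: "server:console.command", User: "abc-123", Timestamp: time.Now().UTC()}},
    }
    b, _ := json.MarshalIndent(payload, "", "  ")
    fmt.Println(string(b)) // shape of the body POSTed to /activity
}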
@@ -3,7 +3,6 @@ package remote
import (
    "context"
    "fmt"
    "github.com/goccy/go-json"
    "strconv"
    "sync"
@@ -180,7 +179,7 @@ func (c *client) SendRestorationStatus(ctx context.Context, backup string, succe
}

// SendActivityLogs sends activity logs back to the Panel for processing.
func (c *client) SendActivityLogs(ctx context.Context, activity []json.RawMessage) error {
func (c *client) SendActivityLogs(ctx context.Context, activity interface{}) error {
    resp, err := c.Post(ctx, "/activity", d{"data": activity})
    if err != nil {
        return errors.WithStackIf(err)
@@ -66,7 +66,6 @@ func Configure(m *wserver.Manager, client remote.Client) *gin.Engine {
        server.DELETE("", deleteServer)

        server.GET("/logs", getServerLogs)
        server.GET("/activity", getServerActivityLogs)
        server.POST("/power", postServerPower)
        server.POST("/commands", postServerCommands)
        server.POST("/install", postServerInstall)
@@ -2,9 +2,6 @@ package router

import (
    "context"
    "github.com/goccy/go-json"
    "github.com/pterodactyl/wings/internal/database"
    "github.com/xujiajun/nutsdb"
    "net/http"
    "os"
    "strconv"
@@ -43,44 +40,6 @@ func getServerLogs(c *gin.Context) {
    c.JSON(http.StatusOK, gin.H{"data": out})
}

// Returns the activity logs tracked internally for the server instance. Note that these
// logs are routinely cleared out as Wings communicates directly with the Panel to pass
// along all of the logs for servers it monitors. As activities are passed to the panel
// they are deleted from Wings.
//
// As a result, this endpoint may or may not return data, and the data returned can change
// between requests.
func getServerActivityLogs(c *gin.Context) {
    s := ExtractServer(c)

    var out [][]byte
    err := database.DB().View(func(tx *nutsdb.Tx) error {
        items, err := tx.LRange(database.ServerActivityBucket, []byte(s.ID()), 0, 10)
        if err != nil {
            return err
        }
        out = items
        return nil
    })

    if err != nil {
        middleware.CaptureAndAbort(c, err)
        return
    }

    var activity []*server.Activity
    for _, b := range out {
        var a server.Activity
        if err := json.Unmarshal(b, &a); err != nil {
            middleware.CaptureAndAbort(c, err)
            return
        }
        activity = append(activity, &a)
    }

    c.JSON(http.StatusOK, gin.H{"data": activity})
}

// Handles a request to control the power state of a server. If the action being passed
// through is invalid a 404 is returned. Otherwise, a HTTP/202 Accepted response is returned
// and the actual power action is run asynchronously so that we don't have to block the
@@ -1,11 +1,11 @@
package server

import (
    "bytes"
    "emperror.dev/errors"
    "encoding/gob"
    "github.com/apex/log"
    "github.com/goccy/go-json"
    "github.com/pterodactyl/wings/internal/database"
    "github.com/xujiajun/nutsdb"
    "github.com/pterodactyl/wings/internal/sqlite"
    "regexp"
    "time"
)
@@ -17,11 +17,11 @@ const ActivityPowerPrefix = "server:power."

const (
    ActivityConsoleCommand      = Event("server:console.command")
    ActivityFileDeleted         = Event("server:file.delete")
    ActivityFileRename          = Event("server:file.rename")
    ActivityFileCreateDirectory = Event("server:file.create-directory")
    ActivityFileWrite           = Event("server:file.write")
    ActivityFileRead            = Event("server:file.read")
    ActivitySftpWrite           = Event("server:sftp.write")
    ActivitySftpCreate          = Event("server:sftp.create")
    ActivitySftpCreateDirectory = Event("server:sftp.create-directory")
    ActivitySftpRename          = Event("server:sftp.rename")
    ActivitySftpDelete          = Event("server:sftp.delete")
)

var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)
@@ -106,21 +106,21 @@ func (a Activity) Save() error {
    // trim that off, otherwise it'll fail validation when sent to the Panel.
    a.IP = ipTrimRegex.ReplaceAllString(a.IP, "")

    value, err := json.Marshal(a)
    if err != nil {
        return errors.Wrap(err, "database: failed to marshal activity into json bytes")
    var buf bytes.Buffer
    enc := gob.NewEncoder(&buf)
    if err := enc.Encode(&a.Metadata); err != nil {
        return errors.Wrap(err, "activity: error encoding metadata")
    }

    return database.DB().Update(func(tx *nutsdb.Tx) error {
        log.WithField("subsystem", "activity").
            WithFields(log.Fields{"server": a.Server, "user": a.User, "event": a.Event, "ip": a.IP}).
            Debug("saving activity to database")
    log.WithField("subsystem", "activity").
        WithFields(log.Fields{"server": a.Server, "user": a.User, "event": a.Event, "ip": a.IP}).
        Debug("saving activity to database")

        if err := tx.RPush(database.ServerActivityBucket, []byte("events"), value); err != nil {
            return errors.WithStack(err)
        }
        return nil
    })
    stmt := `INSERT INTO activity_logs(event, user_uuid, server_uuid, metadata, ip, timestamp) VALUES(?, ?, ?, ?, ?, ?)`
    if _, err := sqlite.Instance().Exec(stmt, a.Event, a.User, a.Server, buf.Bytes(), a.IP, a.Timestamp); err != nil {
        return errors.WithStack(err)
    }
    return nil
}

func (s *Server) NewRequestActivity(user string, ip string) RequestActivity {
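For context (not part of the commit): metadata is now stored in SQLite as a gob-encoded blob (Save above) and decoded again when the cron drains the table (QueriedActivity.Parse). A standalone sketch of that round-trip; the meta type below is an illustrative stand-in for the map-style server.ActivityMeta, and gob.Register is needed for concrete types carried behind interface{} values (the gob.Register calls in wings.go serve the same purpose):

package main

import (
    "bytes"
    "encoding/gob"
    "fmt"
)

// meta is an illustrative stand-in for a map-style metadata payload.
type meta map[string]interface{}

func main() {
    // Register the concrete type carried inside the interface{} values.
    gob.Register([]string{})

    in := meta{"files": []string{"/foo/test.txt"}}

    var buf bytes.Buffer
    if err := gob.NewEncoder(&buf).Encode(&in); err != nil {
        panic(err)
    }

    var out meta
    if err := gob.NewDecoder(bytes.NewBuffer(buf.Bytes())).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Printf("%#v\n", out)
}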
@@ -1,13 +1,9 @@
package sftp

import (
    "bytes"
    "emperror.dev/errors"
    "encoding/gob"
    "github.com/apex/log"
    "github.com/pterodactyl/wings/internal/database"
    "github.com/xujiajun/nutsdb"
    "regexp"
    "github.com/pterodactyl/wings/server"
    "time"
)
@@ -17,7 +13,6 @@ type eventHandler struct {
    server string
}

type Event string
type FileAction struct {
    // Entity is the targeted file or directory (depending on the event) that the action
    // is being performed _against_, such as "/foo/test.txt". This will always be the full
@@ -29,53 +24,33 @@ type FileAction struct {
    Target string
}

type EventRecord struct {
    Event     Event
    Action    FileAction
    IP        string
    User      string
    Timestamp time.Time
}
// Log parses a SFTP specific file activity event and then passes it off to be stored
// in the normal activity database.
func (eh *eventHandler) Log(e server.Event, fa FileAction) error {
    metadata := map[string]interface{}{
        "files": []string{fa.Entity},
    }
    if fa.Target != "" {
        metadata["files"] = []map[string]string{
            {"from": fa.Entity, "to": fa.Target},
        }
    }

const (
    EventWrite           = Event("write")
    EventCreate          = Event("create")
    EventCreateDirectory = Event("create-directory")
    EventRename          = Event("rename")
    EventDelete          = Event("delete")
)

var ipTrimRegex = regexp.MustCompile(`(:\d*)?$`)

// Log logs an event into the Wings bucket for SFTP activity which then allows a separate
// cron to run and parse the events into a more manageable stream of event data to send
// back to the Panel instance.
func (eh *eventHandler) Log(e Event, fa FileAction) error {
    r := EventRecord{
        Event:  e,
        Action: fa,
        IP:     ipTrimRegex.ReplaceAllString(eh.ip, ""),
    r := server.Activity{
        User:     eh.user,
        Server:   eh.server,
        Event:    e,
        Metadata: metadata,
        IP:       eh.ip,
        Timestamp: time.Now().UTC(),
    }

    var buf bytes.Buffer
    enc := gob.NewEncoder(&buf)
    if err := enc.Encode(r); err != nil {
        return errors.Wrap(err, "sftp: failed to encode event")
    }

    return database.DB().Update(func(tx *nutsdb.Tx) error {
        if err := tx.RPush(database.SftpActivityBucket, []byte(eh.server), buf.Bytes()); err != nil {
            return errors.Wrap(err, "sftp: failed to push event to stack")
        }
        return nil
    })
    return errors.Wrap(r.Save(), "sftp: failed to store file event")
}

// MustLog is a wrapper around log that will trigger a fatal error and exit the application
// if an error is encountered during the logging of the event.
func (eh *eventHandler) MustLog(e Event, fa FileAction) {
func (eh *eventHandler) MustLog(e server.Event, fa FileAction) {
    if err := eh.Log(e, fa); err != nil {
        log.WithField("error", err).Fatal("sftp: failed to log event")
    }
@@ -130,9 +130,9 @@ func (h *Handler) Filewrite(request *sftp.Request) (io.WriterAt, error) {
    // Chown may or may not have been called in the touch function, so always do
    // it at this point to avoid the file being improperly owned.
    _ = h.fs.Chown(request.Filepath)
    event := EventWrite
    event := server.ActivitySftpWrite
    if permission == PermissionFileCreate {
        event = EventCreate
        event = server.ActivitySftpCreate
    }
    h.events.MustLog(event, FileAction{Entity: request.Filepath})
    return f, nil
@@ -185,7 +185,7 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
            l.WithField("error", err).Error("failed to rename file")
            return sftp.ErrSSHFxFailure
        }
        h.events.MustLog(EventRename, FileAction{Entity: request.Filepath, Target: request.Target})
        h.events.MustLog(server.ActivitySftpRename, FileAction{Entity: request.Filepath, Target: request.Target})
        break
    // Handle deletion of a directory. This will properly delete all of the files and
    // folders within that directory if it is not already empty (unlike a lot of SFTP
@@ -199,7 +199,7 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
            l.WithField("error", err).Error("failed to remove directory")
            return sftp.ErrSSHFxFailure
        }
        h.events.MustLog(EventDelete, FileAction{Entity: request.Filepath})
        h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
        return sftp.ErrSSHFxOk
    // Handle requests to create a new Directory.
    case "Mkdir":
@@ -212,7 +212,7 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
            l.WithField("error", err).Error("failed to create directory")
            return sftp.ErrSSHFxFailure
        }
        h.events.MustLog(EventCreateDirectory, FileAction{Entity: request.Filepath})
        h.events.MustLog(server.ActivitySftpCreateDirectory, FileAction{Entity: request.Filepath})
        break
    // Support creating symlinks between files. The source and target must resolve within
    // the server home directory.
@@ -245,7 +245,7 @@ func (h *Handler) Filecmd(request *sftp.Request) error {
            l.WithField("error", err).Error("failed to remove a file")
            return sftp.ErrSSHFxFailure
        }
        h.events.MustLog(EventDelete, FileAction{Entity: request.Filepath})
        h.events.MustLog(server.ActivitySftpDelete, FileAction{Entity: request.Filepath})
        return sftp.ErrSSHFxOk
    default:
        return sftp.ErrSSHFxOpUnsupported
wings.go | 4

@@ -3,13 +3,13 @@ package main
import (
    "encoding/gob"
    "github.com/pterodactyl/wings/cmd"
    "github.com/pterodactyl/wings/sftp"
    "github.com/pterodactyl/wings/server"
    "math/rand"
    "time"
)

func main() {
    gob.Register(sftp.EventRecord{})
    gob.Register(server.ActivityMeta{})

    // Since we make use of the math/rand package in the code, especially for generating
    // non-cryptographically secure random strings we need to seed the RNG. Just make use