Streaming Transfers (#153)

This commit is contained in:
Matthew Penner
2022-11-14 18:25:01 -07:00
committed by GitHub
parent 4781eeaedc
commit 57e7eb714c
21 changed files with 1015 additions and 612 deletions

View File

@@ -66,7 +66,7 @@ func (b *LocalBackup) Generate(ctx context.Context, basePath, ignore string) (*A
}
b.log().WithField("path", b.Path()).Info("creating backup for server")
if err := a.Create(b.Path()); err != nil {
if err := a.Create(ctx, b.Path()); err != nil {
return nil, err
}
b.log().Info("created backup successfully")

View File

@@ -57,7 +57,7 @@ func (s *S3Backup) Generate(ctx context.Context, basePath, ignore string) (*Arch
}
s.log().WithField("path", s.Path()).Info("creating backup for server")
if err := a.Create(s.Path()); err != nil {
if err := a.Create(ctx, s.Path()); err != nil {
return nil, err
}
s.log().Info("created backup successfully")

View File

@@ -5,8 +5,7 @@ import (
"github.com/pterodactyl/wings/system"
)
// Defines all of the possible output events for a server.
// noinspection GoNameStartsWithPackageName
// Defines all the possible output events for a server.
const (
DaemonMessageEvent = "daemon message"
InstallOutputEvent = "install output"
@@ -19,6 +18,7 @@ const (
BackupCompletedEvent = "backup completed"
TransferLogsEvent = "transfer logs"
TransferStatusEvent = "transfer status"
DeletedEvent = "deleted"
)
// Events returns the server's emitter instance.

View File

@@ -2,13 +2,13 @@ package filesystem
import (
"archive/tar"
"context"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"emperror.dev/errors"
"github.com/apex/log"
@@ -18,7 +18,7 @@ import (
ignore "github.com/sabhiram/go-gitignore"
"github.com/pterodactyl/wings/config"
"github.com/pterodactyl/wings/system"
"github.com/pterodactyl/wings/internal/progress"
)
const memory = 4 * 1024
@@ -33,13 +33,13 @@ var pool = sync.Pool{
// TarProgress .
type TarProgress struct {
*tar.Writer
p *Progress
p *progress.Progress
}
// NewTarProgress .
func NewTarProgress(w *tar.Writer, p *Progress) *TarProgress {
func NewTarProgress(w *tar.Writer, p *progress.Progress) *TarProgress {
if p != nil {
p.w = w
p.Writer = w
}
return &TarProgress{
Writer: w,
@@ -47,6 +47,7 @@ func NewTarProgress(w *tar.Writer, p *Progress) *TarProgress {
}
}
// Write .
func (p *TarProgress) Write(v []byte) (int, error) {
if p.p == nil {
return p.Writer.Write(v)
@@ -54,84 +55,6 @@ func (p *TarProgress) Write(v []byte) (int, error) {
return p.p.Write(v)
}
// Progress is used to track the progress of any I/O operation that are being
// performed.
type Progress struct {
// written is the total size of the files that have been written to the writer.
written int64
// Total is the total size of the archive in bytes.
total int64
// w .
w io.Writer
}
// NewProgress .
func NewProgress(total int64) *Progress {
return &Progress{total: total}
}
// SetWriter sets the writer progress will forward writes to.
// NOTE: This function is not thread safe.
func (p *Progress) SetWriter(w io.Writer) {
p.w = w
}
// Written returns the total number of bytes written.
// This function should be used when the progress is tracking data being written.
func (p *Progress) Written() int64 {
return atomic.LoadInt64(&p.written)
}
// Total returns the total size in bytes.
func (p *Progress) Total() int64 {
return atomic.LoadInt64(&p.total)
}
// Write totals the number of bytes that have been written to the writer.
func (p *Progress) Write(v []byte) (int, error) {
n := len(v)
atomic.AddInt64(&p.written, int64(n))
if p.w != nil {
return p.w.Write(v)
}
return n, nil
}
// Progress returns a formatted progress string for the current progress.
func (p *Progress) Progress(width int) string {
// current = 100 (Progress, dynamic)
// total = 1000 (Content-Length, dynamic)
// width = 25 (Number of ticks to display, static)
// widthPercentage = 100 / width (What percentage does each tick represent, static)
//
// percentageDecimal = current / total = 0.1
// percentage = percentageDecimal * 100 = 10%
// ticks = percentage / widthPercentage = 2.5
//
// ticks is a float64, so we cast it to an int which rounds it down to 2.
// Values are cast to floats to prevent integer division.
current := p.Written()
total := p.Total()
// width := is passed as a parameter
widthPercentage := float64(100) / float64(width)
percentageDecimal := float64(current) / float64(total)
percentage := percentageDecimal * 100
ticks := int(percentage / widthPercentage)
// Ensure that we never get a negative number of ticks, this will prevent strings#Repeat
// from panicking. A negative number of ticks is likely to happen when the total size is
// inaccurate, such as when we are going off of rough disk usage calculation.
if ticks < 0 {
ticks = 0
} else if ticks > width {
ticks = width
}
bar := strings.Repeat("=", ticks) + strings.Repeat(" ", width-ticks)
return "[" + bar + "] " + system.FormatBytes(current) + " / " + system.FormatBytes(total)
}
type Archive struct {
// BasePath is the absolute path to create the archive from where Files and Ignore are
// relative to.
@@ -146,12 +69,12 @@ type Archive struct {
Files []string
// Progress wraps the writer of the archive to pass through the progress tracker.
Progress *Progress
Progress *progress.Progress
}
// Create creates an archive at dst with all the files defined in the
// included Files array.
func (a *Archive) Create(dst string) error {
func (a *Archive) Create(ctx context.Context, dst string) error {
f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return err
@@ -169,6 +92,11 @@ func (a *Archive) Create(dst string) error {
writer = f
}
return a.Stream(ctx, writer)
}
// Stream .
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
// Choose which compression level to use based on the compression_level configuration option
var compressionLevel int
switch config.Get().System.Backups.CompressionLevel {
@@ -183,7 +111,7 @@ func (a *Archive) Create(dst string) error {
}
// Create a new gzip writer around the file.
gw, _ := pgzip.NewWriterLevel(writer, compressionLevel)
gw, _ := pgzip.NewWriterLevel(w, compressionLevel)
_ = gw.SetConcurrency(1<<20, 1)
defer gw.Close()
@@ -197,16 +125,16 @@ func (a *Archive) Create(dst string) error {
options := &godirwalk.Options{
FollowSymbolicLinks: false,
Unsorted: true,
Callback: a.callback(pw),
}
// If we're specifically looking for only certain files, or have requested
// that certain files be ignored we'll update the callback function to reflect
// that request.
var callback godirwalk.WalkFunc
if len(a.Files) == 0 && len(a.Ignore) > 0 {
i := ignore.CompileIgnoreLines(strings.Split(a.Ignore, "\n")...)
options.Callback = a.callback(pw, func(_ string, rp string) error {
callback = a.callback(pw, func(_ string, rp string) error {
if i.MatchesPath(rp) {
return godirwalk.SkipThis
}
@@ -214,7 +142,19 @@ func (a *Archive) Create(dst string) error {
return nil
})
} else if len(a.Files) > 0 {
options.Callback = a.withFilesCallback(pw)
callback = a.withFilesCallback(pw)
} else {
callback = a.callback(pw)
}
// Set the callback function, wrapped with support for context cancellation.
options.Callback = func(path string, de *godirwalk.Dirent) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return callback(path, de)
}
}
// Recursively walk the path we are archiving.

View File

@@ -1,48 +0,0 @@
package filesystem
import (
"bytes"
"testing"
. "github.com/franela/goblin"
)
func TestProgress(t *testing.T) {
g := Goblin(t)
g.Describe("Progress", func() {
g.It("properly initializes", func() {
total := int64(1000)
p := NewProgress(total)
g.Assert(p).IsNotNil()
g.Assert(p.Total()).Equal(total)
g.Assert(p.Written()).Equal(int64(0))
})
g.It("increments written when Write is called", func() {
v := []byte("hello")
p := NewProgress(1000)
_, err := p.Write(v)
g.Assert(err).IsNil()
g.Assert(p.Written()).Equal(int64(len(v)))
})
g.It("renders a progress bar", func() {
v := bytes.Repeat([]byte{' '}, 100)
p := NewProgress(1000)
_, err := p.Write(v)
g.Assert(err).IsNil()
g.Assert(p.Written()).Equal(int64(len(v)))
g.Assert(p.Progress(25)).Equal("[== ] 100 B / 1000 B")
})
g.It("renders a progress bar when written exceeds total", func() {
v := bytes.Repeat([]byte{' '}, 1001)
p := NewProgress(1000)
_, err := p.Write(v)
g.Assert(err).IsNil()
g.Assert(p.Written()).Equal(int64(len(v)))
g.Assert(p.Progress(25)).Equal("[=========================] 1001 B / 1000 B")
})
})
}

View File

@@ -6,6 +6,7 @@ import (
"compress/gzip"
"context"
"fmt"
"io"
iofs "io/fs"
"os"
"path"
@@ -21,7 +22,7 @@ import (
"github.com/mholt/archiver/v4"
)
// CompressFiles compresses all of the files matching the given paths in the
// CompressFiles compresses all the files matching the given paths in the
// specified directory. This function also supports passing nested paths to only
// compress certain files and folders when working in a larger directory. This
// effectively creates a local backup, but rather than ignoring specific files
@@ -36,7 +37,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
return nil, err
}
// Take all of the paths passed in and merge them together with the root directory we've gotten.
// Take all the paths passed in and merge them together with the root directory we've gotten.
for i, p := range paths {
paths[i] = filepath.Join(cleanedRootDir, p)
}
@@ -52,7 +53,7 @@ func (fs *Filesystem) CompressFiles(dir string, paths []string) (os.FileInfo, er
fmt.Sprintf("archive-%s.tar.gz", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")),
)
if err := a.Create(d); err != nil {
if err := a.Create(context.Background(), d); err != nil {
return nil, err
}
@@ -147,6 +148,7 @@ func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file
if err != nil {
return err
}
// TODO: defer file close?
// Identify the type of archive we are dealing with.
format, input, err := archiver.Identify(filepath.Base(file), f)
@@ -157,13 +159,49 @@ func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file
return err
}
return fs.extractStream(ctx, extractStreamOptions{
Directory: dir,
Format: format,
Reader: input,
})
}
// ExtractStreamUnsafe extracts an archive read directly from r into the given
// directory. The stream is identified via archiver.Identify using a fixed
// "archive.tar.gz" name hint, so the stream is expected to be a gzipped
// tarball. "Unsafe" mirrors DecompressFileUnsafe — presumably dir has already
// been validated/cleaned by the caller; TODO confirm against callers.
//
// Returns a filesystem error with ErrCodeUnknownArchive if the stream format
// cannot be identified.
func (fs *Filesystem) ExtractStreamUnsafe(ctx context.Context, dir string, r io.Reader) error {
format, input, err := archiver.Identify("archive.tar.gz", r)
if err != nil {
if errors.Is(err, archiver.ErrNoMatch) {
return newFilesystemError(ErrCodeUnknownArchive, err)
}
return err
}
return fs.extractStream(ctx, extractStreamOptions{
Directory: dir,
Format: format,
Reader: input,
})
}
type extractStreamOptions struct {
// The directory to extract the archive to.
Directory string
// File name of the archive.
FileName string
// Format of the archive.
Format archiver.Format
// Reader for the archive.
Reader io.Reader
}
func (fs *Filesystem) extractStream(ctx context.Context, opts extractStreamOptions) error {
// Decompress and extract archive
if ex, ok := format.(archiver.Extractor); ok {
return ex.Extract(ctx, input, nil, func(ctx context.Context, f archiver.File) error {
if ex, ok := opts.Format.(archiver.Extractor); ok {
return ex.Extract(ctx, opts.Reader, nil, func(ctx context.Context, f archiver.File) error {
if f.IsDir() {
return nil
}
p := filepath.Join(dir, ExtractNameFromArchive(f))
p := filepath.Join(opts.Directory, ExtractNameFromArchive(f))
// If it is ignored, just don't do anything with the file and skip over it.
if err := fs.IsIgnored(p); err != nil {
return nil
@@ -174,20 +212,19 @@ func (fs *Filesystem) DecompressFileUnsafe(ctx context.Context, dir string, file
}
defer r.Close()
if err := fs.Writefile(p, r); err != nil {
return wrapError(err, file)
return wrapError(err, opts.FileName)
}
// Update the file permissions to the one set in the archive.
if err := fs.Chmod(p, f.Mode()); err != nil {
return wrapError(err, file)
return wrapError(err, opts.FileName)
}
// Update the file modification time to the one set in the archive.
if err := fs.Chtimes(p, f.ModTime(), f.ModTime()); err != nil {
return wrapError(err, file)
return wrapError(err, opts.FileName)
}
return nil
})
}
return nil
}

View File

@@ -0,0 +1,19 @@
package installer
// validationError indicates that user-supplied input failed validation
// before any remote work was attempted.
type validationError struct {
	msg string
}

// Error satisfies the error interface, returning the validation message.
func (e *validationError) Error() string {
	return e.msg
}

// NewValidationError returns a new validation error carrying the given
// message. Callers can detect it with IsValidationError.
func NewValidationError(msg string) error {
	return &validationError{msg: msg}
}

// IsValidationError reports whether err is a *validationError, which marks a
// client-input problem rather than an internal failure.
func IsValidationError(err error) bool {
	_, ok := err.(*validationError)
	return ok
}

View File

@@ -0,0 +1,52 @@
package installer
import (
"context"
"emperror.dev/errors"
"github.com/asaskevich/govalidator"
"github.com/pterodactyl/wings/remote"
"github.com/pterodactyl/wings/server"
)
// Installer ties a freshly initialized server instance to an installation
// request received from the remote API.
type Installer struct {
// server is the instance created by New via manager.InitServer.
server *server.Server
// StartOnCompletion is copied from ServerDetails.StartOnCompletion;
// presumably it causes the server to be booted once the install finishes —
// TODO confirm against the code that consumes it.
StartOnCompletion bool
}

// ServerDetails is the request payload describing the server to install.
type ServerDetails struct {
// UUID of the server; must be a valid UUIDv4 (validated by New).
UUID string `json:"uuid"`
// StartOnCompletion mirrors the flag of the same name on Installer.
StartOnCompletion bool `json:"start_on_completion"`
}
// New validates the received details and, if they are well formed, fetches
// the server's configuration from the remote API and initializes a server
// instance for it. This should be manually run before calling Execute().
func New(ctx context.Context, manager *server.Manager, details ServerDetails) (*Installer, error) {
	// Reject obviously bad input before making any remote calls.
	if !govalidator.IsUUIDv4(details.UUID) {
		return nil, NewValidationError("uuid provided was not in a valid format")
	}

	cfg, err := manager.Client().GetServerConfiguration(ctx, details.UUID)
	if err != nil {
		// Errors from the remote API get wrapped with extra context; anything
		// else is surfaced as an unexpected internal failure.
		if remote.IsRequestError(err) {
			return nil, errors.WrapIf(err, "installer: could not get server configuration from remote API")
		}
		return nil, errors.WithStackIf(err)
	}

	// Instantiate the server struct from the configuration returned by the
	// remote API so everything gets set up correctly.
	srv, err := manager.InitServer(cfg)
	if err != nil {
		return nil, errors.WrapIf(err, "installer: could not init server instance")
	}

	return &Installer{server: srv, StartOnCompletion: details.StartOnCompletion}, nil
}
// Server returns the server instance that was initialized for this
// installer by New.
func (i *Installer) Server() *server.Server {
return i.server
}

View File

@@ -0,0 +1,52 @@
package transfer
import (
"context"
"fmt"
"io"
"github.com/pterodactyl/wings/internal/progress"
"github.com/pterodactyl/wings/server/filesystem"
)
// Archive returns an archive that can be used to stream the contents of a
// server. The archive is created lazily on first call and cached on the
// transfer for subsequent calls.
func (t *Transfer) Archive() (*Archive, error) {
	if t.archive != nil {
		return t.archive, nil
	}

	// The server's disk usage seeds the progress tracker's total. This is a
	// rough estimate, so reported progress may be slightly inaccurate.
	rawSize, err := t.Server.Filesystem().DiskUsage(true)
	if err != nil {
		return nil, fmt.Errorf("transfer: failed to get server disk usage: %w", err)
	}

	t.archive = NewArchive(t, uint64(rawSize))
	return t.archive, nil
}
// Archive represents an archive used to transfer the contents of a server.
type Archive struct {
// archive is the underlying filesystem archive that performs the actual
// walking and streaming of the server's files.
archive *filesystem.Archive
}
// NewArchive returns a new archive associated with the given transfer. The
// size parameter seeds the progress tracker used for upload reporting.
func NewArchive(t *Transfer, size uint64) *Archive {
	// Archive the server's filesystem root, tracking bytes written through a
	// fresh progress instance.
	fa := &filesystem.Archive{
		BasePath: t.Server.Filesystem().Path(),
		Progress: progress.NewProgress(size),
	}
	return &Archive{archive: fa}
}
// Stream writes the contents of the archive to w, blocking until streaming
// completes or ctx is cancelled.
func (a *Archive) Stream(ctx context.Context, w io.Writer) error {
return a.archive.Stream(ctx, w)
}

// Progress returns the progress tracker of the underlying archive.
func (a *Archive) Progress() *progress.Progress {
return a.archive.Progress
}

4
server/transfer/doc.go Normal file
View File

@@ -0,0 +1,4 @@
// Package transfer handles all logic related to transferring servers between
// two nodes. This includes the logic for archiving a server on the source node
// and logic for importing a server from the source node into the target node.
package transfer

View File

@@ -0,0 +1,57 @@
package transfer
import (
"sync"
)
// Process-wide registries for transfers this node is receiving (incoming)
// and transfers it is sending to another node (outgoing).
var (
incomingTransfers = NewManager()
outgoingTransfers = NewManager()
)

// Incoming returns the shared transfer manager for incoming transfers.
func Incoming() *Manager {
return incomingTransfers
}

// Outgoing returns the shared transfer manager for outgoing transfers.
func Outgoing() *Manager {
return outgoingTransfers
}
// Manager tracks in-flight transfers keyed by server ID. All methods are safe
// for concurrent use; the map is guarded by mu.
type Manager struct {
// mu guards transfers.
mu sync.RWMutex
// transfers maps a server ID to its active transfer.
transfers map[string]*Transfer
}

// NewManager returns a new transfer manager with an empty transfer map.
func NewManager() *Manager {
return &Manager{
transfers: make(map[string]*Transfer),
}
}
// Add registers a transfer with the manager, keyed by the ID of the server
// being transferred. Any existing entry for the same server is replaced.
func (m *Manager) Add(t *Transfer) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.transfers[t.Server.ID()] = t
}
// Remove deletes the given transfer's entry from the manager, keyed by its
// server ID. Removing a transfer that is not tracked is a no-op.
func (m *Manager) Remove(t *Transfer) {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.transfers, t.Server.ID())
}
// Get returns the transfer tracked for the given server ID, or nil when no
// transfer is currently registered for that server.
func (m *Manager) Get(id string) *Transfer {
	m.mu.RLock()
	defer m.mu.RUnlock()
	t := m.transfers[id]
	return t
}

159
server/transfer/source.go Normal file
View File

@@ -0,0 +1,159 @@
package transfer
import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"time"
"github.com/pterodactyl/wings/internal/progress"
)
// PushArchiveToTarget POSTs the archive to the target node and returns the
// response body on success. The server's files are streamed through a chain
// of pipes — archive -> sha256 tee -> multipart form -> HTTP request body —
// so the archive is never buffered on disk or fully in memory. A "checksum"
// form field with the hex-encoded SHA-256 of the archive is appended after
// the file part. The call blocks until the upload finishes, fails, or the
// transfer's context is cancelled.
func (t *Transfer) PushArchiveToTarget(url, token string) ([]byte, error) {
	ctx, cancel := context.WithCancel(t.ctx)
	defer cancel()

	t.SendMessage("Preparing to stream server data to destination...")
	t.SetStatus(StatusProcessing)

	a, err := t.Archive()
	if err != nil {
		t.Error(err, "Failed to get archive for transfer.")
		return nil, errors.New("failed to get archive for transfer")
	}

	t.SendMessage("Streaming archive to destination...")

	// Send the upload progress to the websocket every 5 seconds. ctx2 is
	// cancelled once the archive has been fully streamed so the updates stop
	// while we wait on the destination's response.
	ctx2, cancel2 := context.WithCancel(ctx)
	defer cancel2()
	go func(ctx context.Context, p *progress.Progress, tc *time.Ticker) {
		defer tc.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-tc.C:
				t.SendMessage("Uploading " + p.Progress(25))
			}
		}
	}(ctx2, a.Progress(), time.NewTicker(5*time.Second))

	// Create a new request using the pipe as the body.
	body, writer := io.Pipe()
	defer body.Close()
	defer writer.Close()
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", token)

	// Create a new multipart writer that writes the archive to the pipe.
	mp := multipart.NewWriter(writer)
	defer mp.Close()
	req.Header.Set("Content-Type", mp.FormDataContentType())

	// Create a new goroutine to write the archive to the pipe used by the
	// multipart writer. Closing errChan signals the select below that the
	// stream side is done, whether or not an error occurred.
	errChan := make(chan error)
	go func() {
		defer close(errChan)
		defer writer.Close()
		defer mp.Close()

		// Second pipe: the archive writes into pw, and everything read back
		// out of src is tee'd through the sha256 hasher on its way to the
		// multipart file part.
		src, pw := io.Pipe()
		defer src.Close()
		defer pw.Close()

		h := sha256.New()
		tee := io.TeeReader(src, h)

		dest, err := mp.CreateFormFile("archive", "archive.tar.gz")
		if err != nil {
			errChan <- errors.New("failed to create form file")
			return
		}

		ch := make(chan error)
		go func() {
			defer close(ch)
			if _, err := io.Copy(dest, tee); err != nil {
				ch <- fmt.Errorf("failed to stream archive to destination: %w", err)
				return
			}
			t.Log().Debug("finished copying dest to tee")
		}()

		if err := a.Stream(ctx, pw); err != nil {
			errChan <- errors.New("failed to stream archive to pipe")
			return
		}
		t.Log().Debug("finished streaming archive to pipe")

		// Close the pipe writer early to release resources and ensure that the data gets flushed.
		_ = pw.Close()

		// Wait for the copy to finish before we continue.
		t.Log().Debug("waiting on copy to finish")
		if err := <-ch; err != nil {
			errChan <- err
			return
		}

		if err := mp.WriteField("checksum", hex.EncodeToString(h.Sum(nil))); err != nil {
			errChan <- errors.New("failed to stream checksum")
			return
		}
		// Stop the periodic progress messages now that the data is sent.
		cancel2()

		t.SendMessage("Finished streaming archive to destination.")
		if err := mp.Close(); err != nil {
			t.Log().WithError(err).Error("error while closing multipart writer")
		}
		t.Log().Debug("closed multipart writer")
	}()

	t.Log().Debug("sending archive to destination")
	// A zero timeout is deliberate: the upload may take arbitrarily long and
	// is cancelled through the request context instead.
	client := http.Client{Timeout: 0}
	res, err := client.Do(req)
	t.Log().Debug("waiting for stream to complete")
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case err2 := <-errChan:
		t.Log().Debug("stream completed")
		if err != nil || err2 != nil {
			// BUGFIX: client.Do wraps context errors inside a *url.Error, so
			// the previous `err == context.Canceled` comparison never
			// matched; errors.Is unwraps correctly.
			if errors.Is(err, context.Canceled) {
				return nil, err
			}
			// BUGFIX: when the request itself succeeded (err == nil) but the
			// multipart stream failed, the response body was previously
			// leaked; close it so the connection can be reused.
			if err == nil && res != nil {
				res.Body.Close()
			}
			t.Log().WithError(err).Debug("failed to send archive to destination")
			return nil, fmt.Errorf("http error: %w, multipart error: %v", err, err2)
		}
		defer res.Body.Close()
		t.Log().Debug("received response from destination")

		v, err := io.ReadAll(res.Body)
		if err != nil {
			return nil, fmt.Errorf("failed to read response body: %w", err)
		}

		// A non-200 status means the destination rejected the archive; its
		// body is surfaced verbatim as the error message.
		if res.StatusCode != http.StatusOK {
			return nil, errors.New(string(v))
		}

		return v, nil
	}
}

128
server/transfer/transfer.go Normal file
View File

@@ -0,0 +1,128 @@
package transfer
import (
"context"
"time"
"github.com/apex/log"
"github.com/mitchellh/colorstring"
"github.com/pterodactyl/wings/server"
"github.com/pterodactyl/wings/system"
)
// Status represents the current lifecycle state of a transfer; see the
// Status* constants for the full set of values.
type Status string

// String satisfies the fmt.Stringer interface.
func (s Status) String() string {
return string(s)
}
// The full set of statuses a transfer may report over its lifetime.
const (
// StatusPending is the status of a transfer when it is first created.
StatusPending Status = "pending"
// StatusProcessing is the status of a transfer when it is currently in
// progress, such as when the archive is being streamed to the target node.
StatusProcessing Status = "processing"
// StatusCancelling is the status of a transfer when it is in the process of
// being cancelled.
StatusCancelling Status = "cancelling"
// StatusCancelled is the final status of a transfer when it has been
// cancelled.
StatusCancelled Status = "cancelled"
// StatusFailed is the final status of a transfer when it has failed.
StatusFailed Status = "failed"
// StatusCompleted is the final status of a transfer when it has completed.
StatusCompleted Status = "completed"
)
// Transfer represents a transfer of a server from one node to another.
type Transfer struct {
// ctx is the context for the transfer; all transfer operations derive from it.
ctx context.Context
// cancel is used to cancel all ongoing transfer operations for the server.
// Stored as a pointer so Cancel can distinguish an uninitialized transfer
// (nil) from one created through New.
cancel *context.CancelFunc
// Server associated with the transfer.
Server *server.Server
// status of the transfer; accessed atomically, see Status/SetStatus.
status *system.Atomic[Status]
// archive is the archive that is being created for the transfer; populated
// lazily by Archive().
archive *Archive
}
// New returns a new transfer instance for the given server. The transfer's
// internal context is derived from ctx, so cancelling the transfer stops all
// operations running under it.
func New(ctx context.Context, s *server.Server) *Transfer {
	ctx, cancel := context.WithCancel(ctx)

	t := Transfer{
		ctx:    ctx,
		cancel: &cancel,
		Server: s,
		status: system.NewAtomic(StatusPending),
	}
	return &t
}
// Context returns the context for the transfer, which is cancelled when the
// transfer is cancelled.
func (t *Transfer) Context() context.Context {
return t.ctx
}
// Cancel cancels the transfer by cancelling its context. Calling Cancel on a
// transfer that is already cancelling, or that has reached a final state
// (cancelled, completed, or failed), is a no-op.
func (t *Transfer) Cancel() {
	switch t.Status() {
	case StatusCancelling, StatusCancelled, StatusCompleted, StatusFailed:
		return
	}

	// A nil cancel func means this transfer was never initialized via New.
	if t.cancel == nil {
		return
	}

	t.SetStatus(StatusCancelling)
	(*t.cancel)()
}
// Status returns the current status of the transfer.
func (t *Transfer) Status() Status {
return t.status.Load()
}

// SetStatus sets the status of the transfer and publishes the new value on
// the server's event bus as a TransferStatusEvent.
func (t *Transfer) SetStatus(s Status) {
// TODO: prevent certain status changes from happening.
// If we are cancelling, then we can't go back to processing.
t.status.Store(s)
t.Server.Events().Publish(server.TransferStatusEvent, s)
}
// SendMessage sends a message to the server's console by publishing it as a
// TransferLogsEvent, colorized and timestamped for display to the user.
func (t *Transfer) SendMessage(v string) {
t.Server.Events().Publish(
server.TransferLogsEvent,
colorstring.Color("[yellow][bold]"+time.Now().Format(time.RFC1123)+" [Transfer System] [Source Node]:[default] "+v),
)
}

// Error logs err at error level and forwards the human-readable message v to
// the server's console via SendMessage.
func (t *Transfer) Error(err error, v string) {
t.Log().WithError(err).Error(v)
t.SendMessage(v)
}
// Log returns a logger for the transfer, scoped to the associated server
// when one is attached.
func (t *Transfer) Log() *log.Entry {
	if t.Server != nil {
		return t.Server.Log().WithField("subsystem", "transfer")
	}
	return log.WithField("subsystem", "transfer")
}