Correctly handle migrations to a VHD setup

Dane Everitt 2021-07-04 12:25:38 -07:00 committed by DaneEveritt
parent 265f8a6b39
commit d8a7bf2dde
4 changed files with 14 additions and 13 deletions

Changed file 1 of 4:

@@ -4,7 +4,6 @@ import (
 	"context"
 	"os"
 	"os/exec"
-	"path/filepath"
 	"strings"
 	"emperror.dev/errors"
@@ -32,7 +31,7 @@ func newMigrateVHDCommand() *cobra.Command {
 		},
 		Run: func(cmd *cobra.Command, args []string) {
 			client := remote.NewFromConfig(config.Get())
-			manager, err := server.NewManager(cmd.Context(), client)
+			manager, err := server.NewManager(cmd.Context(), client, true)
 			if err != nil {
 				log.WithField("error", err).Fatal("failed to create new server manager")
 			}
@@ -48,11 +47,6 @@ func newMigrateVHDCommand() *cobra.Command {
 // Run executes the migration command.
 func (m *MigrateVHDCommand) Run(ctx context.Context) error {
-	root := filepath.Join(config.Get().System.Data, ".disks")
-	if err := os.MkdirAll(root, 0600); err != nil {
-		return errors.Wrap(err, "failed to create root directory for virtual disks")
-	}
 	for _, s := range m.manager.All() {
 		s.Log().Debug("starting migration of server contents to virtual disk...")
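The migration command no longer pre-creates the ".disks" root directory (which is why the "path/filepath" import goes away); the parent directory for each disk image is now created on demand inside Disk.Allocate, shown in the third file below. A minimal, self-contained sketch, with hypothetical paths and permission bits, of why per-call creation is safe: os.MkdirAll is a no-op when the directory already exists.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Hypothetical root; the daemon builds the real one from its data directory.
	root := filepath.Join(os.TempDir(), "wings-example", ".disks")

	// MkdirAll succeeds whether or not the directory already exists, so
	// creating the parent directory from every allocation call is harmless.
	for i := 0; i < 3; i++ {
		if err := os.MkdirAll(root, 0o700); err != nil {
			fmt.Println("mkdir failed:", err)
			return
		}
	}
	fmt.Println("created:", root)
}
```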

Changed file 2 of 4:

@@ -139,7 +139,7 @@ func rootCmdRun(cmd *cobra.Command, _ []string) {
 		log.WithField("error", err).Fatal("failed to initialize database")
 	}
-	manager, err := server.NewManager(cmd.Context(), pclient)
+	manager, err := server.NewManager(cmd.Context(), pclient, false)
 	if err != nil {
 		log.WithField("error", err).Fatal("failed to load server configurations")
 	}

Changed file 3 of 4:

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"path"
 	"strings"
 	"emperror.dev/errors"
@@ -182,6 +183,10 @@ func (d *Disk) Allocate(ctx context.Context) error {
 	} else if err != nil {
 		return errors.Wrap(err, "vhd: failed to check for existence of root disk")
 	}
+	trim := path.Base(d.diskPath)
+	if err := os.MkdirAll(strings.TrimSuffix(d.diskPath, trim), 0600); err != nil {
+		return errors.Wrap(err, "vhd: failed to create base vhd disk directory")
+	}
 	// We use 1024 as the multiplier for all of the disk space logic within the
 	// application. Passing "K" (/1024) is the same as "KiB" for fallocate, but
 	// is different than "KB" (/1000).
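The added lines derive the disk image's parent directory by trimming the file name (path.Base) off the full disk path, then ensure that directory exists before allocation. A standalone sketch, using a hypothetical disk path, of what that trimming produces and of how the 1024-based "K" suffix described in the comment maps onto a fallocate invocation.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	// Hypothetical disk path; the real value comes from the daemon's data
	// directory and the server's identifier.
	diskPath := "/var/lib/example/.disks/aabbccdd.img"

	// Trimming the base name off the full path yields the parent directory
	// that Allocate must create before the disk image can be allocated.
	parent := strings.TrimSuffix(diskPath, path.Base(diskPath))
	fmt.Println(parent) // prints: /var/lib/example/.disks/

	// Disk space is tracked with a 1024 multiplier, so a size in MiB is
	// converted to KiB and passed to fallocate with the 1024-based "K"
	// suffix rather than the 1000-based "KB" suffix.
	sizeMiB := int64(10240)
	fmt.Printf("fallocate -l %dK %s\n", sizeMiB*1024, diskPath)
}
```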

Changed file 4 of 4:

@@ -22,16 +22,18 @@ import (
 )
 
 type Manager struct {
-	mu      sync.RWMutex
-	client  remote.Client
-	servers []*Server
+	mu                    sync.RWMutex
+	client                remote.Client
+	skipVhdInitialization bool
+	servers               []*Server
 }
 
 // NewManager returns a new server manager instance. This will boot up all the
 // servers that are currently present on the filesystem and set them into the
 // manager.
-func NewManager(ctx context.Context, client remote.Client) (*Manager, error) {
+func NewManager(ctx context.Context, client remote.Client, skipVhdInit bool) (*Manager, error) {
 	m := NewEmptyManager(client)
+	m.skipVhdInitialization = skipVhdInit
 	if err := m.init(ctx); err != nil {
 		return nil, err
 	}
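NewManager now takes a skipVhdInit flag and records it on the manager before init runs, so the per-server setup performed during init can consult it. A stripped-down, hypothetical sketch of that ordering; none of the names below are taken from the codebase beyond the skipVhdInitialization field shown above.

```go
package main

import (
	"context"
	"fmt"
)

// manager is a hypothetical, minimal stand-in for the Manager in this diff.
type manager struct {
	skipVhdInitialization bool
}

// newManager mirrors the ordering above: store the flag first, then run the
// initialization that depends on it.
func newManager(ctx context.Context, skipVhdInit bool) (*manager, error) {
	m := &manager{}
	m.skipVhdInitialization = skipVhdInit
	if err := m.init(ctx); err != nil {
		return nil, err
	}
	return m, nil
}

func (m *manager) init(_ context.Context) error {
	fmt.Println("booting servers; skip VHD initialization:", m.skipVhdInitialization)
	return nil
}

func main() {
	if _, err := newManager(context.Background(), true); err != nil {
		fmt.Println(err)
	}
}
```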
@@ -198,7 +200,7 @@ func (m *Manager) InitServer(ctx context.Context, data remote.ServerConfiguratio
 	s.fs = filesystem.New(s.Id(), s.DiskSpace(), s.Config().Egg.FileDenylist)
 	// If this is a virtual filesystem we need to go ahead and mount the disk
 	// so that everything is accessible.
-	if s.fs.IsVirtual() {
+	if s.fs.IsVirtual() && !m.skipVhdInitialization {
 		log.WithField("server", s.Id()).Info("mounting virtual disk for server")
 		if err := s.fs.MountDisk(ctx); err != nil {
 			return nil, err
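The updated guard only mounts a server's disk when the filesystem is virtual and the manager was not created with the skip flag, which is what lets the migration command enumerate servers without mounting anything. A small, hypothetical illustration of that condition and the two call sites shown earlier (the daemon passes false, the migrate-vhd command passes true).

```go
package main

import "fmt"

// shouldMount is a hypothetical helper expressing the guard added above.
func shouldMount(isVirtual, skipVhdInitialization bool) bool {
	return isVirtual && !skipVhdInitialization
}

func main() {
	fmt.Println(shouldMount(true, false))  // daemon boot: disk is mounted
	fmt.Println(shouldMount(true, true))   // migrate-vhd run: mount is skipped
	fmt.Println(shouldMount(false, false)) // non-virtual filesystem: nothing to mount
}
```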