[lxc-devel] [lxd/master] vmqemu package
tomponline on Github
lxc-bot at linuxcontainers.org
Fri Dec 6 12:06:34 UTC 2019
A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 583 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20191206/7715c098/attachment-0001.bin>
-------------- next part --------------
From 6bc5e2c0acba1986bd18f7afd49ac0aa7d6e7389 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 10:55:32 +0000
Subject: [PATCH 1/9] lxd/vmqemu: Moves vmqemu files into sub folder for their
own package
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/{ => instance/vmqemu}/vm_qemu.go | 0
lxd/{ => instance/vmqemu}/vm_qemu_cmd.go | 0
2 files changed, 0 insertions(+), 0 deletions(-)
rename lxd/{ => instance/vmqemu}/vm_qemu.go (100%)
rename lxd/{ => instance/vmqemu}/vm_qemu_cmd.go (100%)
diff --git a/lxd/vm_qemu.go b/lxd/instance/vmqemu/vm_qemu.go
similarity index 100%
rename from lxd/vm_qemu.go
rename to lxd/instance/vmqemu/vm_qemu.go
diff --git a/lxd/vm_qemu_cmd.go b/lxd/instance/vmqemu/vm_qemu_cmd.go
similarity index 100%
rename from lxd/vm_qemu_cmd.go
rename to lxd/instance/vmqemu/vm_qemu_cmd.go
From 16e6d05960739f581fde3a1c53346e80c82ab841 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 10:57:12 +0000
Subject: [PATCH 2/9] lxd/instance/vmqemu/vm/qemu: Updates VMQemu to exist in
own package
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/instance/vmqemu/vm_qemu.go | 290 +++++++++++++++++----------------
1 file changed, 149 insertions(+), 141 deletions(-)
diff --git a/lxd/instance/vmqemu/vm_qemu.go b/lxd/instance/vmqemu/vm_qemu.go
index 8af8c33376..0b3e56e95d 100644
--- a/lxd/instance/vmqemu/vm_qemu.go
+++ b/lxd/instance/vmqemu/vm_qemu.go
@@ -1,4 +1,4 @@
-package main
+package vmqemu
import (
"bytes"
@@ -48,14 +48,15 @@ import (
"github.com/lxc/lxd/shared/units"
)
-var vmQemuAgentOfflineErr = fmt.Errorf("LXD VM agent isn't currently running")
+var errVMQemuAgentOffline = fmt.Errorf("LXD VM agent isn't currently running")
var vmConsole = map[int]bool{}
var vmConsoleLock sync.Mutex
-func vmQemuLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (instance.Instance, error) {
+// Load creates a VMQemu instance from the supplied InstanceArgs.
+func Load(s *state.State, args db.InstanceArgs, profiles []api.Profile) (instance.Instance, error) {
// Create the instance struct.
- vm := vmQemuInstantiate(s, args)
+ vm := Instantiate(s, args, nil)
// Expand config and devices.
err := vm.expandConfig(profiles)
@@ -71,9 +72,11 @@ func vmQemuLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (i
return vm, nil
}
-// vmQemuInstantiate creates a vmQemu struct without initializing it.
-func vmQemuInstantiate(s *state.State, args db.InstanceArgs) *vmQemu {
- vm := &vmQemu{
+// Instantiate creates a vmQemu struct without expanding config. The expandedDevices argument is
+// used during device config validation when the devices have already been expanded and we do not
+// have access to the profiles used to do it. This can be safely passed as nil if not required.
+func Instantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) *VMQemu {
+ vm := &VMQemu{
state: s,
id: args.ID,
project: args.Project,
@@ -106,13 +109,18 @@ func vmQemuInstantiate(s *state.State, args db.InstanceArgs) *vmQemu {
vm.lastUsedDate = time.Time{}
}
+ // This is passed during expanded config validation.
+ if expandedDevices != nil {
+ vm.expandedDevices = expandedDevices
+ }
+
return vm
}
-// vmQemuCreate creates a new storage volume record and returns an initialised Instance.
-func vmQemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
+// Create creates a new storage volume record and returns an initialised Instance.
+func Create(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
// Create the instance struct.
- vm := &vmQemu{
+ vm := &VMQemu{
state: s,
id: args.ID,
project: args.Project,
@@ -170,13 +178,13 @@ func vmQemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, erro
}
// Validate expanded config.
- err = containerValidConfig(s.OS, vm.expandedConfig, false, true)
+ err = instance.ValidConfig(s.OS, vm.expandedConfig, false, true)
if err != nil {
logger.Error("Failed creating instance", ctxMap)
return nil, err
}
- err = instanceValidDevices(s, s.Cluster, vm.Type(), vm.Name(), vm.expandedDevices, true)
+ err = instance.ValidDevices(s, s.Cluster, vm.Type(), vm.Name(), vm.expandedDevices, true)
if err != nil {
logger.Error("Failed creating instance", ctxMap)
return nil, errors.Wrap(err, "Invalid devices")
@@ -238,8 +246,8 @@ func vmQemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, erro
return vm, nil
}
-// The QEMU virtual machine driver.
-type vmQemu struct {
+// VMQemu is the QEMU virtual machine driver.
+type VMQemu struct {
// Properties.
architecture int
dbType instancetype.Type
@@ -279,7 +287,7 @@ type vmQemu struct {
// getAgentClient returns the current agent client handle. To avoid TLS setup each time this
// function is called, the handle is cached internally in the vmQemu struct.
-func (vm *vmQemu) getAgentClient() (*http.Client, error) {
+func (vm *VMQemu) getAgentClient() (*http.Client, error) {
if vm.agentClient != nil {
return vm.agentClient, nil
}
@@ -299,8 +307,8 @@ func (vm *vmQemu) getAgentClient() (*http.Client, error) {
}
// getStoragePool returns the current storage pool handle. To avoid a DB lookup each time this
-// function is called, the handle is cached internally in the vmQemu struct.
-func (vm *vmQemu) getStoragePool() (storagePools.Pool, error) {
+// function is called, the handle is cached internally in the VMQemu struct.
+func (vm *VMQemu) getStoragePool() (storagePools.Pool, error) {
if vm.storagePool != nil {
return vm.storagePool, nil
}
@@ -314,7 +322,7 @@ func (vm *vmQemu) getStoragePool() (storagePools.Pool, error) {
return vm.storagePool, nil
}
-func (vm *vmQemu) getMonitorEventHandler() func(event string, data map[string]interface{}) {
+func (vm *VMQemu) getMonitorEventHandler() func(event string, data map[string]interface{}) {
id := vm.id
state := vm.state
@@ -323,7 +331,7 @@ func (vm *vmQemu) getMonitorEventHandler() func(event string, data map[string]in
return
}
- inst, err := instanceLoadById(state, id)
+ inst, err := instance.LoadByID(state, id)
if err != nil {
logger.Errorf("Failed to load instance with id=%d", id)
return
@@ -336,7 +344,7 @@ func (vm *vmQemu) getMonitorEventHandler() func(event string, data map[string]in
target = "reboot"
}
- err = inst.(*vmQemu).OnStop(target)
+ err = inst.(*VMQemu).OnStop(target)
if err != nil {
logger.Errorf("Failed to cleanly stop instance '%s': %v", project.Prefix(inst.Project(), inst.Name()), err)
return
@@ -346,7 +354,7 @@ func (vm *vmQemu) getMonitorEventHandler() func(event string, data map[string]in
}
// mount the instance's config volume if needed.
-func (vm *vmQemu) mount() (bool, error) {
+func (vm *VMQemu) mount() (bool, error) {
var pool storagePools.Pool
pool, err := vm.getStoragePool()
if err != nil {
@@ -362,7 +370,7 @@ func (vm *vmQemu) mount() (bool, error) {
}
// unmount the instance's config volume if needed.
-func (vm *vmQemu) unmount() (bool, error) {
+func (vm *VMQemu) unmount() (bool, error) {
var pool storagePools.Pool
pool, err := vm.getStoragePool()
if err != nil {
@@ -378,7 +386,7 @@ func (vm *vmQemu) unmount() (bool, error) {
}
// generateAgentCert creates the necessary server key and certificate if needed.
-func (vm *vmQemu) generateAgentCert() (string, string, string, string, error) {
+func (vm *VMQemu) generateAgentCert() (string, string, string, string, error) {
// Mount the instance's config volume if needed.
ourMount, err := vm.mount()
if err != nil {
@@ -430,7 +438,7 @@ func (vm *vmQemu) generateAgentCert() (string, string, string, string, error) {
return string(agentCert), string(agentKey), string(clientCert), string(clientKey), nil
}
-func (vm *vmQemu) Freeze() error {
+func (vm *VMQemu) Freeze() error {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.getMonitorPath(), vm.getMonitorEventHandler())
if err != nil {
@@ -446,7 +454,7 @@ func (vm *vmQemu) Freeze() error {
return nil
}
-func (vm *vmQemu) OnStop(target string) error {
+func (vm *VMQemu) OnStop(target string) error {
vm.cleanupDevices()
os.Remove(vm.pidFilePath())
os.Remove(vm.getMonitorPath())
@@ -465,7 +473,7 @@ func (vm *vmQemu) OnStop(target string) error {
return nil
}
-func (vm *vmQemu) Shutdown(timeout time.Duration) error {
+func (vm *VMQemu) Shutdown(timeout time.Duration) error {
if !vm.IsRunning() {
return fmt.Errorf("The instance is already stopped")
}
@@ -511,7 +519,7 @@ func (vm *vmQemu) Shutdown(timeout time.Duration) error {
return nil
}
-func (vm *vmQemu) ovmfPath() string {
+func (vm *VMQemu) ovmfPath() string {
if os.Getenv("LXD_OVMF_PATH") != "" {
return os.Getenv("LXD_OVMF_PATH")
}
@@ -519,7 +527,7 @@ func (vm *vmQemu) ovmfPath() string {
return "/usr/share/OVMF"
}
-func (vm *vmQemu) Start(stateful bool) error {
+func (vm *VMQemu) Start(stateful bool) error {
// Ensure the correct vhost_vsock kernel module is loaded before establishing the vsock.
err := util.LoadModule("vhost_vsock")
if err != nil {
@@ -668,7 +676,7 @@ func (vm *vmQemu) Start(stateful bool) error {
return nil
}
-func (vm *vmQemu) setupNvram() error {
+func (vm *VMQemu) setupNvram() error {
srcOvmfFile := filepath.Join(vm.ovmfPath(), "OVMF_VARS.fd")
if vm.expandedConfig["security.secureboot"] == "" || shared.IsTrue(vm.expandedConfig["security.secureboot"]) {
srcOvmfFile = filepath.Join(vm.ovmfPath(), "OVMF_VARS.ms.fd")
@@ -687,7 +695,7 @@ func (vm *vmQemu) setupNvram() error {
return nil
}
-func (vm *vmQemu) qemuArchConfig() (string, string, string, error) {
+func (vm *VMQemu) qemuArchConfig() (string, string, string, error) {
if vm.architecture == osarch.ARCH_64BIT_INTEL_X86 {
conf := `
[global]
@@ -710,7 +718,7 @@ value = "1"
// deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
// removes its device prefix from the keys.
-func (vm *vmQemu) deviceVolatileGetFunc(devName string) func() map[string]string {
+func (vm *VMQemu) deviceVolatileGetFunc(devName string) func() map[string]string {
return func() map[string]string {
volatile := make(map[string]string)
prefix := fmt.Sprintf("volatile.%s.", devName)
@@ -725,7 +733,7 @@ func (vm *vmQemu) deviceVolatileGetFunc(devName string) func() map[string]string
// deviceVolatileSetFunc returns a function that can be called to save a named device's volatile
// config using keys that do not have the device's name prefixed.
-func (vm *vmQemu) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
+func (vm *VMQemu) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
return func(save map[string]string) error {
volatileSave := make(map[string]string)
for k, v := range save {
@@ -737,7 +745,7 @@ func (vm *vmQemu) deviceVolatileSetFunc(devName string) func(save map[string]str
}
// deviceLoad instantiates and validates a new device and returns it along with enriched config.
-func (vm *vmQemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
+func (vm *VMQemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
var configCopy deviceConfig.Device
var err error
@@ -761,7 +769,7 @@ func (vm *vmQemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (
// deviceStart loads a new device and calls its Start() function. After processing the runtime
// config returned from Start(), it also runs the device's Register() function irrespective of
// whether the instance is running or not.
-func (vm *vmQemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, isRunning bool) (*deviceConfig.RunConfig, error) {
+func (vm *VMQemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, isRunning bool) (*deviceConfig.RunConfig, error) {
d, _, err := vm.deviceLoad(deviceName, rawConfig)
if err != nil {
return nil, err
@@ -780,7 +788,7 @@ func (vm *vmQemu) deviceStart(deviceName string, rawConfig deviceConfig.Device,
}
// deviceStop loads a new device and calls its Stop() function.
-func (vm *vmQemu) deviceStop(deviceName string, rawConfig deviceConfig.Device) error {
+func (vm *VMQemu) deviceStop(deviceName string, rawConfig deviceConfig.Device) error {
d, _, err := vm.deviceLoad(deviceName, rawConfig)
// If deviceLoad fails with unsupported device type then return.
@@ -825,7 +833,7 @@ func (vm *vmQemu) deviceStop(deviceName string, rawConfig deviceConfig.Device) e
}
// runHooks executes the callback functions returned from a function.
-func (vm *vmQemu) runHooks(hooks []func() error) error {
+func (vm *VMQemu) runHooks(hooks []func() error) error {
// Run any post start hooks.
if len(hooks) > 0 {
for _, hook := range hooks {
@@ -839,18 +847,18 @@ func (vm *vmQemu) runHooks(hooks []func() error) error {
return nil
}
-func (vm *vmQemu) getMonitorPath() string {
+func (vm *VMQemu) getMonitorPath() string {
return filepath.Join(vm.LogPath(), "qemu.monitor")
}
-func (vm *vmQemu) getNvramPath() string {
+func (vm *VMQemu) getNvramPath() string {
return filepath.Join(vm.Path(), "qemu.nvram")
}
// generateConfigShare generates the config share directory that will be exported to the VM via
// a 9P share. Due to the unknown size of templates inside the images this directory is created
// inside the VM's config volume so that it can be restricted by quota.
-func (vm *vmQemu) generateConfigShare() error {
+func (vm *VMQemu) generateConfigShare() error {
// Mount the instance's config volume if needed.
ourMount, err := vm.mount()
if err != nil {
@@ -1036,7 +1044,7 @@ echo "To start it now, unmount this filesystem and run: systemctl start lxd-agen
// generateQemuConfigFile writes the qemu config file and returns its location.
// It writes the config file inside the VM's log path.
-func (vm *vmQemu) generateQemuConfigFile(qemuType string, qemuConf string, devConfs []*deviceConfig.RunConfig) (string, error) {
+func (vm *VMQemu) generateQemuConfigFile(qemuType string, qemuConf string, devConfs []*deviceConfig.RunConfig) (string, error) {
var sb *strings.Builder = &strings.Builder{}
// Base config. This is common for all VMs and has no variables in it.
@@ -1163,7 +1171,7 @@ backend = "pty"
}
// addMemoryConfig adds the qemu config required for setting the size of the VM's memory.
-func (vm *vmQemu) addMemoryConfig(sb *strings.Builder) error {
+func (vm *VMQemu) addMemoryConfig(sb *strings.Builder) error {
// Configure memory limit.
memSize := vm.expandedConfig["limits.memory"]
if memSize == "" {
@@ -1185,7 +1193,7 @@ size = "%dB"
}
// addVsockConfig adds the qemu config required for setting up the host->VM vsock socket.
-func (vm *vmQemu) addVsockConfig(sb *strings.Builder) {
+func (vm *VMQemu) addVsockConfig(sb *strings.Builder) {
vsockID := vm.vsockID()
sb.WriteString(fmt.Sprintf(`
@@ -1208,7 +1216,7 @@ addr = "0x0"
}
// addCPUConfig adds the qemu config required for setting the number of virtualised CPUs.
-func (vm *vmQemu) addCPUConfig(sb *strings.Builder) error {
+func (vm *VMQemu) addCPUConfig(sb *strings.Builder) error {
// Configure CPU limit. TODO add control of sockets, cores and threads.
cpus := vm.expandedConfig["limits.cpu"]
if cpus == "" {
@@ -1233,7 +1241,7 @@ cpus = "%d"
}
// addMonitorConfig adds the qemu config required for setting up the host side VM monitor device.
-func (vm *vmQemu) addMonitorConfig(sb *strings.Builder) {
+func (vm *VMQemu) addMonitorConfig(sb *strings.Builder) {
monitorPath := vm.getMonitorPath()
sb.WriteString(fmt.Sprintf(`
@@ -1253,7 +1261,7 @@ mode = "control"
}
// addFirmwareConfig adds the qemu config required for adding a secure boot compatible EFI firmware.
-func (vm *vmQemu) addFirmwareConfig(sb *strings.Builder) {
+func (vm *VMQemu) addFirmwareConfig(sb *strings.Builder) {
nvramPath := vm.getNvramPath()
sb.WriteString(fmt.Sprintf(`
@@ -1277,7 +1285,7 @@ unit = "1"
}
// addConfDriveConfig adds the qemu config required for adding the config drive.
-func (vm *vmQemu) addConfDriveConfig(sb *strings.Builder) {
+func (vm *VMQemu) addConfDriveConfig(sb *strings.Builder) {
// Devices use "qemu_" prefix indicating that this is a internally named device.
sb.WriteString(fmt.Sprintf(`
# Config drive
@@ -1297,7 +1305,7 @@ mount_tag = "config"
}
// addRootDriveConfig adds the qemu config required for adding the root drive.
-func (vm *vmQemu) addRootDriveConfig(sb *strings.Builder) error {
+func (vm *VMQemu) addRootDriveConfig(sb *strings.Builder) error {
pool, err := vm.getStoragePool()
if err != nil {
return err
@@ -1332,7 +1340,7 @@ bootindex = "1"
}
// addDriveConfig adds the qemu config required for adding a supplementary drive.
-func (vm *vmQemu) addDriveConfig(sb *strings.Builder, driveIndex int, driveConf deviceConfig.MountEntryItem) {
+func (vm *VMQemu) addDriveConfig(sb *strings.Builder, driveIndex int, driveConf deviceConfig.MountEntryItem) {
driveName := fmt.Sprintf(driveConf.TargetPath)
// Devices use "lxd_" prefix indicating that this is a user named device.
@@ -1358,7 +1366,7 @@ drive = "lxd_%s"
}
// addNetDevConfig adds the qemu config required for adding a network device.
-func (vm *vmQemu) addNetDevConfig(sb *strings.Builder, nicConfig []deviceConfig.RunConfigItem) {
+func (vm *VMQemu) addNetDevConfig(sb *strings.Builder, nicConfig []deviceConfig.RunConfigItem) {
var devName, devTap, devHwaddr string
for _, nicItem := range nicConfig {
if nicItem.Key == "name" {
@@ -1399,12 +1407,12 @@ bootindex = "2""
}
// pidFilePath returns the path where the qemu process should write its PID.
-func (vm *vmQemu) pidFilePath() string {
+func (vm *VMQemu) pidFilePath() string {
return filepath.Join(vm.LogPath(), "qemu.pid")
}
// pid gets the PID of the running qemu process.
-func (vm *vmQemu) pid() (int, error) {
+func (vm *VMQemu) pid() (int, error) {
pidStr, err := ioutil.ReadFile(vm.pidFilePath())
if os.IsNotExist(err) {
return 0, nil
@@ -1423,7 +1431,7 @@ func (vm *vmQemu) pid() (int, error) {
}
// Stop stops the VM.
-func (vm *vmQemu) Stop(stateful bool) error {
+func (vm *VMQemu) Stop(stateful bool) error {
if stateful {
return fmt.Errorf("Stateful stop isn't supported for VMs at this time")
}
@@ -1465,7 +1473,7 @@ func (vm *vmQemu) Stop(stateful bool) error {
return nil
}
-func (vm *vmQemu) Unfreeze() error {
+func (vm *VMQemu) Unfreeze() error {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.getMonitorPath(), vm.getMonitorEventHandler())
if err != nil {
@@ -1481,28 +1489,28 @@ func (vm *vmQemu) Unfreeze() error {
return nil
}
-func (vm *vmQemu) IsPrivileged() bool {
+func (vm *VMQemu) IsPrivileged() bool {
// Privileged mode doesn't apply to virtual machines
return false
}
-func (vm *vmQemu) Restore(source instance.Instance, stateful bool) error {
+func (vm *VMQemu) Restore(source instance.Instance, stateful bool) error {
return fmt.Errorf("Restore Not implemented")
}
-func (vm *vmQemu) Snapshots() ([]instance.Instance, error) {
+func (vm *VMQemu) Snapshots() ([]instance.Instance, error) {
return []instance.Instance{}, nil
}
-func (vm *vmQemu) Backups() ([]backup.Backup, error) {
+func (vm *VMQemu) Backups() ([]backup.Backup, error) {
return []backup.Backup{}, nil
}
-func (vm *vmQemu) Rename(newName string) error {
+func (vm *VMQemu) Rename(newName string) error {
return fmt.Errorf("Rename Not implemented")
}
-func (vm *vmQemu) Update(args db.InstanceArgs, userRequested bool) error {
+func (vm *VMQemu) Update(args db.InstanceArgs, userRequested bool) error {
if vm.IsRunning() {
return fmt.Errorf("Update whilst running not supported")
}
@@ -1529,13 +1537,13 @@ func (vm *vmQemu) Update(args db.InstanceArgs, userRequested bool) error {
}
// Validate the new config.
- err := containerValidConfig(vm.state.OS, args.Config, false, false)
+ err := instance.ValidConfig(vm.state.OS, args.Config, false, false)
if err != nil {
return errors.Wrap(err, "Invalid config")
}
// Validate the new devices without using expanded devices validation (expensive checks disabled).
- err = instanceValidDevices(vm.state, vm.state.Cluster, vm.Type(), vm.Name(), args.Devices, false)
+ err = instance.ValidDevices(vm.state, vm.state.Cluster, vm.Type(), vm.Name(), args.Devices, false)
if err != nil {
return errors.Wrap(err, "Invalid devices")
}
@@ -1713,13 +1721,13 @@ func (vm *vmQemu) Update(args db.InstanceArgs, userRequested bool) error {
})
// Do some validation of the config diff.
- err = containerValidConfig(vm.state.OS, vm.expandedConfig, false, true)
+ err = instance.ValidConfig(vm.state.OS, vm.expandedConfig, false, true)
if err != nil {
return errors.Wrap(err, "Invalid expanded config")
}
// Do full expanded validation of the devices diff.
- err = instanceValidDevices(vm.state, vm.state.Cluster, vm.Type(), vm.Name(), vm.expandedDevices, true)
+ err = instance.ValidDevices(vm.state, vm.state.Cluster, vm.Type(), vm.Name(), vm.expandedDevices, true)
if err != nil {
return errors.Wrap(err, "Invalid expanded devices")
}
@@ -1810,7 +1818,7 @@ func (vm *vmQemu) Update(args db.InstanceArgs, userRequested bool) error {
return errors.Wrap(err, "Failed to update database")
}
- err = writeBackupFile(vm)
+ err = instance.WriteBackupFile(vm.state, vm)
if err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "Failed to write backup file")
}
@@ -1831,7 +1839,7 @@ func (vm *vmQemu) Update(args db.InstanceArgs, userRequested bool) error {
return nil
}
-func (vm *vmQemu) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices) error {
+func (vm *VMQemu) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices) error {
isRunning := vm.IsRunning()
// Remove devices in reverse order to how they were added.
@@ -1887,7 +1895,7 @@ func (vm *vmQemu) updateDevices(removeDevices deviceConfig.Devices, addDevices d
}
// deviceUpdate loads a new device and calls its Update() function.
-func (vm *vmQemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, isRunning bool) error {
+func (vm *VMQemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, isRunning bool) error {
d, _, err := vm.deviceLoad(deviceName, rawConfig)
if err != nil {
return err
@@ -1903,7 +1911,7 @@ func (vm *vmQemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device,
// deviceResetVolatile resets a device's volatile data when its removed or updated in such a way
// that it is removed then added immediately afterwards.
-func (vm *vmQemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
+func (vm *VMQemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
volatileClear := make(map[string]string)
devicePrefix := fmt.Sprintf("volatile.%s.", devName)
@@ -1939,7 +1947,7 @@ func (vm *vmQemu) deviceResetVolatile(devName string, oldConfig, newConfig devic
return vm.VolatileSet(volatileClear)
}
-func (vm *vmQemu) removeUnixDevices() error {
+func (vm *VMQemu) removeUnixDevices() error {
// Check that we indeed have devices to remove.
if !shared.PathExists(vm.DevicesPath()) {
return nil
@@ -1969,7 +1977,7 @@ func (vm *vmQemu) removeUnixDevices() error {
return nil
}
-func (vm *vmQemu) removeDiskDevices() error {
+func (vm *VMQemu) removeDiskDevices() error {
// Check that we indeed have devices to remove.vm
if !shared.PathExists(vm.DevicesPath()) {
return nil
@@ -2002,7 +2010,7 @@ func (vm *vmQemu) removeDiskDevices() error {
return nil
}
-func (vm *vmQemu) cleanup() {
+func (vm *VMQemu) cleanup() {
// Unmount any leftovers
vm.removeUnixDevices()
vm.removeDiskDevices()
@@ -2015,7 +2023,7 @@ func (vm *vmQemu) cleanup() {
}
// cleanupDevices performs any needed device cleanup steps when instance is stopped.
-func (vm *vmQemu) cleanupDevices() {
+func (vm *VMQemu) cleanupDevices() {
for _, dev := range vm.expandedDevices.Sorted() {
// Use the device interface if device supports it.
err := vm.deviceStop(dev.Name, dev.Config)
@@ -2027,7 +2035,7 @@ func (vm *vmQemu) cleanupDevices() {
}
}
-func (vm *vmQemu) init() error {
+func (vm *VMQemu) init() error {
// Compute the expanded config and device list.
err := vm.expandConfig(nil)
if err != nil {
@@ -2042,7 +2050,7 @@ func (vm *vmQemu) init() error {
return nil
}
-func (vm *vmQemu) Delete() error {
+func (vm *VMQemu) Delete() error {
ctxMap := log.Ctx{
"project": vm.project,
"name": vm.name,
@@ -2106,7 +2114,7 @@ func (vm *vmQemu) Delete() error {
} else {
// Remove all snapshots by initialising each snapshot as an Instance and
// calling its Delete function.
- err := instanceDeleteSnapshots(vm.state, vm.Project(), vm.Name())
+ err := instance.DeleteSnapshots(vm.state, vm.Project(), vm.Name())
if err != nil {
return err
}
@@ -2176,27 +2184,27 @@ func (vm *vmQemu) Delete() error {
return nil
}
-func (vm *vmQemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device) error {
+func (vm *VMQemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device) error {
return nil
}
-func (vm *vmQemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device) error {
+func (vm *VMQemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device) error {
return nil
}
-func (vm *vmQemu) Export(w io.Writer, properties map[string]string) error {
+func (vm *VMQemu) Export(w io.Writer, properties map[string]string) error {
return fmt.Errorf("Export Not implemented")
}
-func (vm *vmQemu) CGroupGet(key string) (string, error) {
+func (vm *VMQemu) CGroupGet(key string) (string, error) {
return "", fmt.Errorf("CGroupGet Not implemented")
}
-func (vm *vmQemu) CGroupSet(key string, value string) error {
+func (vm *VMQemu) CGroupSet(key string, value string) error {
return fmt.Errorf("CGroupSet Not implemented")
}
-func (vm *vmQemu) VolatileSet(changes map[string]string) error {
+func (vm *VMQemu) VolatileSet(changes map[string]string) error {
// Sanity check.
for key := range changes {
if !strings.HasPrefix(key, "volatile.") {
@@ -2234,11 +2242,11 @@ func (vm *vmQemu) VolatileSet(changes map[string]string) error {
return nil
}
-func (vm *vmQemu) FileExists(path string) error {
+func (vm *VMQemu) FileExists(path string) error {
return fmt.Errorf("FileExists Not implemented")
}
-func (vm *vmQemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileMode, string, []string, error) {
+func (vm *VMQemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileMode, string, []string, error) {
client, err := vm.getAgentClient()
if err != nil {
return 0, 0, 0, "", nil, err
@@ -2281,7 +2289,7 @@ func (vm *vmQemu) FilePull(srcPath string, dstPath string) (int64, int64, os.Fil
return 0, 0, 0, "", nil, fmt.Errorf("bad file type %s", resp.Type)
}
-func (vm *vmQemu) FilePush(fileType string, srcPath string, dstPath string, uid int64, gid int64, mode int, write string) error {
+func (vm *VMQemu) FilePush(fileType string, srcPath string, dstPath string, uid int64, gid int64, mode int, write string) error {
client, err := vm.getAgentClient()
if err != nil {
return err
@@ -2327,7 +2335,7 @@ func (vm *vmQemu) FilePush(fileType string, srcPath string, dstPath string, uid
return nil
}
-func (vm *vmQemu) FileRemove(path string) error {
+func (vm *VMQemu) FileRemove(path string) error {
// Connect to the agent.
client, err := vm.getAgentClient()
if err != nil {
@@ -2349,7 +2357,7 @@ func (vm *vmQemu) FileRemove(path string) error {
return nil
}
-func (vm *vmQemu) Console() (*os.File, chan error, error) {
+func (vm *VMQemu) Console() (*os.File, chan error, error) {
chDisconnect := make(chan error, 1)
// Avoid duplicate connects.
@@ -2389,7 +2397,7 @@ func (vm *vmQemu) Console() (*os.File, chan error, error) {
return console, chDisconnect, nil
}
-func (vm *vmQemu) forwardSignal(control *websocket.Conn, sig unix.Signal) error {
+func (vm *VMQemu) forwardSignal(control *websocket.Conn, sig unix.Signal) error {
logger.Debugf("Forwarding signal to lxd-agent: %s", sig)
w, err := control.NextWriter(websocket.TextMessage)
@@ -2411,7 +2419,7 @@ func (vm *vmQemu) forwardSignal(control *websocket.Conn, sig unix.Signal) error
return err
}
-func (vm *vmQemu) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, cwd string, uid uint32, gid uint32) (instance.Cmd, error) {
+func (vm *VMQemu) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, cwd string, uid uint32, gid uint32) (instance.Cmd, error) {
var instCmd *VMQemuCmd
// Because this function will exit before the remote command has finished, we create a
@@ -2513,7 +2521,7 @@ func (vm *vmQemu) Exec(command []string, env map[string]string, stdin *os.File,
return instCmd, nil
}
-func (vm *vmQemu) Render() (interface{}, interface{}, error) {
+func (vm *VMQemu) Render() (interface{}, interface{}, error) {
// Ignore err as the arch string on error is correct (unknown)
architectureName, _ := osarch.ArchitectureName(vm.architecture)
@@ -2565,7 +2573,7 @@ func (vm *vmQemu) Render() (interface{}, interface{}, error) {
return &vmState, etag, nil
}
-func (vm *vmQemu) RenderFull() (*api.InstanceFull, interface{}, error) {
+func (vm *VMQemu) RenderFull() (*api.InstanceFull, interface{}, error) {
if vm.IsSnapshot() {
return nil, nil, fmt.Errorf("RenderFull doesn't work with snapshots")
}
@@ -2623,14 +2631,14 @@ func (vm *vmQemu) RenderFull() (*api.InstanceFull, interface{}, error) {
return &vmState, etag, nil
}
-func (vm *vmQemu) RenderState() (*api.InstanceState, error) {
+func (vm *VMQemu) RenderState() (*api.InstanceState, error) {
statusCode := vm.statusCode()
pid, _ := vm.pid()
if statusCode == api.Running {
status, err := vm.agentGetState()
if err != nil {
- if err != vmQemuAgentOfflineErr {
+ if err != errVMQemuAgentOffline {
logger.Warn("Could not get VM state from agent", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
}
@@ -2651,7 +2659,7 @@ func (vm *vmQemu) RenderState() (*api.InstanceState, error) {
}
// Parse the lease file.
- addresses, err := networkGetLeaseAddresses(vm.state, m["parent"], m["hwaddr"])
+ addresses, err := instance.NetworkGetLeaseAddresses(vm.state, m["parent"], m["hwaddr"])
if err != nil {
return nil, err
}
@@ -2711,7 +2719,7 @@ func (vm *vmQemu) RenderState() (*api.InstanceState, error) {
// agentGetState connects to the agent inside of the VM and does
// an API call to get the current state.
-func (vm *vmQemu) agentGetState() (*api.InstanceState, error) {
+func (vm *VMQemu) agentGetState() (*api.InstanceState, error) {
// Check if the agent is running.
monitor, err := qmp.Connect(vm.getMonitorPath(), vm.getMonitorEventHandler())
if err != nil {
@@ -2719,7 +2727,7 @@ func (vm *vmQemu) agentGetState() (*api.InstanceState, error) {
}
if !monitor.AgentReady() {
- return nil, vmQemuAgentOfflineErr
+ return nil, errVMQemuAgentOffline
}
client, err := vm.getAgentClient()
@@ -2741,72 +2749,72 @@ func (vm *vmQemu) agentGetState() (*api.InstanceState, error) {
return status, nil
}
-func (vm *vmQemu) IsRunning() bool {
+func (vm *VMQemu) IsRunning() bool {
state := vm.State()
return state != "BROKEN" && state != "STOPPED"
}
-func (vm *vmQemu) IsFrozen() bool {
+func (vm *VMQemu) IsFrozen() bool {
return vm.State() == "FROZEN"
}
-func (vm *vmQemu) IsEphemeral() bool {
+func (vm *VMQemu) IsEphemeral() bool {
return vm.ephemeral
}
-func (vm *vmQemu) IsSnapshot() bool {
+func (vm *VMQemu) IsSnapshot() bool {
return vm.snapshot
}
-func (vm *vmQemu) IsStateful() bool {
+func (vm *VMQemu) IsStateful() bool {
return vm.stateful
}
-func (vm *vmQemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
+func (vm *VMQemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
return fmt.Errorf("DeviceEventHandler Not implemented")
}
-func (vm *vmQemu) ID() int {
+func (vm *VMQemu) ID() int {
return vm.id
}
// vsockID returns the vsock context ID, 3 being the first ID that can be used.
-func (vm *vmQemu) vsockID() int {
+func (vm *VMQemu) vsockID() int {
return vm.id + 3
}
-func (vm *vmQemu) Location() string {
+func (vm *VMQemu) Location() string {
return vm.node
}
-func (vm *vmQemu) Project() string {
+func (vm *VMQemu) Project() string {
return vm.project
}
-func (vm *vmQemu) Name() string {
+func (vm *VMQemu) Name() string {
return vm.name
}
-func (vm *vmQemu) Type() instancetype.Type {
+func (vm *VMQemu) Type() instancetype.Type {
return vm.dbType
}
-func (vm *vmQemu) Description() string {
+func (vm *VMQemu) Description() string {
return vm.description
}
-func (vm *vmQemu) Architecture() int {
+func (vm *VMQemu) Architecture() int {
return vm.architecture
}
-func (vm *vmQemu) CreationDate() time.Time {
+func (vm *VMQemu) CreationDate() time.Time {
return vm.creationDate
}
-func (vm *vmQemu) LastUsedDate() time.Time {
+func (vm *VMQemu) LastUsedDate() time.Time {
return vm.lastUsedDate
}
-func (vm *vmQemu) expandConfig(profiles []api.Profile) error {
+func (vm *VMQemu) expandConfig(profiles []api.Profile) error {
if profiles == nil && len(vm.profiles) > 0 {
var err error
profiles, err = vm.state.Cluster.ProfilesGet(vm.project, vm.profiles)
@@ -2820,7 +2828,7 @@ func (vm *vmQemu) expandConfig(profiles []api.Profile) error {
return nil
}
-func (vm *vmQemu) expandDevices(profiles []api.Profile) error {
+func (vm *VMQemu) expandDevices(profiles []api.Profile) error {
if profiles == nil && len(vm.profiles) > 0 {
var err error
profiles, err = vm.state.Cluster.ProfilesGet(vm.project, vm.profiles)
@@ -2834,32 +2842,32 @@ func (vm *vmQemu) expandDevices(profiles []api.Profile) error {
return nil
}
-func (vm *vmQemu) ExpandedConfig() map[string]string {
+func (vm *VMQemu) ExpandedConfig() map[string]string {
return vm.expandedConfig
}
-func (vm *vmQemu) ExpandedDevices() deviceConfig.Devices {
+func (vm *VMQemu) ExpandedDevices() deviceConfig.Devices {
return vm.expandedDevices
}
-func (vm *vmQemu) LocalConfig() map[string]string {
+func (vm *VMQemu) LocalConfig() map[string]string {
return vm.localConfig
}
-func (vm *vmQemu) LocalDevices() deviceConfig.Devices {
+func (vm *VMQemu) LocalDevices() deviceConfig.Devices {
return vm.localDevices
}
-func (vm *vmQemu) Profiles() []string {
+func (vm *VMQemu) Profiles() []string {
return vm.profiles
}
-func (vm *vmQemu) InitPID() int {
+func (vm *VMQemu) InitPID() int {
pid, _ := vm.pid()
return pid
}
-func (vm *vmQemu) statusCode() api.StatusCode {
+func (vm *VMQemu) statusCode() api.StatusCode {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.getMonitorPath(), vm.getMonitorEventHandler())
if err != nil {
@@ -2885,11 +2893,11 @@ func (vm *vmQemu) statusCode() api.StatusCode {
return api.Stopped
}
-func (vm *vmQemu) State() string {
+func (vm *VMQemu) State() string {
return strings.ToUpper(vm.statusCode().String())
}
-func (vm *vmQemu) ExpiryDate() time.Time {
+func (vm *VMQemu) ExpiryDate() time.Time {
if vm.IsSnapshot() {
return vm.expiryDate
}
@@ -2898,46 +2906,46 @@ func (vm *vmQemu) ExpiryDate() time.Time {
return time.Time{}
}
-func (vm *vmQemu) Path() string {
+func (vm *VMQemu) Path() string {
return storagePools.InstancePath(vm.Type(), vm.Project(), vm.Name(), vm.IsSnapshot())
}
-func (vm *vmQemu) DevicesPath() string {
+func (vm *VMQemu) DevicesPath() string {
name := project.Prefix(vm.Project(), vm.Name())
return shared.VarPath("devices", name)
}
-func (vm *vmQemu) ShmountsPath() string {
+func (vm *VMQemu) ShmountsPath() string {
name := project.Prefix(vm.Project(), vm.Name())
return shared.VarPath("shmounts", name)
}
-func (vm *vmQemu) LogPath() string {
+func (vm *VMQemu) LogPath() string {
name := project.Prefix(vm.Project(), vm.Name())
return shared.LogPath(name)
}
-func (vm *vmQemu) LogFilePath() string {
+func (vm *VMQemu) LogFilePath() string {
return filepath.Join(vm.LogPath(), "lxvm.log")
}
-func (vm *vmQemu) ConsoleBufferLogPath() string {
+func (vm *VMQemu) ConsoleBufferLogPath() string {
return filepath.Join(vm.LogPath(), "console.log")
}
-func (vm *vmQemu) RootfsPath() string {
+func (vm *VMQemu) RootfsPath() string {
return filepath.Join(vm.Path(), "rootfs")
}
-func (vm *vmQemu) TemplatesPath() string {
+func (vm *VMQemu) TemplatesPath() string {
return filepath.Join(vm.Path(), "templates")
}
-func (vm *vmQemu) StatePath() string {
+func (vm *VMQemu) StatePath() string {
return filepath.Join(vm.Path(), "state")
}
-func (vm *vmQemu) StoragePool() (string, error) {
+func (vm *VMQemu) StoragePool() (string, error) {
poolName, err := vm.state.Cluster.InstancePool(vm.Project(), vm.Name())
if err != nil {
return "", err
@@ -2946,25 +2954,25 @@ func (vm *vmQemu) StoragePool() (string, error) {
return poolName, nil
}
-func (vm *vmQemu) SetOperation(op *operations.Operation) {
+func (vm *VMQemu) SetOperation(op *operations.Operation) {
vm.op = op
}
// StorageStart deprecated.
-func (vm *vmQemu) StorageStart() (bool, error) {
+func (vm *VMQemu) StorageStart() (bool, error) {
return false, storagePools.ErrNotImplemented
}
// StorageStop deprecated.
-func (vm *vmQemu) StorageStop() (bool, error) {
+func (vm *VMQemu) StorageStop() (bool, error) {
return false, storagePools.ErrNotImplemented
}
-func (vm *vmQemu) DeferTemplateApply(trigger string) error {
+func (vm *VMQemu) DeferTemplateApply(trigger string) error {
return nil
}
-func (vm *vmQemu) DaemonState() *state.State {
+func (vm *VMQemu) DaemonState() *state.State {
// FIXME: This function should go away, since the abstract instance
// interface should not be coupled with internal state details.
// However this is not currently possible, because many
@@ -2976,7 +2984,7 @@ func (vm *vmQemu) DaemonState() *state.State {
// fillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
// generated name and hwaddr properties if these are missing from the device.
-func (vm *vmQemu) fillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
+func (vm *VMQemu) fillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
var err error
newDevice := m.Clone()
@@ -3006,7 +3014,7 @@ func (vm *vmQemu) fillNetworkDevice(name string, m deviceConfig.Device) (deviceC
volatileHwaddr := vm.localConfig[configKey]
if volatileHwaddr == "" {
// Generate a new MAC address
- volatileHwaddr, err = deviceNextInterfaceHWAddr()
+ volatileHwaddr, err = instance.DeviceNextInterfaceHWAddr()
if err != nil {
return nil, err
}
@@ -3041,7 +3049,7 @@ func (vm *vmQemu) fillNetworkDevice(name string, m deviceConfig.Device) (deviceC
}
// Internal MAAS handling.
-func (vm *vmQemu) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
+func (vm *VMQemu) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
interfaces := []maas.ContainerInterface{}
for k, m := range devices {
if m["type"] != "nic" {
@@ -3091,7 +3099,7 @@ func (vm *vmQemu) maasInterfaces(devices map[string]map[string]string) ([]maas.C
return interfaces, nil
}
-func (vm *vmQemu) maasDelete() error {
+func (vm *VMQemu) maasDelete() error {
maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
if err != nil {
return err
@@ -3126,7 +3134,7 @@ func (vm *vmQemu) maasDelete() error {
return vm.state.MAAS.DeleteContainer(project.Prefix(vm.project, vm.name))
}
-func (vm *vmQemu) maasUpdate(oldDevices map[string]map[string]string) error {
+func (vm *VMQemu) maasUpdate(oldDevices map[string]map[string]string) error {
// Check if MAAS is configured
maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
if err != nil {
From 48c9b697ddcb99d991b0db6c32bfab274269ed35 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 10:57:47 +0000
Subject: [PATCH 3/9] lxd/instance/vmqemu/vm/qemu/cmd: Updates to be in own
package
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/instance/vmqemu/vm_qemu_cmd.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lxd/instance/vmqemu/vm_qemu_cmd.go b/lxd/instance/vmqemu/vm_qemu_cmd.go
index db47792bbd..e344c1473d 100644
--- a/lxd/instance/vmqemu/vm_qemu_cmd.go
+++ b/lxd/instance/vmqemu/vm_qemu_cmd.go
@@ -1,4 +1,4 @@
-package main
+package vmqemu
import (
"golang.org/x/sys/unix"
From 5629ed4c8b70ce052621d811eab6d7473e18f74a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 11:58:33 +0000
Subject: [PATCH 4/9] lxd/networks: networkGetLeaseAddresses into instance
package
This limits the scope of the changes needed to get vmqemu into own package.
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/networks.go | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/lxd/networks.go b/lxd/networks.go
index 057e3dc53d..160ef64cd5 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -23,6 +23,7 @@ import (
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/device"
"github.com/lxc/lxd/lxd/dnsmasq"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/iptables"
"github.com/lxc/lxd/lxd/node"
@@ -35,6 +36,11 @@ import (
"github.com/lxc/lxd/shared/version"
)
+func init() {
+ // Link networkGetLeaseAddresses into instance package.
+ instance.NetworkGetLeaseAddresses = networkGetLeaseAddresses
+}
+
// Lock to prevent concurent networks creation
var networkCreateLock sync.Mutex
From e55b32f6734cb30021789ec6926e3a8b579b97c9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 11:59:20 +0000
Subject: [PATCH 5/9] lxd/network: Stops fillNetworkDevice during DHCP leases
scan for vmqemu
Why do we need to do this in networkLeasesGet, shouldn't the MAC address be generated on VM creation?
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/networks.go | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/lxd/networks.go b/lxd/networks.go
index 160ef64cd5..945dcabb17 100644
--- a/lxd/networks.go
+++ b/lxd/networks.go
@@ -755,12 +755,14 @@ func networkLeasesGet(d *Daemon, r *http.Request) response.Response {
if err != nil {
continue
}
- } else if inst.Type() == instancetype.VM {
- d, err = inst.(*vmQemu).fillNetworkDevice(k, d)
+ }
+ /* tomp TODO why hasn't this been done at instance/device create time?
+ else if inst.Type() == instancetype.VM {
+ d, err = inst.(*vmqemu.VMQemu).fillNetworkDevice(k, d)
if err != nil {
continue
}
- }
+ }*/
// Record the MAC
if d["hwaddr"] != "" {
From 01368da183401c1d5b0c98be80d5ecd876aaaf9e Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 12:00:33 +0000
Subject: [PATCH 6/9] vmqemu cont
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/instance/vmqemu/vm_qemu.go | 69 ++++++++++++++++++++++++++++++++--
1 file changed, 65 insertions(+), 4 deletions(-)
diff --git a/lxd/instance/vmqemu/vm_qemu.go b/lxd/instance/vmqemu/vm_qemu.go
index 0b3e56e95d..640a67a4f2 100644
--- a/lxd/instance/vmqemu/vm_qemu.go
+++ b/lxd/instance/vmqemu/vm_qemu.go
@@ -438,6 +438,7 @@ func (vm *VMQemu) generateAgentCert() (string, string, string, string, error) {
return string(agentCert), string(agentKey), string(clientCert), string(clientKey), nil
}
+// Freeze freezes the instance.
func (vm *VMQemu) Freeze() error {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.getMonitorPath(), vm.getMonitorEventHandler())
@@ -454,6 +455,7 @@ func (vm *VMQemu) Freeze() error {
return nil
}
+// OnStop is run when the instance stops.
func (vm *VMQemu) OnStop(target string) error {
vm.cleanupDevices()
os.Remove(vm.pidFilePath())
@@ -473,6 +475,7 @@ func (vm *VMQemu) OnStop(target string) error {
return nil
}
+// Shutdown shuts the instance down.
func (vm *VMQemu) Shutdown(timeout time.Duration) error {
if !vm.IsRunning() {
return fmt.Errorf("The instance is already stopped")
@@ -527,6 +530,7 @@ func (vm *VMQemu) ovmfPath() string {
return "/usr/share/OVMF"
}
+// Start starts the instance.
func (vm *VMQemu) Start(stateful bool) error {
// Ensure the correct vhost_vsock kernel module is loaded before establishing the vsock.
err := util.LoadModule("vhost_vsock")
@@ -1473,6 +1477,7 @@ func (vm *VMQemu) Stop(stateful bool) error {
return nil
}
+// Unfreeze restores the instance to running.
func (vm *VMQemu) Unfreeze() error {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.getMonitorPath(), vm.getMonitorEventHandler())
@@ -1489,27 +1494,32 @@ func (vm *VMQemu) Unfreeze() error {
return nil
}
+// IsPrivileged does not apply to virtual machines. Always returns false.
func (vm *VMQemu) IsPrivileged() bool {
- // Privileged mode doesn't apply to virtual machines
return false
}
+// Restore restores an instance snapshot.
func (vm *VMQemu) Restore(source instance.Instance, stateful bool) error {
return fmt.Errorf("Restore Not implemented")
}
+// Snapshots returns a list of snapshots.
func (vm *VMQemu) Snapshots() ([]instance.Instance, error) {
return []instance.Instance{}, nil
}
+// Backups returns a list of backups.
func (vm *VMQemu) Backups() ([]backup.Backup, error) {
return []backup.Backup{}, nil
}
+// Rename the instance.
func (vm *VMQemu) Rename(newName string) error {
return fmt.Errorf("Rename Not implemented")
}
+// Update the instance config.
func (vm *VMQemu) Update(args db.InstanceArgs, userRequested bool) error {
if vm.IsRunning() {
return fmt.Errorf("Update whilst running not supported")
@@ -2050,6 +2060,7 @@ func (vm *VMQemu) init() error {
return nil
}
+// Delete the instance.
func (vm *VMQemu) Delete() error {
ctxMap := log.Ctx{
"project": vm.project,
@@ -2192,18 +2203,22 @@ func (vm *VMQemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device)
return nil
}
+// Export publishes the instance.
func (vm *VMQemu) Export(w io.Writer, properties map[string]string) error {
return fmt.Errorf("Export Not implemented")
}
+// CGroupGet is not implemented for VMs.
func (vm *VMQemu) CGroupGet(key string) (string, error) {
return "", fmt.Errorf("CGroupGet Not implemented")
}
+// CGroupSet is not implemented for VMs.
func (vm *VMQemu) CGroupSet(key string, value string) error {
return fmt.Errorf("CGroupSet Not implemented")
}
+// VolatileSet sets one or more volatile config keys.
func (vm *VMQemu) VolatileSet(changes map[string]string) error {
// Sanity check.
for key := range changes {
@@ -2242,10 +2257,12 @@ func (vm *VMQemu) VolatileSet(changes map[string]string) error {
return nil
}
+// FileExists is not implemented for VMs.
func (vm *VMQemu) FileExists(path string) error {
return fmt.Errorf("FileExists Not implemented")
}
+// FilePull retrieves a file from the instance.
func (vm *VMQemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileMode, string, []string, error) {
client, err := vm.getAgentClient()
if err != nil {
@@ -2289,6 +2306,7 @@ func (vm *VMQemu) FilePull(srcPath string, dstPath string) (int64, int64, os.Fil
return 0, 0, 0, "", nil, fmt.Errorf("bad file type %s", resp.Type)
}
+// FilePush pushes a file into the instance.
func (vm *VMQemu) FilePush(fileType string, srcPath string, dstPath string, uid int64, gid int64, mode int, write string) error {
client, err := vm.getAgentClient()
if err != nil {
@@ -2335,6 +2353,7 @@ func (vm *VMQemu) FilePush(fileType string, srcPath string, dstPath string, uid
return nil
}
+// FileRemove removes a file from the instance.
func (vm *VMQemu) FileRemove(path string) error {
// Connect to the agent.
client, err := vm.getAgentClient()
@@ -2357,6 +2376,7 @@ func (vm *VMQemu) FileRemove(path string) error {
return nil
}
+// Console gets access to the instance's console.
func (vm *VMQemu) Console() (*os.File, chan error, error) {
chDisconnect := make(chan error, 1)
@@ -2419,8 +2439,9 @@ func (vm *VMQemu) forwardSignal(control *websocket.Conn, sig unix.Signal) error
return err
}
+// Exec a command inside the instance.
func (vm *VMQemu) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, cwd string, uid uint32, gid uint32) (instance.Cmd, error) {
- var instCmd *VMQemuCmd
+ var instCmd *Cmd
// Because this function will exit before the remote command has finished, we create a
// cleanup function that will be passed to the instance function if successfully started to
@@ -2509,7 +2530,7 @@ func (vm *VMQemu) Exec(command []string, env map[string]string, stdin *os.File,
return nil, err
}
- instCmd = &VMQemuCmd{
+ instCmd = &Cmd{
cmd: op,
attachedChildPid: -1, // Process is not running on LXD host.
dataDone: args.DataDone,
@@ -2521,6 +2542,7 @@ func (vm *VMQemu) Exec(command []string, env map[string]string, stdin *os.File,
return instCmd, nil
}
+// Render returns info about the instance.
func (vm *VMQemu) Render() (interface{}, interface{}, error) {
// Ignore err as the arch string on error is correct (unknown)
architectureName, _ := osarch.ArchitectureName(vm.architecture)
@@ -2573,6 +2595,7 @@ func (vm *VMQemu) Render() (interface{}, interface{}, error) {
return &vmState, etag, nil
}
+// RenderFull returns all info about the instance.
func (vm *VMQemu) RenderFull() (*api.InstanceFull, interface{}, error) {
if vm.IsSnapshot() {
return nil, nil, fmt.Errorf("RenderFull doesn't work with snapshots")
@@ -2631,6 +2654,7 @@ func (vm *VMQemu) RenderFull() (*api.InstanceFull, interface{}, error) {
return &vmState, etag, nil
}
+// RenderState returns just state info about the instance.
func (vm *VMQemu) RenderState() (*api.InstanceState, error) {
statusCode := vm.statusCode()
pid, _ := vm.pid()
@@ -2749,31 +2773,38 @@ func (vm *VMQemu) agentGetState() (*api.InstanceState, error) {
return status, nil
}
+// IsRunning returns whether or not the instance is running.
func (vm *VMQemu) IsRunning() bool {
state := vm.State()
return state != "BROKEN" && state != "STOPPED"
}
+// IsFrozen returns whether the instance is frozen or not.
func (vm *VMQemu) IsFrozen() bool {
return vm.State() == "FROZEN"
}
+// IsEphemeral returns whether the instance is ephemeral or not.
func (vm *VMQemu) IsEphemeral() bool {
return vm.ephemeral
}
+// IsSnapshot returns whether the instance is a snapshot or not.
func (vm *VMQemu) IsSnapshot() bool {
return vm.snapshot
}
+// IsStateful returns whether the instance is stateful or not.
func (vm *VMQemu) IsStateful() bool {
return vm.stateful
}
+// DeviceEventHandler handles events occurring on the instance's devices.
func (vm *VMQemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
return fmt.Errorf("DeviceEventHandler Not implemented")
}
+// ID returns the instance's ID.
func (vm *VMQemu) ID() int {
return vm.id
}
@@ -2783,33 +2814,42 @@ func (vm *VMQemu) vsockID() int {
return vm.id + 3
}
+// Location returns instance's location.
func (vm *VMQemu) Location() string {
return vm.node
}
+// Project returns instance's project.
func (vm *VMQemu) Project() string {
return vm.project
}
+// Name returns the instance's name.
func (vm *VMQemu) Name() string {
return vm.name
}
+// Type returns the instance's type.
func (vm *VMQemu) Type() instancetype.Type {
return vm.dbType
}
+// Description returns the instance's description.
func (vm *VMQemu) Description() string {
return vm.description
}
+// Architecture returns the instance's architecture.
func (vm *VMQemu) Architecture() int {
return vm.architecture
}
+// CreationDate returns the instance's creation date.
func (vm *VMQemu) CreationDate() time.Time {
return vm.creationDate
}
+
+// LastUsedDate returns the instance's last used date.
func (vm *VMQemu) LastUsedDate() time.Time {
return vm.lastUsedDate
}
@@ -2842,26 +2882,32 @@ func (vm *VMQemu) expandDevices(profiles []api.Profile) error {
return nil
}
+// ExpandedConfig returns instance's expanded config.
func (vm *VMQemu) ExpandedConfig() map[string]string {
return vm.expandedConfig
}
+// ExpandedDevices returns instance's expanded device config.
func (vm *VMQemu) ExpandedDevices() deviceConfig.Devices {
return vm.expandedDevices
}
+// LocalConfig returns the instance's local config.
func (vm *VMQemu) LocalConfig() map[string]string {
return vm.localConfig
}
+// LocalDevices returns the instance's local device config.
func (vm *VMQemu) LocalDevices() deviceConfig.Devices {
return vm.localDevices
}
+// Profiles returns the instance's profiles.
func (vm *VMQemu) Profiles() []string {
return vm.profiles
}
+// InitPID returns the instance's current process ID.
func (vm *VMQemu) InitPID() int {
pid, _ := vm.pid()
return pid
@@ -2893,10 +2939,12 @@ func (vm *VMQemu) statusCode() api.StatusCode {
return api.Stopped
}
+// State returns the instance's state as an upper case string.
func (vm *VMQemu) State() string {
return strings.ToUpper(vm.statusCode().String())
}
+// ExpiryDate returns when this snapshot expires.
func (vm *VMQemu) ExpiryDate() time.Time {
if vm.IsSnapshot() {
return vm.expiryDate
@@ -2906,45 +2954,55 @@ func (vm *VMQemu) ExpiryDate() time.Time {
return time.Time{}
}
+// Path returns the instance's path.
func (vm *VMQemu) Path() string {
return storagePools.InstancePath(vm.Type(), vm.Project(), vm.Name(), vm.IsSnapshot())
}
+// DevicesPath returns the instance's devices path.
func (vm *VMQemu) DevicesPath() string {
name := project.Prefix(vm.Project(), vm.Name())
return shared.VarPath("devices", name)
}
+// ShmountsPath returns the instance's shared mounts path.
func (vm *VMQemu) ShmountsPath() string {
name := project.Prefix(vm.Project(), vm.Name())
return shared.VarPath("shmounts", name)
}
+// LogPath returns the instance's log path.
func (vm *VMQemu) LogPath() string {
name := project.Prefix(vm.Project(), vm.Name())
return shared.LogPath(name)
}
+// LogFilePath returns the instance's log file path.
func (vm *VMQemu) LogFilePath() string {
- return filepath.Join(vm.LogPath(), "lxvm.log")
+ return filepath.Join(vm.LogPath(), "lxvm.log") // tomp TODO is this correct?
}
+// ConsoleBufferLogPath returns the instance's console buffer log path.
func (vm *VMQemu) ConsoleBufferLogPath() string {
return filepath.Join(vm.LogPath(), "console.log")
}
+// RootfsPath returns the instance's rootfs path.
func (vm *VMQemu) RootfsPath() string {
return filepath.Join(vm.Path(), "rootfs")
}
+// TemplatesPath returns the instance's templates path.
func (vm *VMQemu) TemplatesPath() string {
return filepath.Join(vm.Path(), "templates")
}
+// StatePath returns the instance's state path.
func (vm *VMQemu) StatePath() string {
return filepath.Join(vm.Path(), "state")
}
+// StoragePool returns the name of the instance's storage pool.
func (vm *VMQemu) StoragePool() (string, error) {
poolName, err := vm.state.Cluster.InstancePool(vm.Project(), vm.Name())
if err != nil {
@@ -2954,6 +3012,7 @@ func (vm *VMQemu) StoragePool() (string, error) {
return poolName, nil
}
+// SetOperation sets the current operation.
func (vm *VMQemu) SetOperation(op *operations.Operation) {
vm.op = op
}
@@ -2968,10 +3027,12 @@ func (vm *VMQemu) StorageStop() (bool, error) {
return false, storagePools.ErrNotImplemented
}
+// DeferTemplateApply not used currently.
func (vm *VMQemu) DeferTemplateApply(trigger string) error {
return nil
}
+// DaemonState returns the state of the daemon. Deprecated.
func (vm *VMQemu) DaemonState() *state.State {
// FIXME: This function should go away, since the abstract instance
// interface should not be coupled with internal state details.
From 9fe54b07f6b9882bf294bdfc41d7542322d6367f Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 12:00:48 +0000
Subject: [PATCH 7/9] vm qemu cmd cont
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/instance/vmqemu/vm_qemu_cmd.go | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/lxd/instance/vmqemu/vm_qemu_cmd.go b/lxd/instance/vmqemu/vm_qemu_cmd.go
index e344c1473d..6eac3be652 100644
--- a/lxd/instance/vmqemu/vm_qemu_cmd.go
+++ b/lxd/instance/vmqemu/vm_qemu_cmd.go
@@ -6,8 +6,8 @@ import (
lxdClient "github.com/lxc/lxd/client"
)
-// VMQemuCmd represents a running command for an Qemu VM.
-type VMQemuCmd struct {
+// Cmd represents a running command for a Qemu VM.
+type Cmd struct {
attachedChildPid int
cmd lxdClient.Operation
dataDone chan bool
@@ -17,18 +17,18 @@ type VMQemuCmd struct {
}
// PID returns the attached child's process ID.
-func (c *VMQemuCmd) PID() int {
+func (c *Cmd) PID() int {
return c.attachedChildPid
}
// Signal sends a signal to the command.
-func (c *VMQemuCmd) Signal(sig unix.Signal) error {
+func (c *Cmd) Signal(sig unix.Signal) error {
c.signalSendCh <- sig
return <-c.signalResCh
}
// Wait for the command to end and returns its exit code and any error.
-func (c *VMQemuCmd) Wait() (int, error) {
+func (c *Cmd) Wait() (int, error) {
if c.cleanupFunc != nil {
defer c.cleanupFunc()
}
From dec5b4245cba2fb2459b7aea0d7fdd9392351f69 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 12:01:39 +0000
Subject: [PATCH 8/9] lxd/backup/backup: Adds New() function
New() allows instance.BackupLoadByName to instantiate a new Backup struct.
Removes LoadByName, as it causes circular imports with instance, will move to instance package as BackupLoadByName.
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/backup/backup.go | 44 ++++++++++++++------------------------------
1 file changed, 14 insertions(+), 30 deletions(-)
diff --git a/lxd/backup/backup.go b/lxd/backup/backup.go
index 8f3fdc1067..c50ad7697a 100644
--- a/lxd/backup/backup.go
+++ b/lxd/backup/backup.go
@@ -9,7 +9,6 @@ import (
"strings"
"time"
- "github.com/pkg/errors"
"gopkg.in/yaml.v2"
"github.com/lxc/lxd/lxd/project"
@@ -18,9 +17,6 @@ import (
"github.com/lxc/lxd/shared/api"
)
-// InstanceLoadByID returns instance config by ID.
-var InstanceLoadByID func(s *state.State, id int) (Instance, error)
-
// Instance represents the backup relevant subset of a LXD instance.
type Instance interface {
Name() string
@@ -124,6 +120,20 @@ type Backup struct {
compressionAlgorithm string
}
+// New instantiates a new Backup struct.
+func New(state *state.State, inst Instance, ID int, name string, creationDate, expiryDate time.Time, instanceOnly, optimizedStorage bool) *Backup {
+ return &Backup{
+ state: state,
+ instance: inst,
+ id: ID,
+ name: name,
+ creationDate: creationDate,
+ expiryDate: expiryDate,
+ instanceOnly: instanceOnly,
+ optimizedStorage: optimizedStorage,
+ }
+}
+
// CompressionAlgorithm returns the compression used for the tarball.
func (b *Backup) CompressionAlgorithm() string {
return b.compressionAlgorithm
@@ -205,32 +215,6 @@ func (b *Backup) Render() *api.InstanceBackup {
}
}
-// LoadByName load a backup from the database.
-func LoadByName(s *state.State, project, name string) (*Backup, error) {
- // Get the backup database record
- args, err := s.Cluster.ContainerGetBackup(project, name)
- if err != nil {
- return nil, errors.Wrap(err, "Load backup from database")
- }
-
- // Load the instance it belongs to
- instance, err := InstanceLoadByID(s, args.InstanceID)
- if err != nil {
- return nil, errors.Wrap(err, "Load container from database")
- }
-
- return &Backup{
- state: s,
- instance: instance,
- id: args.ID,
- name: name,
- creationDate: args.CreationDate,
- expiryDate: args.ExpiryDate,
- instanceOnly: args.InstanceOnly,
- optimizedStorage: args.OptimizedStorage,
- }, nil
-}
-
// DoBackupDelete deletes a backup.
func DoBackupDelete(s *state.State, projectName, backupName, containerName string) error {
backupPath := shared.VarPath("backups", project.Prefix(projectName, backupName))
From 81ff345534e6a9b635a603527b4c74ed23d8ee1e Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Fri, 6 Dec 2019 12:04:26 +0000
Subject: [PATCH 9/9] lxd: Moves instance load and instance validation
functions into instance pkg
Updates usage.
Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
lxd/api_internal.go | 7 +-
lxd/backup.go | 4 +-
lxd/container.go | 204 ++-----------
lxd/container_backup.go | 14 +-
lxd/container_console.go | 6 +-
lxd/container_delete.go | 3 +-
lxd/container_exec.go | 2 +-
lxd/container_file.go | 2 +-
lxd/container_get.go | 3 +-
lxd/container_lxc.go | 306 +------------------
lxd/container_metadata.go | 10 +-
lxd/container_patch.go | 3 +-
lxd/container_post.go | 4 +-
lxd/container_put.go | 7 +-
lxd/container_snapshot.go | 6 +-
lxd/container_state.go | 5 +-
lxd/container_test.go | 3 +-
lxd/containers.go | 24 --
lxd/containers_post.go | 8 +-
lxd/devices.go | 21 --
lxd/devlxd.go | 2 +-
lxd/images.go | 3 +-
lxd/instance/instance_utils.go | 528 +++++++++++++++++++++++++++++++++
lxd/main_activateifneeded.go | 3 +-
lxd/migrate_container.go | 2 +-
lxd/patches.go | 13 +-
lxd/profiles.go | 3 +-
lxd/profiles_utils.go | 3 +-
lxd/storage.go | 2 +-
lxd/storage_btrfs.go | 4 +-
lxd/storage_ceph.go | 2 +-
lxd/storage_dir.go | 4 +-
lxd/storage_lvm.go | 4 +-
lxd/storage_migration.go | 4 +-
lxd/storage_zfs.go | 8 +-
35 files changed, 645 insertions(+), 582 deletions(-)
diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 38ee0da172..1740f1bfb0 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -21,6 +21,7 @@ import (
"github.com/lxc/lxd/lxd/db/node"
"github.com/lxc/lxd/lxd/db/query"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/response"
@@ -125,7 +126,7 @@ func internalContainerOnStart(d *Daemon, r *http.Request) response.Response {
return response.SmartError(err)
}
- inst, err := instanceLoadById(d.State(), id)
+ inst, err := instance.LoadByID(d.State(), id)
if err != nil {
return response.SmartError(err)
}
@@ -156,7 +157,7 @@ func internalContainerOnStopNS(d *Daemon, r *http.Request) response.Response {
}
netns := queryParam(r, "netns")
- inst, err := instanceLoadById(d.State(), id)
+ inst, err := instance.LoadByID(d.State(), id)
if err != nil {
return response.SmartError(err)
}
@@ -186,7 +187,7 @@ func internalContainerOnStop(d *Daemon, r *http.Request) response.Response {
target = "unknown"
}
- inst, err := instanceLoadById(d.State(), id)
+ inst, err := instance.LoadByID(d.State(), id)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/backup.go b/lxd/backup.go
index a01a9eb23f..07805ceb16 100644
--- a/lxd/backup.go
+++ b/lxd/backup.go
@@ -49,7 +49,7 @@ func backupCreate(s *state.State, args db.InstanceBackupArgs, sourceInst instanc
}()
// Get the backup struct.
- b, err := backup.LoadByName(s, sourceInst.Project(), args.Name)
+ b, err := instance.BackupLoadByName(s, sourceInst.Project(), args.Name)
if err != nil {
return errors.Wrap(err, "Load backup object")
}
@@ -280,7 +280,7 @@ func pruneExpiredContainerBackups(ctx context.Context, d *Daemon) error {
}
for _, b := range backups {
- inst, err := instanceLoadById(d.State(), b.InstanceID)
+ inst, err := instance.LoadByID(d.State(), b.InstanceID)
if err != nil {
return errors.Wrapf(err, "Error deleting container backup %s", b.Name)
}
diff --git a/lxd/container.go b/lxd/container.go
index 8eaed72952..e191581f82 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -23,13 +23,12 @@ import (
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
+ "github.com/lxc/lxd/lxd/instance/vmqemu"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/project"
- "github.com/lxc/lxd/lxd/seccomp"
"github.com/lxc/lxd/lxd/state"
storagePools "github.com/lxc/lxd/lxd/storage"
storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
- "github.com/lxc/lxd/lxd/sys"
"github.com/lxc/lxd/lxd/task"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
@@ -57,10 +56,10 @@ func init() {
return identifiers, nil
}
- // Expose instanceLoadByProjectAndName to the device package converting the response to an Instance.
+ // Expose instance.LoadByProjectAndName to the device package converting the response to an Instance.
// This is because container types are defined in the main package and are not importable.
device.InstanceLoadByProjectAndName = func(s *state.State, project, name string) (device.Instance, error) {
- container, err := instanceLoadByProjectAndName(s, project, name)
+ container, err := instance.LoadByProjectAndName(s, project, name)
if err != nil {
return nil, err
}
@@ -68,16 +67,13 @@ func init() {
return device.Instance(container), nil
}
- // Expose instanceLoadById to the backup package converting the response to an Instance.
- // This is because container types are defined in the main package and are not importable.
- backup.InstanceLoadByID = func(s *state.State, id int) (backup.Instance, error) {
- instance, err := instanceLoadById(s, id)
- if err != nil {
- return nil, err
- }
+ // Expose instanceValidDevices to the instance package. This is because it relies on
+ // containerLXC which cannot be moved out of main package at this time.
+ instance.ValidDevices = instanceValidDevices
- return backup.Instance(instance), nil
- }
+ // Expose instanceLoad to the instance package. This is because it relies on containerLXC
+ // which cannot be moved out of main package at this time.
+ instance.Load = instanceLoad
}
// Helper functions
@@ -96,105 +92,6 @@ func containerValidName(name string) error {
return nil
}
-func containerValidConfigKey(os *sys.OS, key string, value string) error {
- f, err := shared.ConfigKeyChecker(key)
- if err != nil {
- return err
- }
- if err = f(value); err != nil {
- return err
- }
- if key == "raw.lxc" {
- return lxcValidConfig(value)
- }
- if key == "security.syscalls.blacklist_compat" {
- for _, arch := range os.Architectures {
- if arch == osarch.ARCH_64BIT_INTEL_X86 ||
- arch == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN ||
- arch == osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN {
- return nil
- }
- }
- return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture")
- }
- return nil
-}
-
-func allowedUnprivilegedOnlyMap(rawIdmap string) error {
- rawMaps, err := parseRawIdmap(rawIdmap)
- if err != nil {
- return err
- }
-
- for _, ent := range rawMaps {
- if ent.Hostid == 0 {
- return fmt.Errorf("Cannot map root user into container as LXD was configured to only allow unprivileged containers")
- }
- }
-
- return nil
-}
-
-func containerValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded bool) error {
- if config == nil {
- return nil
- }
-
- for k, v := range config {
- if profile && strings.HasPrefix(k, "volatile.") {
- return fmt.Errorf("Volatile keys can only be set on containers")
- }
-
- if profile && strings.HasPrefix(k, "image.") {
- return fmt.Errorf("Image keys can only be set on containers")
- }
-
- err := containerValidConfigKey(sysOS, k, v)
- if err != nil {
- return err
- }
- }
-
- _, rawSeccomp := config["raw.seccomp"]
- _, whitelist := config["security.syscalls.whitelist"]
- _, blacklist := config["security.syscalls.blacklist"]
- blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"])
- blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"])
-
- if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) {
- return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*")
- }
-
- if whitelist && (blacklist || blacklistDefault || blacklistCompat) {
- return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*")
- }
-
- _, err := seccomp.SyscallInterceptMountFilter(config)
- if err != nil {
- return err
- }
-
- if expanded && (config["security.privileged"] == "" || !shared.IsTrue(config["security.privileged"])) && sysOS.IdmapSet == nil {
- return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported")
- }
-
- unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
- if shared.IsTrue(unprivOnly) {
- if config["raw.idmap"] != "" {
- err := allowedUnprivilegedOnlyMap(config["raw.idmap"])
- if err != nil {
- return err
- }
- }
-
- if shared.IsTrue(config["security.privileged"]) {
- return fmt.Errorf("LXD was configured to only allow unprivileged containers")
- }
- }
-
- return nil
-}
-
// instanceValidDevices validate instance device configs.
func instanceValidDevices(state *state.State, cluster *db.Cluster, instanceType instancetype.Type, instanceName string, devices deviceConfig.Devices, expanded bool) error {
// Empty device list
@@ -220,17 +117,19 @@ func instanceValidDevices(state *state.State, cluster *db.Cluster, instanceType
inst = c
} else if instanceType == instancetype.VM {
- vm := &vmQemu{
- dbType: instancetype.VM,
- name: instanceName,
- localDevices: devices.Clone(), // Prevent devices from modifying their config.
+ instArgs := db.InstanceArgs{
+ Name: instanceName,
+ Type: instancetype.VM,
+ Devices: devices.Clone(), // Prevent devices from modifying their config.
}
if expanded {
- vm.expandedDevices = vm.localDevices // Avoid another clone.
+ // The devices being validated are already expanded, so just use the same
+ // devices clone as we used for the main devices config.
+ inst = vmqemu.Instantiate(state, instArgs, instArgs.Devices)
+ } else {
+ inst = vmqemu.Instantiate(state, instArgs, nil)
}
-
- inst = vm
} else {
return fmt.Errorf("Invalid instance type")
}
@@ -531,7 +430,7 @@ func instanceCreateAsCopy(s *state.State, args db.InstanceArgs, sourceInst insta
if refresh {
// Load the target instance.
- inst, err = instanceLoadByProjectAndName(s, args.Project, args.Name)
+ inst, err = instance.LoadByProjectAndName(s, args.Project, args.Name)
if err != nil {
refresh = false // Instance doesn't exist, so switch to copy mode.
}
@@ -808,7 +707,7 @@ func instanceCreateAsSnapshot(s *state.State, args db.InstanceArgs, sourceInstan
}
// Attempt to update backup.yaml for instance.
- err = writeBackupFile(sourceInstance)
+ err = instance.WriteBackupFile(s, sourceInstance)
if err != nil {
return nil, err
}
@@ -867,7 +766,7 @@ func instanceCreateInternal(s *state.State, args db.InstanceArgs) (instance.Inst
}
// Validate container config.
- err := containerValidConfig(s.OS, args.Config, false, false)
+ err := instance.ValidConfig(s.OS, args.Config, false, false)
if err != nil {
return nil, err
}
@@ -1035,7 +934,7 @@ func instanceCreateInternal(s *state.State, args db.InstanceArgs) (instance.Inst
if args.Type == instancetype.Container {
inst, err = containerLXCCreate(s, args)
} else if args.Type == instancetype.VM {
- inst, err = vmQemuCreate(s, args)
+ inst, err = vmqemu.Create(s, args)
} else {
return nil, fmt.Errorf("Instance type invalid")
}
@@ -1113,7 +1012,7 @@ func instanceConfigureInternal(state *state.State, c instance.Instance) error {
return fmt.Errorf("Instance type not supported")
}
- err = writeBackupFile(c)
+ err = instance.WriteBackupFile(state, c)
if err != nil {
return err
}
@@ -1121,61 +1020,6 @@ func instanceConfigureInternal(state *state.State, c instance.Instance) error {
return nil
}
-func instanceLoadById(s *state.State, id int) (instance.Instance, error) {
- // Get the DB record
- project, name, err := s.Cluster.ContainerProjectAndName(id)
- if err != nil {
- return nil, err
- }
-
- return instanceLoadByProjectAndName(s, project, name)
-}
-
-func instanceLoadByProjectAndName(s *state.State, project, name string) (instance.Instance, error) {
- // Get the DB record
- var container *db.Instance
- err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
- var err error
-
- if strings.Contains(name, shared.SnapshotDelimiter) {
- parts := strings.SplitN(name, shared.SnapshotDelimiter, 2)
- instanceName := parts[0]
- snapshotName := parts[1]
-
- instance, err := tx.InstanceGet(project, instanceName)
- if err != nil {
- return errors.Wrapf(err, "Failed to fetch instance %q in project %q", name, project)
- }
-
- snapshot, err := tx.InstanceSnapshotGet(project, instanceName, snapshotName)
- if err != nil {
- return errors.Wrapf(err, "Failed to fetch snapshot %q of instance %q in project %q", snapshotName, instanceName, project)
- }
-
- c := db.InstanceSnapshotToInstance(instance, snapshot)
- container = &c
- } else {
- container, err = tx.InstanceGet(project, name)
- if err != nil {
- return errors.Wrapf(err, "Failed to fetch container %q in project %q", name, project)
- }
- }
-
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- args := db.ContainerToArgs(container)
- inst, err := instanceLoad(s, args, nil)
- if err != nil {
- return nil, errors.Wrap(err, "Failed to load container")
- }
-
- return inst, nil
-}
-
func instanceLoadByProject(s *state.State, project string) ([]instance.Instance, error) {
// Get all the containers
var cts []db.Instance
@@ -1327,7 +1171,7 @@ func instanceLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile)
if args.Type == instancetype.Container {
inst, err = containerLXCLoad(s, args, profiles)
} else if args.Type == instancetype.VM {
- inst, err = vmQemuLoad(s, args, profiles)
+ inst, err = vmqemu.Load(s, args, profiles)
} else {
return nil, fmt.Errorf("Invalid instance type for instance %s", args.Name)
}
diff --git a/lxd/container_backup.go b/lxd/container_backup.go
index d8cc1f1a53..f47ccca3de 100644
--- a/lxd/container_backup.go
+++ b/lxd/container_backup.go
@@ -10,8 +10,8 @@ import (
"github.com/gorilla/mux"
"github.com/pkg/errors"
- "github.com/lxc/lxd/lxd/backup"
"github.com/lxc/lxd/lxd/db"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/response"
@@ -41,7 +41,7 @@ func containerBackupsGet(d *Daemon, r *http.Request) response.Response {
recursion := util.IsRecursionRequest(r)
- c, err := instanceLoadByProjectAndName(d.State(), project, cname)
+ c, err := instance.LoadByProjectAndName(d.State(), project, cname)
if err != nil {
return response.SmartError(err)
}
@@ -90,7 +90,7 @@ func containerBackupsPost(d *Daemon, r *http.Request) response.Response {
return resp
}
- inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+ inst, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -212,7 +212,7 @@ func containerBackupGet(d *Daemon, r *http.Request) response.Response {
}
fullName := name + shared.SnapshotDelimiter + backupName
- backup, err := backup.LoadByName(d.State(), project, fullName)
+ backup, err := instance.BackupLoadByName(d.State(), project, fullName)
if err != nil {
return response.SmartError(err)
}
@@ -251,7 +251,7 @@ func containerBackupPost(d *Daemon, r *http.Request) response.Response {
}
oldName := name + shared.SnapshotDelimiter + backupName
- backup, err := backup.LoadByName(d.State(), project, oldName)
+ backup, err := instance.BackupLoadByName(d.State(), project, oldName)
if err != nil {
return response.SmartError(err)
}
@@ -299,7 +299,7 @@ func containerBackupDelete(d *Daemon, r *http.Request) response.Response {
}
fullName := name + shared.SnapshotDelimiter + backupName
- backup, err := backup.LoadByName(d.State(), project, fullName)
+ backup, err := instance.BackupLoadByName(d.State(), project, fullName)
if err != nil {
return response.SmartError(err)
}
@@ -345,7 +345,7 @@ func containerBackupExportGet(d *Daemon, r *http.Request) response.Response {
}
fullName := name + shared.SnapshotDelimiter + backupName
- backup, err := backup.LoadByName(d.State(), proj, fullName)
+ backup, err := instance.BackupLoadByName(d.State(), proj, fullName)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_console.go b/lxd/container_console.go
index e63d599dc0..bdb05013b9 100644
--- a/lxd/container_console.go
+++ b/lxd/container_console.go
@@ -277,7 +277,7 @@ func containerConsolePost(d *Daemon, r *http.Request) response.Response {
return operations.ForwardedOperationResponse(project, &opAPI)
}
- inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+ inst, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -345,7 +345,7 @@ func containerConsoleLogGet(d *Daemon, r *http.Request) response.Response {
return response.BadRequest(fmt.Errorf("Querying the console buffer requires liblxc >= 3.0"))
}
- inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+ inst, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -399,7 +399,7 @@ func containerConsoleLogDelete(d *Daemon, r *http.Request) response.Response {
name := mux.Vars(r)["name"]
project := projectParam(r)
- inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+ inst, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_delete.go b/lxd/container_delete.go
index f1e73f4198..de89f8c928 100644
--- a/lxd/container_delete.go
+++ b/lxd/container_delete.go
@@ -6,6 +6,7 @@ import (
"github.com/gorilla/mux"
"github.com/lxc/lxd/lxd/db"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/response"
)
@@ -27,7 +28,7 @@ func containerDelete(d *Daemon, r *http.Request) response.Response {
return resp
}
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_exec.go b/lxd/container_exec.go
index dd2e95c2d6..cb6f224c99 100644
--- a/lxd/container_exec.go
+++ b/lxd/container_exec.go
@@ -372,7 +372,7 @@ func containerExecPost(d *Daemon, r *http.Request) response.Response {
return operations.ForwardedOperationResponse(project, &opAPI)
}
- inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+ inst, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_file.go b/lxd/container_file.go
index ce8fcac1b6..297fea5af2 100644
--- a/lxd/container_file.go
+++ b/lxd/container_file.go
@@ -32,7 +32,7 @@ func containerFileHandler(d *Daemon, r *http.Request) response.Response {
return resp
}
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_get.go b/lxd/container_get.go
index 2ebf879d85..800fcc771a 100644
--- a/lxd/container_get.go
+++ b/lxd/container_get.go
@@ -4,6 +4,7 @@ import (
"net/http"
"github.com/gorilla/mux"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/response"
)
@@ -25,7 +26,7 @@ func containerGet(d *Daemon, r *http.Request) response.Response {
return resp
}
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 462605a404..0fbd39a07c 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -49,12 +49,11 @@ import (
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/containerwriter"
"github.com/lxc/lxd/shared/idmap"
+ log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/netutils"
"github.com/lxc/lxd/shared/osarch"
"github.com/lxc/lxd/shared/units"
-
- log "github.com/lxc/lxd/shared/log15"
)
// Helper functions
@@ -122,105 +121,6 @@ func lxcSetConfigItem(c *lxc.Container, key string, value string) error {
return nil
}
-func lxcParseRawLXC(line string) (string, string, error) {
- // Ignore empty lines
- if len(line) == 0 {
- return "", "", nil
- }
-
- // Skip whitespace {"\t", " "}
- line = strings.TrimLeft(line, "\t ")
-
- // Ignore comments
- if strings.HasPrefix(line, "#") {
- return "", "", nil
- }
-
- // Ensure the format is valid
- membs := strings.SplitN(line, "=", 2)
- if len(membs) != 2 {
- return "", "", fmt.Errorf("Invalid raw.lxc line: %s", line)
- }
-
- key := strings.ToLower(strings.Trim(membs[0], " \t"))
- val := strings.Trim(membs[1], " \t")
- return key, val, nil
-}
-
-func lxcValidConfig(rawLxc string) error {
- for _, line := range strings.Split(rawLxc, "\n") {
- key, _, err := lxcParseRawLXC(line)
- if err != nil {
- return err
- }
-
- if key == "" {
- continue
- }
-
- unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
- if shared.IsTrue(unprivOnly) {
- if key == "lxc.idmap" || key == "lxc.id_map" || key == "lxc.include" {
- return fmt.Errorf("%s can't be set in raw.lxc as LXD was configured to only allow unprivileged containers", key)
- }
- }
-
- // Blacklist some keys
- if key == "lxc.logfile" || key == "lxc.log.file" {
- return fmt.Errorf("Setting lxc.logfile is not allowed")
- }
-
- if key == "lxc.syslog" || key == "lxc.log.syslog" {
- return fmt.Errorf("Setting lxc.log.syslog is not allowed")
- }
-
- if key == "lxc.ephemeral" {
- return fmt.Errorf("Setting lxc.ephemeral is not allowed")
- }
-
- if strings.HasPrefix(key, "lxc.prlimit.") {
- return fmt.Errorf(`Process limits should be set via ` +
- `"limits.kernel.[limit name]" and not ` +
- `directly via "lxc.prlimit.[limit name]"`)
- }
-
- networkKeyPrefix := "lxc.net."
- if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
- networkKeyPrefix = "lxc.network."
- }
-
- if strings.HasPrefix(key, networkKeyPrefix) {
- fields := strings.Split(key, ".")
-
- if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
- // lxc.network.X.ipv4 or lxc.network.X.ipv6
- if len(fields) == 4 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) {
- continue
- }
-
- // lxc.network.X.ipv4.gateway or lxc.network.X.ipv6.gateway
- if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
- continue
- }
- } else {
- // lxc.net.X.ipv4.address or lxc.net.X.ipv6.address
- if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "address" {
- continue
- }
-
- // lxc.net.X.ipv4.gateway or lxc.net.X.ipv6.gateway
- if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
- continue
- }
- }
-
- return fmt.Errorf("Only interface-specific ipv4/ipv6 %s keys are allowed", networkKeyPrefix)
- }
- }
-
- return nil
-}
-
func lxcStatusCode(state lxc.State) api.StatusCode {
return map[int]api.StatusCode{
1: api.Stopped,
@@ -288,7 +188,7 @@ func containerLXCCreate(s *state.State, args db.InstanceArgs) (instance.Instance
}
// Validate expanded config
- err = containerValidConfig(s.OS, c.expandedConfig, false, true)
+ err = instance.ValidConfig(s.OS, c.expandedConfig, false, true)
if err != nil {
c.Delete()
logger.Error("Failed creating container", ctxMap)
@@ -600,99 +500,13 @@ func idmapSize(state *state.State, isolatedStr string, size string) (int64, erro
var idmapLock sync.Mutex
-func parseRawIdmap(value string) ([]idmap.IdmapEntry, error) {
- getRange := func(r string) (int64, int64, error) {
- entries := strings.Split(r, "-")
- if len(entries) > 2 {
- return -1, -1, fmt.Errorf("invalid raw.idmap range %s", r)
- }
-
- base, err := strconv.ParseInt(entries[0], 10, 64)
- if err != nil {
- return -1, -1, err
- }
-
- size := int64(1)
- if len(entries) > 1 {
- size, err = strconv.ParseInt(entries[1], 10, 64)
- if err != nil {
- return -1, -1, err
- }
-
- size -= base
- size += 1
- }
-
- return base, size, nil
- }
-
- ret := idmap.IdmapSet{}
-
- for _, line := range strings.Split(value, "\n") {
- if line == "" {
- continue
- }
-
- entries := strings.Split(line, " ")
- if len(entries) != 3 {
- return nil, fmt.Errorf("invalid raw.idmap line %s", line)
- }
-
- outsideBase, outsideSize, err := getRange(entries[1])
- if err != nil {
- return nil, err
- }
-
- insideBase, insideSize, err := getRange(entries[2])
- if err != nil {
- return nil, err
- }
-
- if insideSize != outsideSize {
- return nil, fmt.Errorf("idmap ranges of different sizes %s", line)
- }
-
- entry := idmap.IdmapEntry{
- Hostid: outsideBase,
- Nsid: insideBase,
- Maprange: insideSize,
- }
-
- switch entries[0] {
- case "both":
- entry.Isuid = true
- entry.Isgid = true
- err := ret.AddSafe(entry)
- if err != nil {
- return nil, err
- }
- case "uid":
- entry.Isuid = true
- err := ret.AddSafe(entry)
- if err != nil {
- return nil, err
- }
- case "gid":
- entry.Isgid = true
- err := ret.AddSafe(entry)
- if err != nil {
- return nil, err
- }
- default:
- return nil, fmt.Errorf("invalid raw.idmap type %s", line)
- }
- }
-
- return ret.Idmap, nil
-}
-
func findIdmap(state *state.State, cName string, isolatedStr string, configBase string, configSize string, rawIdmap string) (*idmap.IdmapSet, int64, error) {
isolated := false
if shared.IsTrue(isolatedStr) {
isolated = true
}
- rawMaps, err := parseRawIdmap(rawIdmap)
+ rawMaps, err := instance.ParseRawIdmap(rawIdmap)
if err != nil {
return nil, 0, err
}
@@ -2394,7 +2208,7 @@ func (c *containerLXC) startCommon() (string, []func() error, error) {
}
// Update the backup.yaml file
- err = writeBackupFile(c)
+ err = instance.WriteBackupFile(c.state, c)
if err != nil {
if ourStart {
c.unmount()
@@ -3327,7 +3141,7 @@ func (c *containerLXC) Backups() ([]backup.Backup, error) {
// Build the backup list
backups := []backup.Backup{}
for _, backupName := range backupNames {
- backup, err := backup.LoadByName(c.state, c.project, backupName)
+ backup, err := instance.BackupLoadByName(c.state, c.project, backupName)
if err != nil {
return nil, err
}
@@ -3484,7 +3298,7 @@ func (c *containerLXC) Restore(sourceContainer instance.Instance, stateful bool)
// The old backup file may be out of date (e.g. it doesn't have all the current snapshots of
// the container listed); let's write a new one to be safe.
- err = writeBackupFile(c)
+ err = instance.WriteBackupFile(c.state, c)
if err != nil {
return err
}
@@ -3613,7 +3427,7 @@ func (c *containerLXC) Delete() error {
} else {
// Remove all snapshots by initialising each snapshot as an Instance and
// calling its Delete function.
- err := instanceDeleteSnapshots(c.state, c.Project(), c.Name())
+ err := instance.DeleteSnapshots(c.state, c.Project(), c.Name())
if err != nil {
logger.Error("Failed to delete instance snapshots", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
return err
@@ -3666,7 +3480,7 @@ func (c *containerLXC) Delete() error {
}
} else {
// Remove all snapshots.
- err := instanceDeleteSnapshots(c.state, c.Project(), c.Name())
+ err := instance.DeleteSnapshots(c.state, c.Project(), c.Name())
if err != nil {
logger.Warn("Failed to delete snapshots", log.Ctx{"name": c.Name(), "err": err})
return err
@@ -4029,96 +3843,6 @@ func (c *containerLXC) VolatileSet(changes map[string]string) error {
return nil
}
-func writeBackupFile(c instance.Instance) error {
- // We only write backup files out for actual containers
- if c.IsSnapshot() {
- return nil
- }
-
- // Immediately return if the container directory doesn't exist yet
- if !shared.PathExists(c.Path()) {
- return os.ErrNotExist
- }
-
- // Generate the YAML
- ci, _, err := c.Render()
- if err != nil {
- return errors.Wrap(err, "Failed to render container metadata")
- }
-
- snapshots, err := c.Snapshots()
- if err != nil {
- return errors.Wrap(err, "Failed to get snapshots")
- }
-
- var sis []*api.InstanceSnapshot
-
- for _, s := range snapshots {
- si, _, err := s.Render()
- if err != nil {
- return err
- }
-
- sis = append(sis, si.(*api.InstanceSnapshot))
- }
-
- poolName, err := c.StoragePool()
- if err != nil {
- return err
- }
-
- s := c.DaemonState()
- poolID, pool, err := s.Cluster.StoragePoolGet(poolName)
- if err != nil {
- return err
- }
-
- dbType := db.StoragePoolVolumeTypeContainer
- if c.Type() == instancetype.VM {
- dbType = db.StoragePoolVolumeTypeVM
- }
-
- _, volume, err := s.Cluster.StoragePoolNodeVolumeGetTypeByProject(c.Project(), c.Name(), dbType, poolID)
- if err != nil {
- return err
- }
-
- data, err := yaml.Marshal(&backup.InstanceConfig{
- Container: ci.(*api.Instance),
- Snapshots: sis,
- Pool: pool,
- Volume: volume,
- })
- if err != nil {
- return err
- }
-
- // Ensure the container is currently mounted
- if !shared.PathExists(c.RootfsPath()) {
- logger.Debug("Unable to update backup.yaml at this time", log.Ctx{"name": c.Name(), "project": c.Project()})
- return nil
- }
-
- // Write the YAML
- f, err := os.Create(filepath.Join(c.Path(), "backup.yaml"))
- if err != nil {
- return err
- }
- defer f.Close()
-
- err = f.Chmod(0400)
- if err != nil {
- return err
- }
-
- err = shared.WriteAll(f, data)
- if err != nil {
- return err
- }
-
- return nil
-}
-
func (c *containerLXC) Update(args db.InstanceArgs, userRequested bool) error {
// Set sane defaults for unset keys
if args.Project == "" {
@@ -4142,7 +3866,7 @@ func (c *containerLXC) Update(args db.InstanceArgs, userRequested bool) error {
}
// Validate the new config
- err := containerValidConfig(c.state.OS, args.Config, false, false)
+ err := instance.ValidConfig(c.state.OS, args.Config, false, false)
if err != nil {
return errors.Wrap(err, "Invalid config")
}
@@ -4333,7 +4057,7 @@ func (c *containerLXC) Update(args db.InstanceArgs, userRequested bool) error {
})
// Do some validation of the config diff
- err = containerValidConfig(c.state.OS, c.expandedConfig, false, true)
+ err = instance.ValidConfig(c.state.OS, c.expandedConfig, false, true)
if err != nil {
return errors.Wrap(err, "Invalid expanded config")
}
@@ -4760,7 +4484,7 @@ func (c *containerLXC) Update(args db.InstanceArgs, userRequested bool) error {
// Only update the backup file if it already exists (indicating the instance is mounted).
if shared.PathExists(filepath.Join(c.Path(), "backup.yaml")) {
- err := writeBackupFile(c)
+ err := instance.WriteBackupFile(c.state, c)
if err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "Failed to write backup file")
}
@@ -4968,7 +4692,7 @@ func (c *containerLXC) Export(w io.Writer, properties map[string]string) error {
var arch string
if c.IsSnapshot() {
parentName, _, _ := shared.InstanceGetParentAndSnapshotName(c.name)
- parent, err := instanceLoadByProjectAndName(c.state, c.project, parentName)
+ parent, err := instance.LoadByProjectAndName(c.state, c.project, parentName)
if err != nil {
ctw.Close()
logger.Error("Failed exporting instance", ctxMap)
@@ -5957,7 +5681,7 @@ func (c *containerLXC) Exec(command []string, env map[string]string, stdin *os.F
// Mitigation for CVE-2019-5736
useRexec := false
if c.expandedConfig["raw.idmap"] != "" {
- err := allowedUnprivilegedOnlyMap(c.expandedConfig["raw.idmap"])
+ err := instance.AllowedUnprivilegedOnlyMap(c.expandedConfig["raw.idmap"])
if err != nil {
useRexec = true
}
@@ -6232,7 +5956,7 @@ func (c *containerLXC) legacyStorage() storage {
}
// getStoragePool returns the current storage pool handle. To avoid a DB lookup each time this
-// function is called, the handle is cached internally in the vmQemu struct.
+// function is called, the handle is cached internally in the containerLXC struct.
func (c *containerLXC) getStoragePool() (storagePools.Pool, error) {
if c.storagePool != nil {
return c.storagePool, nil
@@ -6673,7 +6397,7 @@ func (c *containerLXC) fillNetworkDevice(name string, m deviceConfig.Device) (de
volatileHwaddr := c.localConfig[configKey]
if volatileHwaddr == "" {
// Generate a new MAC address
- volatileHwaddr, err = deviceNextInterfaceHWAddr()
+ volatileHwaddr, err = instance.DeviceNextInterfaceHWAddr()
if err != nil {
return nil, err
}
diff --git a/lxd/container_metadata.go b/lxd/container_metadata.go
index 35d0aac470..5fe5bc428f 100644
--- a/lxd/container_metadata.go
+++ b/lxd/container_metadata.go
@@ -38,7 +38,7 @@ func containerMetadataGet(d *Daemon, r *http.Request) response.Response {
}
// Load the container
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -99,7 +99,7 @@ func containerMetadataPut(d *Daemon, r *http.Request) response.Response {
}
// Load the container
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -153,7 +153,7 @@ func containerMetadataTemplatesGet(d *Daemon, r *http.Request) response.Response
}
// Load the container
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -247,7 +247,7 @@ func containerMetadataTemplatesPostPut(d *Daemon, r *http.Request) response.Resp
}
// Load the container
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -320,7 +320,7 @@ func containerMetadataTemplatesDelete(d *Daemon, r *http.Request) response.Respo
}
// Load the container
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_patch.go b/lxd/container_patch.go
index eecc982ced..2406feeb19 100644
--- a/lxd/container_patch.go
+++ b/lxd/container_patch.go
@@ -11,6 +11,7 @@ import (
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/response"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
@@ -38,7 +39,7 @@ func containerPatch(d *Daemon, r *http.Request) response.Response {
return resp
}
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.NotFound(err)
}
diff --git a/lxd/container_post.go b/lxd/container_post.go
index 6f8d82343c..3fbf68c291 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -138,7 +138,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
return resp
}
- inst, err = instanceLoadByProjectAndName(d.State(), project, name)
+ inst, err = instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -533,7 +533,7 @@ func internalClusterContainerMovedPost(d *Daemon, r *http.Request) response.Resp
// Used after to create the appropriate mounts point after a container has been
// moved.
func containerPostCreateContainerMountPoint(d *Daemon, project, containerName string) error {
- c, err := instanceLoadByProjectAndName(d.State(), project, containerName)
+ c, err := instance.LoadByProjectAndName(d.State(), project, containerName)
if err != nil {
return errors.Wrap(err, "Failed to load moved container on target node")
}
diff --git a/lxd/container_put.go b/lxd/container_put.go
index 0e221afb13..ec4b8c7622 100644
--- a/lxd/container_put.go
+++ b/lxd/container_put.go
@@ -9,6 +9,7 @@ import (
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/response"
"github.com/lxc/lxd/lxd/state"
@@ -42,7 +43,7 @@ func containerPut(d *Daemon, r *http.Request) response.Response {
return resp
}
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.NotFound(err)
}
@@ -115,12 +116,12 @@ func instanceSnapRestore(s *state.State, project, name, snap string, stateful bo
snap = name + shared.SnapshotDelimiter + snap
}
- inst, err := instanceLoadByProjectAndName(s, project, name)
+ inst, err := instance.LoadByProjectAndName(s, project, name)
if err != nil {
return err
}
- source, err := instanceLoadByProjectAndName(s, project, snap)
+ source, err := instance.LoadByProjectAndName(s, project, snap)
if err != nil {
switch err {
case db.ErrNoSuchObject:
diff --git a/lxd/container_snapshot.go b/lxd/container_snapshot.go
index 44daff38aa..81a1fb402b 100644
--- a/lxd/container_snapshot.go
+++ b/lxd/container_snapshot.go
@@ -62,7 +62,7 @@ func containerSnapshotsGet(d *Daemon, r *http.Request) response.Response {
}
}
} else {
- c, err := instanceLoadByProjectAndName(d.State(), project, cname)
+ c, err := instance.LoadByProjectAndName(d.State(), project, cname)
if err != nil {
return response.SmartError(err)
}
@@ -113,7 +113,7 @@ func containerSnapshotsPost(d *Daemon, r *http.Request) response.Response {
* 2. copy the database info over
* 3. copy over the rootfs
*/
- inst, err := instanceLoadByProjectAndName(d.State(), project, name)
+ inst, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -206,7 +206,7 @@ func containerSnapshotHandler(d *Daemon, r *http.Request) response.Response {
if err != nil {
return response.SmartError(err)
}
- inst, err := instanceLoadByProjectAndName(
+ inst, err := instance.LoadByProjectAndName(
d.State(),
project, containerName+
shared.SnapshotDelimiter+
diff --git a/lxd/container_state.go b/lxd/container_state.go
index 3b33c68c48..2c55c022ad 100644
--- a/lxd/container_state.go
+++ b/lxd/container_state.go
@@ -9,6 +9,7 @@ import (
"github.com/gorilla/mux"
"github.com/lxc/lxd/lxd/db"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/response"
"github.com/lxc/lxd/shared"
@@ -33,7 +34,7 @@ func containerState(d *Daemon, r *http.Request) response.Response {
return resp
}
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
@@ -76,7 +77,7 @@ func containerStatePut(d *Daemon, r *http.Request) response.Response {
// Don't mess with containers while in setup mode
<-d.readyChan
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return response.SmartError(err)
}
diff --git a/lxd/container_test.go b/lxd/container_test.go
index 40bb744df6..eb258f116b 100644
--- a/lxd/container_test.go
+++ b/lxd/container_test.go
@@ -8,6 +8,7 @@ import (
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
driver "github.com/lxc/lxd/lxd/storage"
"github.com/lxc/lxd/shared"
@@ -133,7 +134,7 @@ func (suite *containerTestSuite) TestContainer_LoadFromDB() {
defer c.Delete()
// Load the container and trigger initLXC()
- c2, err := instanceLoadByProjectAndName(suite.d.State(), "default", "testFoo")
+ c2, err := instance.LoadByProjectAndName(suite.d.State(), "default", "testFoo")
c2.IsRunning()
suite.Req.Nil(err)
_, err = c2.StorageStart()
diff --git a/lxd/containers.go b/lxd/containers.go
index 67a899ef57..874848c612 100644
--- a/lxd/containers.go
+++ b/lxd/containers.go
@@ -14,8 +14,6 @@ import (
"github.com/lxc/lxd/lxd/state"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/logger"
-
- log "github.com/lxc/lxd/shared/log15"
)
var instancesCmd = APIEndpoint{
@@ -406,25 +404,3 @@ func containersShutdown(s *state.State) error {
return nil
}
-
-// instanceDeleteSnapshots calls the Delete() function on each of the supplied instance's snapshots.
-func instanceDeleteSnapshots(s *state.State, projectName, instanceName string) error {
- results, err := s.Cluster.ContainerGetSnapshots(projectName, instanceName)
- if err != nil {
- return err
- }
-
- for _, snapName := range results {
- snapInst, err := instanceLoadByProjectAndName(s, projectName, snapName)
- if err != nil {
- logger.Error("instanceDeleteSnapshots: Failed to load the snapshot", log.Ctx{"project": projectName, "instance": instanceName, "snapshot": snapName, "err": err})
- continue
- }
-
- if err := snapInst.Delete(); err != nil {
- logger.Error("instanceDeleteSnapshots: Failed to delete the snapshot", log.Ctx{"project": projectName, "instance": instanceName, "snapshot": snapName, "err": err})
- }
- }
-
- return nil
-}
diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index c8174cec9f..ab9daf5544 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -297,7 +297,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
// Early check for refresh.
if req.Source.Refresh {
// Check if the instance exists.
- inst, err = instanceLoadByProjectAndName(d.State(), project, req.Name)
+ inst, err = instance.LoadByProjectAndName(d.State(), project, req.Name)
if err != nil {
req.Source.Refresh = false
} else if inst.IsRunning() {
@@ -489,7 +489,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) response.
}
targetProject := project
- source, err := instanceLoadByProjectAndName(d.State(), sourceProject, req.Source.Source)
+ source, err := instance.LoadByProjectAndName(d.State(), sourceProject, req.Source.Source)
if err != nil {
return response.SmartError(err)
}
@@ -594,7 +594,7 @@ func createFromCopy(d *Daemon, project string, req *api.InstancesPost) response.
// Early check for refresh
if req.Source.Refresh {
// Check if the container exists
- c, err := instanceLoadByProjectAndName(d.State(), targetProject, req.Name)
+ c, err := instance.LoadByProjectAndName(d.State(), targetProject, req.Name)
if err != nil {
req.Source.Refresh = false
} else if c.IsRunning() {
@@ -776,7 +776,7 @@ func createFromBackup(d *Daemon, project string, data io.Reader, pool string) re
return fmt.Errorf("Internal import request: %v", resp.String())
}
- c, err := instanceLoadByProjectAndName(d.State(), project, bInfo.Name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, bInfo.Name)
if err != nil {
return errors.Wrap(err, "Load instance")
}
diff --git a/lxd/devices.go b/lxd/devices.go
index f286288331..fb8a84f733 100644
--- a/lxd/devices.go
+++ b/lxd/devices.go
@@ -1,11 +1,8 @@
package main
import (
- "bytes"
- "crypto/rand"
"fmt"
"io/ioutil"
- "math/big"
"os"
"path"
"sort"
@@ -532,21 +529,3 @@ func devicesRegister(s *state.State) {
}
}
}
-
-func deviceNextInterfaceHWAddr() (string, error) {
- // Generate a new random MAC address using the usual prefix
- ret := bytes.Buffer{}
- for _, c := range "00:16:3e:xx:xx:xx" {
- if c == 'x' {
- c, err := rand.Int(rand.Reader, big.NewInt(16))
- if err != nil {
- return "", err
- }
- ret.WriteString(fmt.Sprintf("%x", c.Int64()))
- } else {
- ret.WriteString(string(c))
- }
- }
-
- return ret.String(), nil
-}
diff --git a/lxd/devlxd.go b/lxd/devlxd.go
index 7e55e1783b..57970454c4 100644
--- a/lxd/devlxd.go
+++ b/lxd/devlxd.go
@@ -356,7 +356,7 @@ func findContainerForPid(pid int32, s *state.State) (*containerLXC, error) {
name = fields[1]
}
- inst, err := instanceLoadByProjectAndName(s, project, name)
+ inst, err := instance.LoadByProjectAndName(s, project, name)
if err != nil {
return nil, err
}
diff --git a/lxd/images.go b/lxd/images.go
index 95dec7e775..54be2cde1e 100644
--- a/lxd/images.go
+++ b/lxd/images.go
@@ -28,6 +28,7 @@ import (
lxd "github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/db"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/lxd/operations"
@@ -188,7 +189,7 @@ func imgPostContInfo(d *Daemon, r *http.Request, req api.ImagesPost, op *operati
info.Public = false
}
- c, err := instanceLoadByProjectAndName(d.State(), project, name)
+ c, err := instance.LoadByProjectAndName(d.State(), project, name)
if err != nil {
return nil, err
}
diff --git a/lxd/instance/instance_utils.go b/lxd/instance/instance_utils.go
index 0780843927..475b496313 100644
--- a/lxd/instance/instance_utils.go
+++ b/lxd/instance/instance_utils.go
@@ -1,11 +1,47 @@
package instance
import (
+ "bytes"
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
"time"
+ "github.com/pkg/errors"
+ yaml "gopkg.in/yaml.v2"
+
+ "github.com/lxc/lxd/lxd/backup"
+ "github.com/lxc/lxd/lxd/db"
+ deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance/instancetype"
+ "github.com/lxc/lxd/lxd/seccomp"
+ "github.com/lxc/lxd/lxd/state"
+ "github.com/lxc/lxd/lxd/sys"
+ "github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/shared"
+ "github.com/lxc/lxd/shared/api"
+ "github.com/lxc/lxd/shared/idmap"
+ log "github.com/lxc/lxd/shared/log15"
+ "github.com/lxc/lxd/shared/logger"
+ "github.com/lxc/lxd/shared/osarch"
)
+// ValidDevices is linked from main.instanceValidDevices to validate device config. Currently
+// main.instanceValidDevices uses containerLXC internally and so cannot be moved from main package.
+var ValidDevices func(state *state.State, cluster *db.Cluster, instanceType instancetype.Type, instanceName string, devices deviceConfig.Devices, expanded bool) error
+
+// Load is linked from main.instanceLoad to allow different instance types to be loaded,
+// including containerLXC which currently cannot be moved from main package.
+var Load func(s *state.State, args db.InstanceArgs, profiles []api.Profile) (Instance, error)
+
+// NetworkGetLeaseAddresses is linked from main.networkGetLeaseAddresses to limit scope of moving
+// network related functions into their own package at this time.
+var NetworkGetLeaseAddresses func(s *state.State, network string, hwaddr string) ([]api.InstanceStateNetworkAddress, error)
+
// CompareSnapshots returns a list of snapshots to sync to the target and a list of
// snapshots to remove from the target. A snapshot will be marked as "to sync" if it either doesn't
// exist in the target or its creation date is different to the source. A snapshot will be marked
@@ -68,3 +104,495 @@ func CompareSnapshots(source Instance, target Instance) ([]Instance, []Instance,
return toSync, toDelete, nil
}
+
+// ValidConfig validates an instance's config.
+func ValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded bool) error {
+ if config == nil {
+ return nil
+ }
+
+ for k, v := range config {
+ if profile && strings.HasPrefix(k, "volatile.") {
+ return fmt.Errorf("Volatile keys can only be set on containers")
+ }
+
+ if profile && strings.HasPrefix(k, "image.") {
+ return fmt.Errorf("Image keys can only be set on containers")
+ }
+
+ err := validConfigKey(sysOS, k, v)
+ if err != nil {
+ return err
+ }
+ }
+
+ _, rawSeccomp := config["raw.seccomp"]
+ _, whitelist := config["security.syscalls.whitelist"]
+ _, blacklist := config["security.syscalls.blacklist"]
+ blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"])
+ blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"])
+
+ if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) {
+ return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*")
+ }
+
+ if whitelist && (blacklist || blacklistDefault || blacklistCompat) {
+ return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*")
+ }
+
+ _, err := seccomp.SyscallInterceptMountFilter(config)
+ if err != nil {
+ return err
+ }
+
+ if expanded && (config["security.privileged"] == "" || !shared.IsTrue(config["security.privileged"])) && sysOS.IdmapSet == nil {
+ return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported")
+ }
+
+ unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
+ if shared.IsTrue(unprivOnly) {
+ if config["raw.idmap"] != "" {
+ err := AllowedUnprivilegedOnlyMap(config["raw.idmap"])
+ if err != nil {
+ return err
+ }
+ }
+
+ if shared.IsTrue(config["security.privileged"]) {
+ return fmt.Errorf("LXD was configured to only allow unprivileged containers")
+ }
+ }
+
+ return nil
+}
+
+func validConfigKey(os *sys.OS, key string, value string) error {
+ f, err := shared.ConfigKeyChecker(key)
+ if err != nil {
+ return err
+ }
+ if err = f(value); err != nil {
+ return err
+ }
+ if key == "raw.lxc" {
+ return lxcValidConfig(value)
+ }
+ if key == "security.syscalls.blacklist_compat" {
+ for _, arch := range os.Architectures {
+ if arch == osarch.ARCH_64BIT_INTEL_X86 ||
+ arch == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN ||
+ arch == osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN {
+ return nil
+ }
+ }
+ return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture")
+ }
+ return nil
+}
+
+func lxcParseRawLXC(line string) (string, string, error) {
+ // Ignore empty lines
+ if len(line) == 0 {
+ return "", "", nil
+ }
+
+ // Skip whitespace {"\t", " "}
+ line = strings.TrimLeft(line, "\t ")
+
+ // Ignore comments
+ if strings.HasPrefix(line, "#") {
+ return "", "", nil
+ }
+
+ // Ensure the format is valid
+ membs := strings.SplitN(line, "=", 2)
+ if len(membs) != 2 {
+ return "", "", fmt.Errorf("Invalid raw.lxc line: %s", line)
+ }
+
+ key := strings.ToLower(strings.Trim(membs[0], " \t"))
+ val := strings.Trim(membs[1], " \t")
+ return key, val, nil
+}
+
+func lxcValidConfig(rawLxc string) error {
+ for _, line := range strings.Split(rawLxc, "\n") {
+ key, _, err := lxcParseRawLXC(line)
+ if err != nil {
+ return err
+ }
+
+ if key == "" {
+ continue
+ }
+
+ unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY")
+ if shared.IsTrue(unprivOnly) {
+ if key == "lxc.idmap" || key == "lxc.id_map" || key == "lxc.include" {
+ return fmt.Errorf("%s can't be set in raw.lxc as LXD was configured to only allow unprivileged containers", key)
+ }
+ }
+
+ // Blacklist some keys
+ if key == "lxc.logfile" || key == "lxc.log.file" {
+ return fmt.Errorf("Setting lxc.logfile is not allowed")
+ }
+
+ if key == "lxc.syslog" || key == "lxc.log.syslog" {
+ return fmt.Errorf("Setting lxc.log.syslog is not allowed")
+ }
+
+ if key == "lxc.ephemeral" {
+ return fmt.Errorf("Setting lxc.ephemeral is not allowed")
+ }
+
+ if strings.HasPrefix(key, "lxc.prlimit.") {
+ return fmt.Errorf(`Process limits should be set via ` +
+ `"limits.kernel.[limit name]" and not ` +
+ `directly via "lxc.prlimit.[limit name]"`)
+ }
+
+ networkKeyPrefix := "lxc.net."
+ if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+ networkKeyPrefix = "lxc.network."
+ }
+
+ if strings.HasPrefix(key, networkKeyPrefix) {
+ fields := strings.Split(key, ".")
+
+ if !util.RuntimeLiblxcVersionAtLeast(2, 1, 0) {
+ // lxc.network.X.ipv4 or lxc.network.X.ipv6
+ if len(fields) == 4 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) {
+ continue
+ }
+
+ // lxc.network.X.ipv4.gateway or lxc.network.X.ipv6.gateway
+ if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
+ continue
+ }
+ } else {
+ // lxc.net.X.ipv4.address or lxc.net.X.ipv6.address
+ if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "address" {
+ continue
+ }
+
+ // lxc.net.X.ipv4.gateway or lxc.net.X.ipv6.gateway
+ if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" {
+ continue
+ }
+ }
+
+ return fmt.Errorf("Only interface-specific ipv4/ipv6 %s keys are allowed", networkKeyPrefix)
+ }
+ }
+
+ return nil
+}
+
+// AllowedUnprivilegedOnlyMap checks that the root user is not mapped into the instance.
+func AllowedUnprivilegedOnlyMap(rawIdmap string) error {
+ rawMaps, err := ParseRawIdmap(rawIdmap)
+ if err != nil {
+ return err
+ }
+
+ for _, ent := range rawMaps {
+ if ent.Hostid == 0 {
+ return fmt.Errorf("Cannot map root user into container as LXD was configured to only allow unprivileged containers")
+ }
+ }
+
+ return nil
+}
+
+// ParseRawIdmap parses an IDMAP string.
+func ParseRawIdmap(value string) ([]idmap.IdmapEntry, error) {
+ getRange := func(r string) (int64, int64, error) {
+ entries := strings.Split(r, "-")
+ if len(entries) > 2 {
+ return -1, -1, fmt.Errorf("invalid raw.idmap range %s", r)
+ }
+
+ base, err := strconv.ParseInt(entries[0], 10, 64)
+ if err != nil {
+ return -1, -1, err
+ }
+
+ size := int64(1)
+ if len(entries) > 1 {
+ size, err = strconv.ParseInt(entries[1], 10, 64)
+ if err != nil {
+ return -1, -1, err
+ }
+
+ size -= base
+ size++
+ }
+
+ return base, size, nil
+ }
+
+ ret := idmap.IdmapSet{}
+
+ for _, line := range strings.Split(value, "\n") {
+ if line == "" {
+ continue
+ }
+
+ entries := strings.Split(line, " ")
+ if len(entries) != 3 {
+ return nil, fmt.Errorf("invalid raw.idmap line %s", line)
+ }
+
+ outsideBase, outsideSize, err := getRange(entries[1])
+ if err != nil {
+ return nil, err
+ }
+
+ insideBase, insideSize, err := getRange(entries[2])
+ if err != nil {
+ return nil, err
+ }
+
+ if insideSize != outsideSize {
+ return nil, fmt.Errorf("idmap ranges of different sizes %s", line)
+ }
+
+ entry := idmap.IdmapEntry{
+ Hostid: outsideBase,
+ Nsid: insideBase,
+ Maprange: insideSize,
+ }
+
+ switch entries[0] {
+ case "both":
+ entry.Isuid = true
+ entry.Isgid = true
+ err := ret.AddSafe(entry)
+ if err != nil {
+ return nil, err
+ }
+ case "uid":
+ entry.Isuid = true
+ err := ret.AddSafe(entry)
+ if err != nil {
+ return nil, err
+ }
+ case "gid":
+ entry.Isgid = true
+ err := ret.AddSafe(entry)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, fmt.Errorf("invalid raw.idmap type %s", line)
+ }
+ }
+
+ return ret.Idmap, nil
+}
+
+// LoadByID loads an instance by ID.
+func LoadByID(s *state.State, id int) (Instance, error) {
+ // Get the DB record
+ project, name, err := s.Cluster.ContainerProjectAndName(id)
+ if err != nil {
+ return nil, err
+ }
+
+ return LoadByProjectAndName(s, project, name)
+}
+
+// LoadByProjectAndName loads an instance by project and name.
+func LoadByProjectAndName(s *state.State, project, name string) (Instance, error) {
+ // Get the DB record
+ var container *db.Instance
+ err := s.Cluster.Transaction(func(tx *db.ClusterTx) error {
+ var err error
+
+ if strings.Contains(name, shared.SnapshotDelimiter) {
+ parts := strings.SplitN(name, shared.SnapshotDelimiter, 2)
+ instanceName := parts[0]
+ snapshotName := parts[1]
+
+ instance, err := tx.InstanceGet(project, instanceName)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to fetch instance %q in project %q", name, project)
+ }
+
+ snapshot, err := tx.InstanceSnapshotGet(project, instanceName, snapshotName)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to fetch snapshot %q of instance %q in project %q", snapshotName, instanceName, project)
+ }
+
+ c := db.InstanceSnapshotToInstance(instance, snapshot)
+ container = &c
+ } else {
+ container, err = tx.InstanceGet(project, name)
+ if err != nil {
+ return errors.Wrapf(err, "Failed to fetch container %q in project %q", name, project)
+ }
+ }
+
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ args := db.ContainerToArgs(container)
+ inst, err := Load(s, args, nil)
+ if err != nil {
+ return nil, errors.Wrap(err, "Failed to load container")
+ }
+
+ return inst, nil
+}
+
+// WriteBackupFile writes instance's config to a file.
+func WriteBackupFile(state *state.State, inst Instance) error {
+ // We only write backup files out for actual instances.
+ if inst.IsSnapshot() {
+ return nil
+ }
+
+ // Immediately return if the instance directory doesn't exist yet.
+ if !shared.PathExists(inst.Path()) {
+ return os.ErrNotExist
+ }
+
+ // Generate the YAML.
+ ci, _, err := inst.Render()
+ if err != nil {
+ return errors.Wrap(err, "Failed to render instance metadata")
+ }
+
+ snapshots, err := inst.Snapshots()
+ if err != nil {
+ return errors.Wrap(err, "Failed to get snapshots")
+ }
+
+ var sis []*api.InstanceSnapshot
+
+ for _, s := range snapshots {
+ si, _, err := s.Render()
+ if err != nil {
+ return err
+ }
+
+ sis = append(sis, si.(*api.InstanceSnapshot))
+ }
+
+ poolName, err := inst.StoragePool()
+ if err != nil {
+ return err
+ }
+
+ poolID, pool, err := state.Cluster.StoragePoolGet(poolName)
+ if err != nil {
+ return err
+ }
+
+ dbType := db.StoragePoolVolumeTypeContainer
+ if inst.Type() == instancetype.VM {
+ dbType = db.StoragePoolVolumeTypeVM
+ }
+
+ _, volume, err := state.Cluster.StoragePoolNodeVolumeGetTypeByProject(inst.Project(), inst.Name(), dbType, poolID)
+ if err != nil {
+ return err
+ }
+
+ data, err := yaml.Marshal(&backup.InstanceConfig{
+ Container: ci.(*api.Instance),
+ Snapshots: sis,
+ Pool: pool,
+ Volume: volume,
+ })
+ if err != nil {
+ return err
+ }
+
+ // Ensure the container is currently mounted.
+ if !shared.PathExists(inst.RootfsPath()) {
+ logger.Debug("Unable to update backup.yaml at this time", log.Ctx{"name": inst.Name(), "project": inst.Project()})
+ return nil
+ }
+
+ // Write the YAML
+ f, err := os.Create(filepath.Join(inst.Path(), "backup.yaml"))
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ err = f.Chmod(0400)
+ if err != nil {
+ return err
+ }
+
+ err = shared.WriteAll(f, data)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// DeleteSnapshots calls the Delete() function on each of the supplied instance's snapshots.
+func DeleteSnapshots(s *state.State, projectName, instanceName string) error {
+ results, err := s.Cluster.ContainerGetSnapshots(projectName, instanceName)
+ if err != nil {
+ return err
+ }
+
+ for _, snapName := range results {
+ snapInst, err := LoadByProjectAndName(s, projectName, snapName)
+ if err != nil {
+ logger.Error("DeleteSnapshots: Failed to load the snapshot", log.Ctx{"project": projectName, "instance": instanceName, "snapshot": snapName, "err": err})
+ continue
+ }
+
+ if err := snapInst.Delete(); err != nil {
+ logger.Error("DeleteSnapshots: Failed to delete the snapshot", log.Ctx{"project": projectName, "instance": instanceName, "snapshot": snapName, "err": err})
+ }
+ }
+
+ return nil
+}
+
+// DeviceNextInterfaceHWAddr generates a random MAC address.
+func DeviceNextInterfaceHWAddr() (string, error) {
+ // Generate a new random MAC address using the usual prefix
+ ret := bytes.Buffer{}
+ for _, c := range "00:16:3e:xx:xx:xx" {
+ if c == 'x' {
+ c, err := rand.Int(rand.Reader, big.NewInt(16))
+ if err != nil {
+ return "", err
+ }
+ ret.WriteString(fmt.Sprintf("%x", c.Int64()))
+ } else {
+ ret.WriteString(string(c))
+ }
+ }
+
+ return ret.String(), nil
+}
+
+// BackupLoadByName loads an instance backup from the database.
+func BackupLoadByName(s *state.State, project, name string) (*backup.Backup, error) {
+ // Get the backup database record
+ args, err := s.Cluster.ContainerGetBackup(project, name)
+ if err != nil {
+ return nil, errors.Wrap(err, "Load backup from database")
+ }
+
+ // Load the instance it belongs to
+ instance, err := LoadByID(s, args.InstanceID)
+ if err != nil {
+ return nil, errors.Wrap(err, "Load container from database")
+ }
+
+ return backup.New(s, instance, args.ID, name, args.CreationDate, args.ExpiryDate, args.InstanceOnly, args.OptimizedStorage), nil
+}
diff --git a/lxd/main_activateifneeded.go b/lxd/main_activateifneeded.go
index 083a5bab77..1322c12f4a 100644
--- a/lxd/main_activateifneeded.go
+++ b/lxd/main_activateifneeded.go
@@ -10,6 +10,7 @@ import (
lxd "github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/db"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/node"
"github.com/lxc/lxd/shared"
@@ -122,7 +123,7 @@ func (c *cmdActivateifneeded) Run(cmd *cobra.Command, args []string) error {
}
for _, container := range containers {
- c, err := instanceLoadByProjectAndName(d.State(), container.Project, container.Name)
+ c, err := instance.LoadByProjectAndName(d.State(), container.Project, container.Name)
if err != nil {
sqldb.Close()
return err
diff --git a/lxd/migrate_container.go b/lxd/migrate_container.go
index 7f019c2e29..aba286de82 100644
--- a/lxd/migrate_container.go
+++ b/lxd/migrate_container.go
@@ -998,7 +998,7 @@ func (c *migrationSink) Do(state *state.State, migrateOp *operations.Operation)
// Check if snapshot exists already and if not then create
// a new snapshot DB record so that the storage layer can
// populate the volume on the storage device.
- _, err := instanceLoadByProjectAndName(args.Instance.DaemonState(), args.Instance.Project(), snapArgs.Name)
+ _, err := instance.LoadByProjectAndName(args.Instance.DaemonState(), args.Instance.Project(), snapArgs.Name)
if err != nil {
// Create the snapshot as it doesn't seem to exist.
_, err := instanceCreateInternal(state, snapArgs)
diff --git a/lxd/patches.go b/lxd/patches.go
index 6ddea12f2d..99923e43cf 100644
--- a/lxd/patches.go
+++ b/lxd/patches.go
@@ -17,6 +17,7 @@ import (
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/db/query"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/rsync"
driver "github.com/lxc/lxd/lxd/storage"
"github.com/lxc/lxd/shared"
@@ -1153,7 +1154,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
}
// Load the container from the database.
- ctStruct, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+ ctStruct, err := instance.LoadByProjectAndName(d.State(), "default", ct)
if err != nil {
logger.Errorf("Failed to load LVM container %s: %s", ct, err)
return err
@@ -1306,7 +1307,7 @@ func upgradeFromStorageTypeLvm(name string, d *Daemon, defaultPoolName string, d
}
// Load the snapshot from the database.
- csStruct, err := instanceLoadByProjectAndName(d.State(), "default", cs)
+ csStruct, err := instance.LoadByProjectAndName(d.State(), "default", cs)
if err != nil {
logger.Errorf("Failed to load LVM container %s: %s", cs, err)
return err
@@ -1881,7 +1882,7 @@ func updatePoolPropertyForAllObjects(d *Daemon, poolName string, allcontainers [
// Make sure all containers and snapshots have a valid disk configuration
for _, ct := range allcontainers {
- c, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+ c, err := instance.LoadByProjectAndName(d.State(), "default", ct)
if err != nil {
continue
}
@@ -1984,7 +1985,7 @@ func patchContainerConfigRegen(name string, d *Daemon) error {
for _, ct := range cts {
// Load the container from the database.
- c, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+ c, err := instance.LoadByProjectAndName(d.State(), "default", ct)
if err != nil {
logger.Errorf("Failed to open container '%s': %v", ct, err)
continue
@@ -2763,7 +2764,7 @@ func patchDevicesNewNamingScheme(name string, d *Daemon) error {
}
// Load the container from the database.
- c, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+ c, err := instance.LoadByProjectAndName(d.State(), "default", ct)
if err != nil {
logger.Errorf("Failed to load container %s: %s", ct, err)
return err
@@ -2985,7 +2986,7 @@ func patchStorageApiPermissions(name string, d *Daemon) error {
for _, ct := range cRegular {
// load the container from the database
- ctStruct, err := instanceLoadByProjectAndName(d.State(), "default", ct)
+ ctStruct, err := instance.LoadByProjectAndName(d.State(), "default", ct)
if err != nil {
return err
}
diff --git a/lxd/profiles.go b/lxd/profiles.go
index 91b488dc6d..8f2047454a 100644
--- a/lxd/profiles.go
+++ b/lxd/profiles.go
@@ -15,6 +15,7 @@ import (
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/db"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/response"
"github.com/lxc/lxd/lxd/util"
@@ -103,7 +104,7 @@ func profilesPost(d *Daemon, r *http.Request) response.Response {
return response.BadRequest(fmt.Errorf("Invalid profile name '%s'", req.Name))
}
- err := containerValidConfig(d.os, req.Config, true, false)
+ err := instance.ValidConfig(d.os, req.Config, true, false)
if err != nil {
return response.BadRequest(err)
}
diff --git a/lxd/profiles_utils.go b/lxd/profiles_utils.go
index 3f8d8d5a54..d949ab2976 100644
--- a/lxd/profiles_utils.go
+++ b/lxd/profiles_utils.go
@@ -7,6 +7,7 @@ import (
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/db/query"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
+ "github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
@@ -15,7 +16,7 @@ import (
func doProfileUpdate(d *Daemon, project, name string, id int64, profile *api.Profile, req api.ProfilePut) error {
// Sanity checks
- err := containerValidConfig(d.os, req.Config, true, false)
+ err := instance.ValidConfig(d.os, req.Config, true, false)
if err != nil {
return err
}
diff --git a/lxd/storage.go b/lxd/storage.go
index 2ac2fa3230..86ad48d88a 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -473,7 +473,7 @@ func storagePoolVolumeAttachInit(s *state.State, poolName string, volumeName str
if len(volumeUsedBy) > 1 {
for _, ctName := range volumeUsedBy {
- instt, err := instanceLoadByProjectAndName(s, c.Project(), ctName)
+ instt, err := instance.LoadByProjectAndName(s, c.Project(), ctName)
if err != nil {
continue
}
diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 932e17b363..60eebee01e 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -1187,14 +1187,14 @@ func (s *storageBtrfs) ContainerCopy(target instance.Instance, source instance.I
}
for _, snap := range snapshots {
- sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
+ sourceSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), snap.Name())
if err != nil {
return err
}
_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
- targetSnapshot, err := instanceLoadByProjectAndName(s.s, target.Project(), newSnapName)
+ targetSnapshot, err := instance.LoadByProjectAndName(s.s, target.Project(), newSnapName)
if err != nil {
return err
}
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index cb39f0a72c..6596bdbd5d 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -2867,7 +2867,7 @@ func (s *storageCeph) MigrationSource(args MigrationSourceArgs) (MigrationStorag
}
lxdName := fmt.Sprintf("%s%s%s", instanceName, shared.SnapshotDelimiter, snap[len("snapshot_"):])
- snapshot, err := instanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
+ snapshot, err := instance.LoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
if err != nil {
logger.Errorf(`Failed to load snapshot "%s" for RBD storage volume "%s" on storage pool "%s": %s`, lxdName, instanceName, s.pool.Name, err)
return nil, err
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index e9db7b8c23..52e01e1814 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -798,14 +798,14 @@ func (s *storageDir) doContainerCopy(target instance.Instance, source instance.I
}
for _, snap := range snapshots {
- sourceSnapshot, err := instanceLoadByProjectAndName(srcState, source.Project(), snap.Name())
+ sourceSnapshot, err := instance.LoadByProjectAndName(srcState, source.Project(), snap.Name())
if err != nil {
return err
}
_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
- targetSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), newSnapName)
+ targetSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), newSnapName)
if err != nil {
return err
}
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index e13a9652f6..2c35f0fecd 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -1203,12 +1203,12 @@ func (s *storageLvm) doContainerCopy(target instance.Instance, source instance.I
logger.Debugf("Copying LVM container storage for snapshot %s to %s", snap.Name(), newSnapName)
- sourceSnapshot, err := instanceLoadByProjectAndName(srcState, source.Project(), snap.Name())
+ sourceSnapshot, err := instance.LoadByProjectAndName(srcState, source.Project(), snap.Name())
if err != nil {
return err
}
- targetSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), newSnapName)
+ targetSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), newSnapName)
if err != nil {
return err
}
diff --git a/lxd/storage_migration.go b/lxd/storage_migration.go
index da6e467d7f..8d46d47e1f 100644
--- a/lxd/storage_migration.go
+++ b/lxd/storage_migration.go
@@ -353,7 +353,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operations.Operation, args Mig
}
// Try to load the instance
- s, err := instanceLoadByProjectAndName(args.Instance.DaemonState(),
+ s, err := instance.LoadByProjectAndName(args.Instance.DaemonState(),
args.Instance.Project(), snapArgs.Name)
if err != nil {
// Create the snapshot since it doesn't seem to exist
@@ -430,7 +430,7 @@ func rsyncMigrationSink(conn *websocket.Conn, op *operations.Operation, args Mig
}
}
- _, err = instanceLoadByProjectAndName(args.Instance.DaemonState(),
+ _, err = instance.LoadByProjectAndName(args.Instance.DaemonState(),
args.Instance.Project(), snapArgs.Name)
if err != nil {
_, err = instanceCreateAsSnapshot(args.Instance.DaemonState(), snapArgs, args.Instance, op)
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index 5b15d6694a..16b74562ea 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -1318,7 +1318,7 @@ func (s *storageZfs) ContainerCopy(target instance.Instance, source instance.Ins
prev = snapshots[i-1].Name()
}
- sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
+ sourceSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), snap.Name())
if err != nil {
return err
}
@@ -1326,7 +1326,7 @@ func (s *storageZfs) ContainerCopy(target instance.Instance, source instance.Ins
_, snapOnlyName, _ := shared.InstanceGetParentAndSnapshotName(snap.Name())
prevSnapOnlyName = snapOnlyName
newSnapName := fmt.Sprintf("%s/%s", target.Name(), snapOnlyName)
- targetSnapshot, err := instanceLoadByProjectAndName(s.s, target.Project(), newSnapName)
+ targetSnapshot, err := instance.LoadByProjectAndName(s.s, target.Project(), newSnapName)
if err != nil {
return err
}
@@ -1955,7 +1955,7 @@ func (s *storageZfs) doContainerBackupCreateOptimized(tmpPath string, backup bac
prev = snapshots[i-1].Name()
}
- sourceSnapshot, err := instanceLoadByProjectAndName(s.s, source.Project(), snap.Name())
+ sourceSnapshot, err := instance.LoadByProjectAndName(s.s, source.Project(), snap.Name())
if err != nil {
return err
}
@@ -2566,7 +2566,7 @@ func (s *storageZfs) MigrationSource(args MigrationSourceArgs) (MigrationStorage
}
lxdName := fmt.Sprintf("%s%s%s", args.Instance.Name(), shared.SnapshotDelimiter, snap[len("snapshot-"):])
- snapshot, err := instanceLoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
+ snapshot, err := instance.LoadByProjectAndName(s.s, args.Instance.Project(), lxdName)
if err != nil {
return nil, err
}
More information about the lxc-devel
mailing list