[lxc-devel] [lxd/master] VM Create

tomponline on Github lxc-bot at linuxcontainers.org
Wed Nov 6 17:11:11 UTC 2019


A non-text attachment was scrubbed...
Name: not available
Type: text/x-mailbox
Size: 555 bytes
Desc: not available
URL: <http://lists.linuxcontainers.org/pipermail/lxc-devel/attachments/20191106/6c4f6c94/attachment-0001.bin>
-------------- next part --------------
From 840aa05a04203b247b15a886d669766e0b514cd5 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 09:13:06 +0000
Subject: [PATCH 01/30] lxc/init: Adds vm flag to init command

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxc/init.go | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/lxc/init.go b/lxc/init.go
index 4a1cee0948..9e749bf935 100644
--- a/lxc/init.go
+++ b/lxc/init.go
@@ -30,6 +30,7 @@ type cmdInit struct {
 	flagType       string
 	flagNoProfiles bool
 	flagEmpty      bool
+	flagVM         bool
 }
 
 func (c *cmdInit) Command() *cobra.Command {
@@ -53,6 +54,7 @@ lxc init ubuntu:16.04 u1 < config.yaml
 	cmd.Flags().StringVar(&c.flagTarget, "target", "", i18n.G("Cluster member name")+"``")
 	cmd.Flags().BoolVar(&c.flagNoProfiles, "no-profiles", false, i18n.G("Create the container with no profiles applied"))
 	cmd.Flags().BoolVar(&c.flagEmpty, "empty", false, i18n.G("Create an empty container"))
+	cmd.Flags().BoolVar(&c.flagVM, "vm", false, i18n.G("Create virtual machine"))
 
 	return cmd
 }
@@ -150,7 +152,7 @@ func (c *cmdInit) create(conf *config.Config, args []string) (lxd.InstanceServer
 
 	if !c.global.flagQuiet {
 		if name == "" {
-			fmt.Printf(i18n.G("Creating the container") + "\n")
+			fmt.Printf(i18n.G("Creating the instance") + "\n")
 		} else {
 			fmt.Printf(i18n.G("Creating %s")+"\n", name)
 		}
@@ -203,10 +205,17 @@ func (c *cmdInit) create(conf *config.Config, args []string) (lxd.InstanceServer
 		}
 	}
 
-	// Setup container creation request
+	// Decide whether we are creating a container or a virtual machine.
+	instanceDBType := api.InstanceTypeContainer
+	if c.flagVM {
+		instanceDBType = api.InstanceTypeVM
+	}
+
+	// Setup instance creation request
 	req := api.InstancesPost{
 		Name:         name,
 		InstanceType: c.flagType,
+		Type:         instanceDBType,
 	}
 	req.Config = configMap
 	req.Devices = devicesMap
@@ -265,7 +274,7 @@ func (c *cmdInit) create(conf *config.Config, args []string) (lxd.InstanceServer
 			}
 		}
 
-		// Create the container
+		// Create the instance
 		op, err := d.CreateInstanceFromImage(imgRemote, *imgInfo, req)
 		if err != nil {
 			return nil, "", err
@@ -313,6 +322,7 @@ func (c *cmdInit) create(conf *config.Config, args []string) (lxd.InstanceServer
 		opInfo = op.Get()
 	}
 
+	// tomp TODO should this field be renamed?
 	containers, ok := opInfo.Resources["containers"]
 	if !ok || len(containers) == 0 {
 		return nil, "", fmt.Errorf(i18n.G("Didn't get any affected image, container or snapshot from server"))
@@ -321,7 +331,7 @@ func (c *cmdInit) create(conf *config.Config, args []string) (lxd.InstanceServer
 	if len(containers) == 1 && name == "" {
 		fields := strings.Split(containers[0], "/")
 		name = fields[len(fields)-1]
-		fmt.Printf(i18n.G("Container name is: %s")+"\n", name)
+		fmt.Printf(i18n.G("Instance name is: %s")+"\n", name)
 	}
 
 	// Validate the network setup

From 76f88af9a0a5aa75f7012762d8397d49cccfc401 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 10:55:21 +0000
Subject: [PATCH 02/30] lxd/vm/qemu: Initial implementation of VM Qemu instance
 type

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/vm_qemu.go | 1951 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1951 insertions(+)
 create mode 100644 lxd/vm_qemu.go

diff --git a/lxd/vm_qemu.go b/lxd/vm_qemu.go
new file mode 100644
index 0000000000..e6fa0823f5
--- /dev/null
+++ b/lxd/vm_qemu.go
@@ -0,0 +1,1951 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/digitalocean/go-qemu/qmp"
+	"github.com/linuxkit/virtsock/pkg/vsock"
+	"github.com/pborman/uuid"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+
+	"github.com/lxc/lxd/lxd/backup"
+	"github.com/lxc/lxd/lxd/cluster"
+	"github.com/lxc/lxd/lxd/db"
+	"github.com/lxc/lxd/lxd/db/query"
+	"github.com/lxc/lxd/lxd/device"
+	deviceConfig "github.com/lxc/lxd/lxd/device/config"
+	"github.com/lxc/lxd/lxd/instance/instancetype"
+	"github.com/lxc/lxd/lxd/maas"
+	"github.com/lxc/lxd/lxd/operations"
+	"github.com/lxc/lxd/lxd/project"
+	"github.com/lxc/lxd/lxd/state"
+	storagePools "github.com/lxc/lxd/lxd/storage"
+	"github.com/lxc/lxd/lxd/util"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/api"
+	log "github.com/lxc/lxd/shared/log15"
+	"github.com/lxc/lxd/shared/logger"
+	"github.com/lxc/lxd/shared/osarch"
+)
+
+// vmVsockTimeout is the timeout passed to qmp.NewSocketMonitor when
+// connecting to the VM's qemu monitor socket.
+var vmVsockTimeout time.Duration = time.Second
+
+// vmQemuLoad instantiates a vmQemu from its database arguments and expands
+// its config and devices using the supplied profiles.
+func vmQemuLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (Instance, error) {
+	vm := vmQemuInstantiate(s, args)
+
+	// Layer profile config and devices on top of the local ones.
+	if err := vm.expandConfig(profiles); err != nil {
+		return nil, err
+	}
+
+	if err := vm.expandDevices(profiles); err != nil {
+		return nil, err
+	}
+
+	return vm, nil
+}
+
+// vmQemuInstantiate creates a vmQemu struct without initializing it.
+func vmQemuInstantiate(s *state.State, args db.InstanceArgs) *vmQemu {
+	vm := &vmQemu{
+		state:        s,
+		id:           args.ID,
+		project:      args.Project,
+		name:         args.Name,
+		description:  args.Description,
+		ephemeral:    args.Ephemeral,
+		architecture: args.Architecture,
+		dbType:       args.Type,
+		snapshot:     args.Snapshot,
+		creationDate: args.CreationDate,
+		lastUsedDate: args.LastUsedDate,
+		profiles:     args.Profiles,
+		localConfig:  args.Config,
+		localDevices: args.Devices,
+		stateful:     args.Stateful,
+		node:         args.Node,
+		expiryDate:   args.ExpiryDate,
+	}
+
+	// Normalise any zero-ish timestamps to the canonical zero time.Time.
+	for _, ts := range []*time.Time{&vm.expiryDate, &vm.creationDate, &vm.lastUsedDate} {
+		if ts.IsZero() {
+			*ts = time.Time{}
+		}
+	}
+
+	return vm
+}
+
+// vmQemuCreate creates a new storage volume record and returns an initialised Instance.
+// On any failure the partially-created instance is deleted again via vm.Delete().
+func vmQemuCreate(s *state.State, args db.InstanceArgs) (Instance, error) {
+	// Create the instance struct.
+	vm := &vmQemu{
+		state:        s,
+		id:           args.ID,
+		project:      args.Project,
+		name:         args.Name,
+		node:         args.Node,
+		description:  args.Description,
+		ephemeral:    args.Ephemeral,
+		architecture: args.Architecture,
+		dbType:       args.Type,
+		snapshot:     args.Snapshot,
+		stateful:     args.Stateful,
+		creationDate: args.CreationDate,
+		lastUsedDate: args.LastUsedDate,
+		profiles:     args.Profiles,
+		localConfig:  args.Config,
+		localDevices: args.Devices,
+		expiryDate:   args.ExpiryDate,
+	}
+
+	// Cleanup the zero values (normalise to the canonical zero time.Time).
+	if vm.expiryDate.IsZero() {
+		vm.expiryDate = time.Time{}
+	}
+
+	if vm.creationDate.IsZero() {
+		vm.creationDate = time.Time{}
+	}
+
+	if vm.lastUsedDate.IsZero() {
+		vm.lastUsedDate = time.Time{}
+	}
+
+	ctxMap := log.Ctx{
+		"project":   args.Project,
+		"name":      vm.name,
+		"ephemeral": vm.ephemeral,
+	}
+
+	logger.Info("Creating instance", ctxMap)
+
+	// Load the config.
+	err := vm.init()
+	if err != nil {
+		vm.Delete()
+		logger.Error("Failed creating instance", ctxMap)
+		return nil, err
+	}
+
+	// Validate expanded config.
+	err = containerValidConfig(s.OS, vm.expandedConfig, false, true)
+	if err != nil {
+		vm.Delete()
+		logger.Error("Failed creating instance", ctxMap)
+		return nil, err
+	}
+
+	err = containerValidDevices(s, s.Cluster, vm.Name(), vm.expandedDevices, true)
+	if err != nil {
+		vm.Delete()
+		logger.Error("Failed creating instance", ctxMap)
+		return nil, errors.Wrap(err, "Invalid devices")
+	}
+
+	// Retrieve the instance's storage pool from its expanded root disk device.
+	_, rootDiskDevice, err := shared.GetRootDiskDevice(vm.expandedDevices.CloneNative())
+	if err != nil {
+		vm.Delete()
+		return nil, err
+	}
+
+	if rootDiskDevice["pool"] == "" {
+		vm.Delete()
+		return nil, fmt.Errorf("The instance's root device is missing the pool property")
+	}
+
+	storagePool := rootDiskDevice["pool"]
+
+	// Get the storage pool ID for the instance.
+	poolID, pool, err := s.Cluster.StoragePoolGet(storagePool)
+	if err != nil {
+		vm.Delete()
+		return nil, err
+	}
+
+	// Fill in any default volume config.
+	volumeConfig := map[string]string{}
+	err = storagePools.VolumeFillDefault(storagePool, volumeConfig, pool)
+	if err != nil {
+		vm.Delete()
+		return nil, err
+	}
+
+	// Create a new database entry for the instance's storage volume.
+	_, err = s.Cluster.StoragePoolVolumeCreate(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, false, poolID, volumeConfig)
+	if err != nil {
+		vm.Delete()
+		return nil, err
+	}
+
+	if !vm.IsSnapshot() {
+		// Update MAAS.
+		err = vm.maasUpdate(nil)
+		if err != nil {
+			vm.Delete()
+			logger.Error("Failed creating instance", ctxMap)
+			return nil, err
+		}
+
+		// Add devices to instance; unsupported device types are tolerated.
+		for k, m := range vm.expandedDevices {
+			err = vm.deviceAdd(k, m)
+			if err != nil && err != device.ErrUnsupportedDevType {
+				vm.Delete()
+				return nil, errors.Wrapf(err, "Failed to add device '%s'", k)
+			}
+		}
+	}
+
+	logger.Info("Created instance", ctxMap)
+	// tomp TODO should this event name be changed?
+	vm.state.Events.SendLifecycle(vm.project, "container-created",
+		fmt.Sprintf("/1.0/containers/%s", vm.name), nil)
+
+	return vm, nil
+}
+
+// The QEMU virtual machine driver.
+type vmQemu struct {
+	// Properties.
+	architecture int
+	dbType       instancetype.Type
+	snapshot     bool
+	creationDate time.Time
+	lastUsedDate time.Time
+	ephemeral    bool
+	id           int
+	project      string
+	name         string
+	description  string
+	stateful     bool
+
+	// Config. "local" fields are the instance's own values; "expanded"
+	// fields have profile values merged in (see expandConfig/expandDevices).
+	expandedConfig  map[string]string
+	expandedDevices deviceConfig.Devices
+	localConfig     map[string]string
+	localDevices    deviceConfig.Devices
+	profiles        []string
+
+	// Daemon state, shared with the rest of LXD.
+	state *state.State
+
+	// Storage.
+	storage storage
+
+	// Clustering.
+	node string
+
+	// Progress tracking.
+	op *operations.Operation
+
+	// Snapshot expiry; zero value when no expiry is set.
+	expiryDate time.Time
+}
+
+// Freeze is currently a no-op for VMs; it always reports success.
+func (vm *vmQemu) Freeze() error {
+	return nil
+}
+
+// Shutdown requests a clean guest shutdown via the QMP system_powerdown
+// command and waits for the VM to stop. If timeout > 0 an error is returned
+// when the VM is still running after the timeout elapses; otherwise it blocks
+// until the VM stops. On success the runtime files are cleaned up.
+func (vm *vmQemu) Shutdown(timeout time.Duration) error {
+	if !vm.IsRunning() {
+		return fmt.Errorf("The instance is already stopped")
+	}
+
+	// Connect to the monitor.
+	monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
+	if err != nil {
+		return err
+	}
+
+	err = monitor.Connect()
+	if err != nil {
+		return err
+	}
+
+	// Send the system_powerdown command, then release the monitor socket
+	// immediately so we don't block it for other users (such as lxc list)
+	// while waiting for the guest to power off.
+	// NOTE(review): single-quoted JSON relies on QEMU's lenient QMP parser;
+	// confirm against the QMP spec.
+	_, err = monitor.Run([]byte("{'execute': 'system_powerdown'}"))
+	monitor.Disconnect()
+	if err != nil {
+		return err
+	}
+
+	// Poll the running state in the background and signal once stopped.
+	// chCancel stops the poller if we give up first, so it cannot leak.
+	chShutdown := make(chan struct{})
+	chCancel := make(chan struct{})
+	defer close(chCancel)
+	go func() {
+		for {
+			// Connect to socket, check if still running, then disconnect so we
+			// don't block the qemu monitor socket for other users.
+			if !vm.IsRunning() {
+				close(chShutdown)
+				return
+			}
+
+			select {
+			case <-chCancel:
+				return // Caller gave up waiting; stop polling.
+			case <-time.After(500 * time.Millisecond):
+				// Don't consume too many resources.
+			}
+		}
+	}()
+
+	// If timeout provided, block until the VM is not running or the timeout has elapsed.
+	if timeout > 0 {
+		select {
+		case <-chShutdown:
+		case <-time.After(timeout):
+			return fmt.Errorf("Instance was not shutdown after timeout")
+		}
+	} else {
+		<-chShutdown // Block until VM is not running if no timeout provided.
+	}
+
+	vm.cleanupDevices()
+	os.Remove(vm.pidFilePath())
+	os.Remove(vm.getMonitorPath())
+
+	return nil
+}
+
+// Start launches the qemu process for this instance: it prepares the runtime
+// directories, config drive, nvram file and devices, writes the qemu config
+// file and then daemonizes qemu. The stateful flag is accepted for interface
+// compatibility; stateful start is not implemented.
+func (vm *vmQemu) Start(stateful bool) error {
+	// Ensure the correct vhost_vsock kernel module is loaded before establishing the vsock.
+	err := util.LoadModule("vhost_vsock")
+	if err != nil {
+		return err
+	}
+
+	if vm.IsRunning() {
+		return fmt.Errorf("The instance is already running")
+	}
+
+	// Create any missing directories.
+	err = os.MkdirAll(vm.Path(), 0100)
+	if err != nil {
+		return err
+	}
+
+	configISOPath, err := vm.generateConfigDrive()
+	if err != nil {
+		return err
+	}
+
+	err = os.MkdirAll(vm.LogPath(), 0700)
+	if err != nil {
+		return err
+	}
+
+	err = os.MkdirAll(vm.DevicesPath(), 0711)
+	if err != nil {
+		return err
+	}
+
+	err = os.MkdirAll(vm.ShmountsPath(), 0711)
+	if err != nil {
+		return err
+	}
+
+	_, err = vm.StorageStart()
+	if err != nil {
+		return err
+	}
+
+	// Get a UUID for Qemu, persisting a fresh one on first start.
+	vmUUID := vm.localConfig["volatile.vm.uuid"]
+	if vmUUID == "" {
+		vmUUID = uuid.New()
+		err = vm.VolatileSet(map[string]string{"volatile.vm.uuid": vmUUID})
+		if err != nil {
+			return err
+		}
+	}
+
+	// Generate an empty 128KiB nvram file for UEFI variable storage.
+	nvramFile, err := os.OpenFile(vm.getNvramPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0640)
+	if err != nil {
+		return err
+	}
+
+	err = nvramFile.Truncate(131072)
+	if err != nil {
+		nvramFile.Close()
+		return err
+	}
+
+	err = nvramFile.Close()
+	if err != nil {
+		return err
+	}
+
+	tapDev := map[string]string{}
+
+	// Setup devices in sorted order, this ensures that device mounts are added in path order.
+	for _, dev := range vm.expandedDevices.Sorted() {
+		// Start the device.
+		runConf, err := vm.deviceStart(dev.Name, dev.Config, false)
+		if err != nil {
+			return errors.Wrapf(err, "Failed to start device '%s'", dev.Name)
+		}
+
+		if runConf == nil {
+			continue
+		}
+
+		// Record the NIC's host-side tap interface and MAC for the qemu config.
+		for _, nicItem := range runConf.NetworkInterface {
+			if nicItem.Key == "link" {
+				tapDev["tap"] = nicItem.Value
+				tapDev["hwaddr"] = vm.localConfig[fmt.Sprintf("volatile.%s.hwaddr", dev.Name)]
+			}
+		}
+	}
+
+	confFile, err := vm.generateQemuConfigFile(configISOPath, tapDev)
+	if err != nil {
+		return err
+	}
+
+	// Daemonize qemu; it writes its PID to pidFilePath(), which pid()/Stop()
+	// later use to track the process.
+	_, err = shared.RunCommand("qemu-system-x86_64", "-name", vm.Name(), "-uuid", vmUUID, "-daemonize", "-cpu", "host", "-nographic", "-serial", "chardev:console", "-nodefaults", "-readconfig", confFile, "-pidfile", vm.pidFilePath())
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
+// removes its device prefix from the keys.
+func (vm *vmQemu) deviceVolatileGetFunc(devName string) func() map[string]string {
+	prefix := fmt.Sprintf("volatile.%s.", devName)
+	return func() map[string]string {
+		result := map[string]string{}
+		for key, value := range vm.localConfig {
+			if !strings.HasPrefix(key, prefix) {
+				continue
+			}
+			result[strings.TrimPrefix(key, prefix)] = value
+		}
+		return result
+	}
+}
+
+// deviceVolatileSetFunc returns a function that can be called to save a named device's volatile
+// config using keys that do not have the device's name prefixed.
+func (vm *vmQemu) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
+	return func(save map[string]string) error {
+		// Re-add the device prefix before persisting.
+		prefixed := map[string]string{}
+		for key, value := range save {
+			prefixed[fmt.Sprintf("volatile.%s.%s", devName, key)] = value
+		}
+		return vm.VolatileSet(prefixed)
+	}
+}
+
+// deviceLoad instantiates and validates a new device and returns it along with enriched config.
+func (vm *vmQemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
+	var configCopy deviceConfig.Device
+	var err error
+
+	// NIC and infiniband devices pull extra fields from volatile config;
+	// every other type just gets a defensive copy so the device cannot
+	// modify our stored config.
+	switch rawConfig["type"] {
+	case "nic", "infiniband":
+		configCopy, err = vm.fillNetworkDevice(deviceName, rawConfig)
+		if err != nil {
+			return nil, nil, err
+		}
+	default:
+		configCopy = rawConfig.Clone()
+	}
+
+	d, err := device.New(vm, vm.state, deviceName, configCopy, vm.deviceVolatileGetFunc(deviceName), vm.deviceVolatileSetFunc(deviceName))
+
+	// Return device and config copy even if error occurs as caller may still use device.
+	return d, configCopy, err
+}
+
+// deviceStart loads a new device and calls its Start() function. Devices that
+// cannot be hot-plugged may only be started while the instance is stopped.
+func (vm *vmQemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, isRunning bool) (*device.RunConfig, error) {
+	d, _, err := vm.deviceLoad(deviceName, rawConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	canHotPlug, _ := d.CanHotPlug()
+	if isRunning && !canHotPlug {
+		return nil, fmt.Errorf("Device cannot be started when instance is running")
+	}
+
+	return d.Start()
+}
+
+// deviceStop loads a new device and calls its Stop() function. Validation
+// failures are deliberately tolerated (logged, not fatal) so that devices
+// created by older LXD versions can still be stopped after an upgrade.
+func (vm *vmQemu) deviceStop(deviceName string, rawConfig deviceConfig.Device) error {
+	d, _, err := vm.deviceLoad(deviceName, rawConfig)
+
+	// If deviceLoad fails with unsupported device type then return.
+	if err == device.ErrUnsupportedDevType {
+		return err
+	}
+
+	// If deviceLoad fails for any other reason then just log the error and proceed, as in the
+	// scenario that a new version of LXD has additional validation restrictions than older
+	// versions we still need to allow previously valid devices to be stopped.
+	if err != nil {
+		// If there is no device returned, then we cannot proceed, so return as error.
+		if d == nil {
+			return fmt.Errorf("Device stop validation failed for '%s': %v", deviceName, err)
+
+		}
+
+		logger.Errorf("Device stop validation failed for '%s': %v", deviceName, err)
+	}
+
+	canHotPlug, _ := d.CanHotPlug()
+
+	// An empty netns path means we haven't been called from the LXC stop hook, so are running.
+	// NOTE(review): this comment looks copied from the container driver; there
+	// is no netns path here — the check simply forbids stopping non-hot-pluggable
+	// devices while the VM runs. Confirm intent.
+	if vm.IsRunning() && !canHotPlug {
+		return fmt.Errorf("Device cannot be stopped when instance is running")
+	}
+
+	runConf, err := d.Stop()
+	if err != nil {
+		return err
+	}
+
+	if runConf != nil {
+		// Run post stop hooks irrespective of run state of instance.
+		err = vm.runHooks(runConf.PostHooks)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// runHooks executes the given hook functions in order, stopping at the first
+// error. Ranging over a nil/empty slice is a no-op, so no length guard is needed.
+func (vm *vmQemu) runHooks(hooks []func() error) error {
+	for _, hook := range hooks {
+		err := hook()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// getMonitorPath returns the path of the QMP monitor unix socket for this instance.
+func (vm *vmQemu) getMonitorPath() string {
+	return vm.DevicesPath() + "/qemu.monitor"
+}
+
+// getNvramPath returns the path of the UEFI nvram variable file for this instance.
+func (vm *vmQemu) getNvramPath() string {
+	return vm.DevicesPath() + "/qemu.nvram"
+}
+
+// generateConfigDrive builds a cloud-init "cidata" ISO under the instance
+// path containing the lxd agent binary, vendor-data/user-data/meta-data and
+// systemd units, and returns the ISO's path. The vendor-data causes the guest
+// to mount the drive and start the agent on first boot; the installed units
+// keep that working on subsequent boots.
+func (vm *vmQemu) generateConfigDrive() (string, error) {
+	configDrivePath := vm.Path() + "/config"
+
+	// Create config drive dir.
+	err := os.MkdirAll(configDrivePath, 0100)
+	if err != nil {
+		return "", err
+	}
+
+	// The agent binary must be on the host's PATH.
+	path, err := exec.LookPath("vm-agent")
+	if err != nil {
+		return "", err
+	}
+
+	// Install agent into config drive dir.
+	_, err = shared.RunCommand("cp", path, configDrivePath+"/")
+	if err != nil {
+		return "", err
+	}
+
+	vendorData := `#cloud-config
+runcmd:
+ - "mkdir /media/lxd_config"
+ - "mount -o ro -t iso9660 /dev/disk/by-label/cidata /media/lxd_config"
+ - "cp /media/lxd_config/media-lxd_config.mount /etc/systemd/system/"
+ - "cp /media/lxd_config/lxd-agent.service /etc/systemd/system/"
+ - "systemctl enable media-lxd_config.mount"
+ - "systemctl enable lxd-agent.service"
+ - "systemctl start lxd-agent"
+`
+
+	err = ioutil.WriteFile(configDrivePath+"/vendor-data", []byte(vendorData), 0600)
+	if err != nil {
+		return "", err
+	}
+
+	userData := vm.expandedConfig["user.user-data"]
+
+	// Use an empty user-data file if no custom user-data supplied.
+	if userData == "" {
+		userData = "#cloud-config"
+	}
+
+	err = ioutil.WriteFile(configDrivePath+"/user-data", []byte(userData), 0600)
+	if err != nil {
+		return "", err
+	}
+
+	// meta-data identifies the instance to cloud-init.
+	metaData := fmt.Sprintf(`instance-id: %s
+local-hostname: %s
+`, vm.Name(), vm.Name())
+
+	err = ioutil.WriteFile(configDrivePath+"/meta-data", []byte(metaData), 0600)
+	if err != nil {
+		return "", err
+	}
+
+	// Systemd unit that runs the agent from the mounted config drive.
+	lxdAgentServiceUnit := `[Unit]
+Description=LXD - agent
+After=media-lxd_config.mount
+[Service]
+Type=simple
+ExecStart=/media/lxd_config/vm-agent
+[Install]
+WantedBy=multi-user.target
+`
+
+	err = ioutil.WriteFile(configDrivePath+"/lxd-agent.service", []byte(lxdAgentServiceUnit), 0600)
+	if err != nil {
+		return "", err
+	}
+
+	// Systemd mount unit so the config drive is remounted on later boots.
+	lxdConfigDriveMountUnit := `[Unit]
+Description = LXD - config drive
+Before=local-fs.target
+[Mount]
+Where=/media/lxd_config
+What=/dev/disk/by-label/cidata
+Type=iso9660
+[Install]
+WantedBy=multi-user.target
+`
+
+	err = ioutil.WriteFile(configDrivePath+"/media-lxd_config.mount", []byte(lxdConfigDriveMountUnit), 0600)
+	if err != nil {
+		return "", err
+	}
+
+	// Finally convert the config drive dir into an ISO file. The cidata label is important
+	// as this is what cloud-init uses to detect, mount the drive and run the cloud-init
+	// templates on first boot. The vendor-data template then modifies the system so that the
+	// config drive is mounted and the agent is started on subsequent boots.
+	isoPath := vm.Path() + "/config.iso"
+	_, err = shared.RunCommand("mkisofs", "-R", "-V", "cidata", "-o", isoPath, configDrivePath)
+	if err != nil {
+		return "", err
+	}
+
+	return isoPath, nil
+}
+
+// generateQemuConfigFile writes the qemu config file (readconfig format) into
+// the instance's log directory and returns its path. It wires up the UEFI
+// firmware, QMP monitor socket, vsock device, root disk, config-drive ISO and
+// a single tap NIC (from tapDev["tap"]/tapDev["hwaddr"]).
+func (vm *vmQemu) generateQemuConfigFile(configISOPath string, tapDev map[string]string) (string, error) {
+	_, _, onDiskPoolName := vm.storage.GetContainerPoolInfo()
+	volumeName := project.Prefix(vm.Project(), vm.Name())
+	// TODO add function to the storage API to get block device path and return as disk device runConf.
+	// NOTE(review): this hard-codes a ZFS zvol path — other pool drivers will break. Confirm.
+	rootDrive := fmt.Sprintf("/dev/zvol/%s/virtual-machines/%s", onDiskPoolName, volumeName)
+	monitorPath := vm.getMonitorPath()
+	nvramPath := vm.getNvramPath()
+	vsockID := vm.vsockID()
+
+	// NOTE(review): several oddities in the template below to confirm:
+	// [machine] sets graphics = "off" twice; qemu_pcie3 and qemu_pcie4 both
+	// use port = "0x13"; "qemu_ballon" is missing an 'o'; CPU topology and
+	// memory size are hard-coded rather than taken from limits.* config.
+	conf := fmt.Sprintf(`
+# Machine
+[machine]
+graphics = "off"
+type = "q35"
+accel = "kvm"
+usb = "off"
+graphics = "off"
+[global]
+driver = "ICH9-LPC"
+property = "disable_s3"
+value = "1"
+[global]
+driver = "ICH9-LPC"
+property = "disable_s4"
+value = "1"
+[boot-opts]
+strict = "on"
+# CPU
+[smp-opts]
+cpus = "4"
+sockets = "1"
+cores = "2"
+threads = "2"
+# Memory
+[memory]
+size = "2G"
+# Firmware
+[drive]
+file = "/usr/share/OVMF/OVMF_CODE.fd"
+if = "pflash"
+format = "raw"
+unit = "0"
+readonly = "on"
+[drive]
+file = "%s"
+if = "pflash"
+format = "raw"
+unit = "1"
+# Console
+[chardev "console"]
+backend = "pty"
+# Qemu control
+[chardev "monitor"]
+backend = "socket"
+path = "%s"
+server = "on"
+wait = "off"
+[mon]
+chardev = "monitor"
+mode = "control"
+# SCSI root
+[device "qemu_pcie1"]
+driver = "pcie-root-port"
+port = "0x10"
+chassis = "1"
+bus = "pcie.0"
+multifunction = "on"
+addr = "0x2"
+[device "qemu_scsi"]
+driver = "virtio-scsi-pci"
+bus = "qemu_pcie1"
+addr = "0x0"
+# Balloon driver
+[device "qemu_pcie2"]
+driver = "pcie-root-port"
+port = "0x12"
+chassis = "2"
+bus = "pcie.0"
+addr = "0x2.0x1"
+[device "qemu_ballon"]
+driver = "virtio-balloon-pci"
+bus = "qemu_pcie2"
+addr = "0x0"
+# Random number generator
+[object "qemu_rng"]
+qom-type = "rng-random"
+filename = "/dev/urandom"
+[device "qemu_pcie3"]
+driver = "pcie-root-port"
+port = "0x13"
+chassis = "3"
+bus = "pcie.0"
+addr = "0x2.0x2"
+[device "dev-qemu_rng"]
+driver = "virtio-rng-pci"
+rng = "qemu_rng"
+bus = "qemu_pcie3"
+addr = "0x0"
+# Vsock
+[device "qemu_pcie4"]
+driver = "pcie-root-port"
+port = "0x13"
+chassis = "4"
+bus = "pcie.0"
+addr = "0x2.0x3"
+[device]
+driver = "vhost-vsock-pci"
+guest-cid = "%d"
+bus = "qemu_pcie4"
+addr = "0x0"
+# Root drive ("root" device)
+[drive "lxd_root"]
+file = "%s"
+format = "raw"
+if = "none"
+cache = "none"
+aio = "native"
+[device "dev-lxd_root"]
+driver = "scsi-hd"
+bus = "qemu_scsi.0"
+channel = "0"
+scsi-id = "0"
+lun = "1"
+drive = "lxd_root"
+bootindex = "1"
+# Config drive (set to last lun)
+[drive "qemu_config"]
+file = "%s"
+format = "raw"
+if = "none"
+cache = "none"
+aio = "native"
+readonly = "on"
+[device "dev-qemu_config"]
+driver = "scsi-hd"
+bus = "qemu_scsi.0"
+channel = "0"
+scsi-id = "1"
+lun = "1"
+drive = "qemu_config"
+# Network card ("eth0" device)
+[netdev "lxd_eth0"]
+type = "tap"
+ifname = "%s"
+script = "no"
+downscript = "no"
+[device "qemu_pcie5"]
+driver = "pcie-root-port"
+port = "0x11"
+chassis = "5"
+bus = "pcie.0"
+addr = "0x2.0x4"
+[device "dev-lxd_eth0"]
+driver = "virtio-net-pci"
+netdev = "lxd_eth0"
+mac = "%s"
+bus = "qemu_pcie5"
+addr = "0x0"
+bootindex = "2"
+`, nvramPath, monitorPath, vsockID, rootDrive, configISOPath, tapDev["tap"], tapDev["hwaddr"])
+	configPath := filepath.Join(vm.LogPath(), "qemu.conf")
+	return configPath, ioutil.WriteFile(configPath, []byte(conf), 0640)
+}
+
+// pidFilePath returns the path of the file where the daemonized qemu writes its PID.
+func (vm *vmQemu) pidFilePath() string {
+	return vm.DevicesPath() + "/qemu.pid"
+}
+
+// pid returns the qemu process ID from the instance's pid file. A missing
+// pid file yields (0, nil); read or parse failures yield (-1, err).
+func (vm *vmQemu) pid() (int, error) {
+	contents, err := ioutil.ReadFile(vm.pidFilePath())
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, nil
+		}
+
+		return -1, err
+	}
+
+	pid, err := strconv.Atoi(strings.TrimSpace(string(contents)))
+	if err != nil {
+		return -1, err
+	}
+
+	return pid, nil
+}
+
+// Stop forcefully terminates the VM by sending the QMP "quit" command, waits
+// for the qemu process to exit, then cleans up runtime files. Stateful stop
+// is not supported.
+func (vm *vmQemu) Stop(stateful bool) error {
+	if stateful {
+		return fmt.Errorf("Stateful stop isn't supported for VMs at this time")
+	}
+
+	if !vm.IsRunning() {
+		return fmt.Errorf("Instance is not running")
+	}
+
+	// Connect to the monitor.
+	monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
+	if err != nil {
+		return err
+	}
+
+	err = monitor.Connect()
+	if err != nil {
+		return err
+	}
+	defer monitor.Disconnect()
+
+	// Send the quit command.
+	// NOTE(review): single-quoted JSON relies on QEMU's lenient QMP parser;
+	// also the explicit Disconnect below plus the deferred one means a double
+	// disconnect — confirm that is harmless with the qmp package.
+	_, err = monitor.Run([]byte("{'execute': 'quit'}"))
+	if err != nil {
+		return err
+	}
+	monitor.Disconnect()
+
+	pid, err := vm.pid()
+	if err != nil {
+		return err
+	}
+
+	// No PID found, qemu not running.
+	// NOTE(review): pid() only returns a negative value together with a
+	// non-nil error (handled above); a missing pid file yields 0, so this
+	// branch looks unreachable and a missing pid file falls through to the
+	// wait loop below (which breaks immediately since /proc/0 never exists).
+	if pid < 0 {
+		return nil
+	}
+
+	// Check if qemu process still running, if so wait.
+	// NOTE(review): this polls /proc/<pid> with no upper bound; a hung qemu
+	// would block here forever.
+	for {
+		procPath := fmt.Sprintf("/proc/%d", pid)
+		if shared.PathExists(procPath) {
+			time.Sleep(500 * time.Millisecond)
+			continue
+		}
+
+		break
+	}
+
+	vm.cleanupDevices()
+	os.Remove(vm.pidFilePath())
+	os.Remove(vm.getMonitorPath())
+
+	return nil
+}
+
+// Unfreeze is not yet supported for VMs.
+func (vm *vmQemu) Unfreeze() error {
+	return fmt.Errorf("Unfreeze Not implemented")
+}
+
+// IsPrivileged reports whether the expanded config enables security.privileged.
+func (vm *vmQemu) IsPrivileged() bool {
+	return shared.IsTrue(vm.expandedConfig["security.privileged"])
+}
+
+// Restore is not yet supported for VMs.
+func (vm *vmQemu) Restore(source Instance, stateful bool) error {
+	return fmt.Errorf("Restore Not implemented")
+}
+
+// Snapshots returns an empty list; VM snapshots are not implemented yet.
+func (vm *vmQemu) Snapshots() ([]Instance, error) {
+	return []Instance{}, nil
+}
+
+// Backups returns an empty list; VM backups are not implemented yet.
+func (vm *vmQemu) Backups() ([]backup.Backup, error) {
+	return []backup.Backup{}, nil
+}
+
+// Rename is not yet supported for VMs.
+func (vm *vmQemu) Rename(newName string) error {
+	return fmt.Errorf("Rename Not implemented")
+}
+
+// Update is not yet supported for VMs.
+func (vm *vmQemu) Update(args db.InstanceArgs, userRequested bool) error {
+	return fmt.Errorf("Update Not implemented")
+}
+
+// removeUnixDevices deletes any leftover unix device entries from the
+// instance's devices directory. Removal failures are logged, not fatal.
+func (vm *vmQemu) removeUnixDevices() error {
+	// Nothing to do if the devices path was never created.
+	if !shared.PathExists(vm.DevicesPath()) {
+		return nil
+	}
+
+	dents, err := ioutil.ReadDir(vm.DevicesPath())
+	if err != nil {
+		return err
+	}
+
+	unixPrefixes := []string{"forkmknod.unix.", "unix.", "infiniband.unix."}
+	for _, f := range dents {
+		// Only touch entries created for unix devices.
+		isUnixDev := false
+		for _, prefix := range unixPrefixes {
+			if strings.HasPrefix(f.Name(), prefix) {
+				isUnixDev = true
+				break
+			}
+		}
+
+		if !isUnixDev {
+			continue
+		}
+
+		devicePath := filepath.Join(vm.DevicesPath(), f.Name())
+		if err := os.Remove(devicePath); err != nil {
+			logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
+		}
+	}
+
+	return nil
+}
+
+// removeDiskDevices unmounts and deletes any leftover disk.* entries from the
+// instance's devices directory. Removal failures are logged, not fatal.
+func (vm *vmQemu) removeDiskDevices() error {
+	// Check that we indeed have devices to remove.
+	if !shared.PathExists(vm.DevicesPath()) {
+		return nil
+	}
+
+	// Load the directory listing.
+	dents, err := ioutil.ReadDir(vm.DevicesPath())
+	if err != nil {
+		return err
+	}
+
+	// Go through all the disk devices.
+	for _, f := range dents {
+		// Skip non-disk devices.
+		if !strings.HasPrefix(f.Name(), "disk.") {
+			continue
+		}
+
+		// Compute the path once for both the unmount and the removal.
+		diskPath := filepath.Join(vm.DevicesPath(), f.Name())
+
+		// Always try to unmount the host side.
+		_ = unix.Unmount(diskPath, unix.MNT_DETACH)
+
+		// Remove the entry.
+		err := os.Remove(diskPath)
+		if err != nil {
+			logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
+		}
+	}
+
+	return nil
+}
+
+// cleanup removes leftover device entries and runtime directories. It is
+// best-effort teardown: all errors are intentionally ignored.
+func (vm *vmQemu) cleanup() {
+	// Unmount any leftovers
+	vm.removeUnixDevices()
+	vm.removeDiskDevices()
+
+	// Remove the devices path
+	os.Remove(vm.DevicesPath())
+
+	// Remove the shmounts path
+	os.RemoveAll(vm.ShmountsPath())
+}
+
+// cleanupDevices performs any needed device cleanup steps when instance is stopped.
+func (vm *vmQemu) cleanupDevices() {
+	for _, entry := range vm.expandedDevices.Sorted() {
+		err := vm.deviceStop(entry.Name, entry.Config)
+		if err == device.ErrUnsupportedDevType {
+			// Device type has no VM support yet; nothing to stop.
+			continue
+		}
+
+		if err != nil {
+			logger.Errorf("Failed to stop device '%s': %v", entry.Name, err)
+		}
+	}
+}
+
+// init computes the expanded config and expanded device list from the
+// instance's profiles and local values.
+func (vm *vmQemu) init() error {
+	if err := vm.expandConfig(nil); err != nil {
+		return err
+	}
+
+	return vm.expandDevices(nil)
+}
+
+// initStorage lazily initializes the storage interface for this instance.
+// It is a no-op when the interface has already been set up.
+func (vm *vmQemu) initStorage() error {
+	if vm.storage != nil {
+		return nil
+	}
+
+	s, err := storagePoolVolumeContainerLoadInit(vm.state, vm.Project(), vm.Name())
+	if err != nil {
+		return err
+	}
+	vm.storage = s
+
+	return nil
+}
+
+// Delete removes the instance (or snapshot) and everything attached to it:
+// snapshots, backups, storage volume, MAAS record, devices and the database
+// entries. Deletion is refused when security.protection.delete is set.
+func (vm *vmQemu) Delete() error {
+	ctxMap := log.Ctx{
+		"project":   vm.project,
+		"name":      vm.name,
+		"created":   vm.creationDate,
+		"ephemeral": vm.ephemeral,
+		"used":      vm.lastUsedDate}
+
+	logger.Info("Deleting instance", ctxMap)
+
+	if shared.IsTrue(vm.expandedConfig["security.protection.delete"]) && !vm.IsSnapshot() {
+		err := fmt.Errorf("Instance is protected")
+		logger.Warn("Failed to delete instance", log.Ctx{"name": vm.Name(), "err": err})
+		return err
+	}
+
+	// Check if we're dealing with "lxd import". An in-progress import leaves
+	// a ".importing" marker file; in that case the on-disk data must be kept.
+	isImport := false
+	if vm.storage != nil {
+		_, poolName, _ := vm.storage.GetContainerPoolInfo()
+
+		if vm.IsSnapshot() {
+			vmName, _, _ := shared.ContainerGetParentAndSnapshotName(vm.name)
+			if shared.PathExists(shared.VarPath("storage-pools", poolName, "containers", vmName, ".importing")) {
+				isImport = true
+			}
+		} else {
+			if shared.PathExists(shared.VarPath("storage-pools", poolName, "containers", vm.name, ".importing")) {
+				isImport = true
+			}
+		}
+	}
+
+	// Attempt to initialize storage interface for the instance.
+	// Failure is tolerated so the database records can still be removed.
+	err := vm.initStorage()
+	if err != nil {
+		logger.Warnf("Failed to init storage: %v", err)
+	}
+
+	if vm.IsSnapshot() {
+		// Remove the snapshot.
+		if vm.storage != nil && !isImport {
+			err := vm.storage.ContainerSnapshotDelete(vm)
+			if err != nil {
+				logger.Warn("Failed to delete snapshot", log.Ctx{"name": vm.Name(), "err": err})
+				return err
+			}
+		}
+	} else {
+		// Remove all snapshots.
+		err := containerDeleteSnapshots(vm.state, vm.Project(), vm.Name())
+		if err != nil {
+			logger.Warn("Failed to delete snapshots", log.Ctx{"name": vm.Name(), "err": err})
+			return err
+		}
+
+		// Remove all backups.
+		backups, err := vm.Backups()
+		if err != nil {
+			return err
+		}
+
+		for _, backup := range backups {
+			err = backup.Delete()
+			if err != nil {
+				return err
+			}
+		}
+
+		// Clean things up (leftover device entries and runtime dirs).
+		vm.cleanup()
+
+		// Delete the container from disk.
+		if vm.storage != nil && !isImport {
+			err := vm.storage.ContainerDelete(vm)
+			if err != nil {
+				logger.Error("Failed deleting instance storage", log.Ctx{"name": vm.Name(), "err": err})
+				return err
+			}
+		}
+
+		// Delete the MAAS entry.
+		err = vm.maasDelete()
+		if err != nil {
+			logger.Error("Failed deleting instance MAAS record", log.Ctx{"name": vm.Name(), "err": err})
+			return err
+		}
+
+		// Remove devices from container; unsupported device types are tolerated.
+		for k, m := range vm.expandedDevices {
+			err = vm.deviceRemove(k, m)
+			if err != nil && err != device.ErrUnsupportedDevType {
+				return errors.Wrapf(err, "Failed to remove device '%s'", k)
+			}
+		}
+	}
+
+	// Remove the database record
+	if err := vm.state.Cluster.InstanceRemove(vm.project, vm.Name()); err != nil {
+		logger.Error("Failed deleting instance entry", log.Ctx{"name": vm.Name(), "err": err})
+		return err
+	}
+
+	// Remove the database entry for the pool device
+	if vm.storage != nil {
+		// Get the name of the storage pool the container is attached to. This
+		// reverse-engineering works because container names are globally
+		// unique.
+		poolID, _, _ := vm.storage.GetContainerPoolInfo()
+
+		// Remove volume from storage pool.
+		err := vm.state.Cluster.StoragePoolVolumeDelete(vm.Project(), vm.Name(), db.StoragePoolVolumeTypeVM, poolID)
+		if err != nil {
+			return err
+		}
+	}
+
+	logger.Info("Deleted instance", ctxMap)
+
+	// tomp TODO should these event names be changed?
+	if vm.IsSnapshot() {
+		vm.state.Events.SendLifecycle(vm.project, "container-snapshot-deleted",
+			fmt.Sprintf("/1.0/containers/%s", vm.name), map[string]interface{}{
+				"snapshot_name": vm.name,
+			})
+	} else {
+		vm.state.Events.SendLifecycle(vm.project, "container-deleted",
+			fmt.Sprintf("/1.0/containers/%s", vm.name), nil)
+	}
+
+	return nil
+}
+
+// deviceAdd is currently a no-op stub; VM device attach is not implemented yet.
+func (vm *vmQemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device) error {
+	return nil
+}
+
+// deviceRemove is currently a no-op stub; it succeeds without detaching anything.
+func (vm *vmQemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device) error {
+	return nil
+}
+
+// Export is not implemented for VMs yet.
+func (vm *vmQemu) Export(w io.Writer, properties map[string]string) error {
+	return fmt.Errorf("Export Not implemented")
+}
+
+// CGroupGet is not implemented for VMs.
+func (vm *vmQemu) CGroupGet(key string) (string, error) {
+	return "", fmt.Errorf("CGroupGet Not implemented")
+}
+
+// CGroupSet is not implemented for VMs.
+func (vm *vmQemu) CGroupSet(key string, value string) error {
+	return fmt.Errorf("CGroupSet Not implemented")
+}
+
+// VolatileSet sets one or more volatile.* config keys, persisting them to the
+// database and mirroring the change in the in-memory local/expanded config.
+// An empty value deletes the key. Keys not prefixed "volatile." are rejected.
+func (vm *vmQemu) VolatileSet(changes map[string]string) error {
+	// Sanity check.
+	for key := range changes {
+		if !strings.HasPrefix(key, "volatile.") {
+			return fmt.Errorf("Only volatile keys can be modified with VolatileSet")
+		}
+	}
+
+	// Update the database. Snapshots store their config in a separate table.
+	var err error
+	if vm.IsSnapshot() {
+		err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.InstanceSnapshotConfigUpdate(vm.id, changes)
+		})
+	} else {
+		err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
+			return tx.ContainerConfigUpdate(vm.id, changes)
+		})
+	}
+	if err != nil {
+		return errors.Wrap(err, "Failed to set volatile config")
+	}
+
+	// Apply the change locally. An empty value removes the key.
+	for key, value := range changes {
+		if value == "" {
+			delete(vm.expandedConfig, key)
+			delete(vm.localConfig, key)
+			continue
+		}
+
+		vm.expandedConfig[key] = value
+		vm.localConfig[key] = value
+	}
+
+	return nil
+}
+
+// FileExists is not implemented for VMs yet.
+func (vm *vmQemu) FileExists(path string) error {
+	return fmt.Errorf("FileExists Not implemented")
+}
+
+// FilePull is not implemented for VMs yet.
+func (vm *vmQemu) FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) {
+	return 0, 0, 0, "", nil, fmt.Errorf("FilePull Not implemented")
+}
+
+// FilePush is not implemented for VMs yet.
+func (vm *vmQemu) FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error {
+	return fmt.Errorf("FilePush Not implemented")
+}
+
+// FileRemove is not implemented for VMs yet.
+func (vm *vmQemu) FileRemove(path string) error {
+	return fmt.Errorf("FileRemove Not implemented")
+}
+
+// Console returns a command that attaches the given terminal to the VM's
+// serial console PTY, which is discovered via the QMP "query-chardev"
+// command. It returns nil when the VM is not running or no console chardev
+// can be found.
+func (vm *vmQemu) Console(terminal *os.File) *exec.Cmd {
+	// Connect to the monitor.
+	monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
+	if err != nil {
+		return nil // The VM isn't running as no monitor socket available.
+	}
+
+	err = monitor.Connect()
+	if err != nil {
+		return nil // The capabilities handshake failed.
+	}
+	defer monitor.Disconnect()
+
+	// Send the status command.
+	// NOTE(review): this payload uses single quotes, which is not valid JSON;
+	// confirm the QMP monitor actually accepts it.
+	respRaw, err := monitor.Run([]byte("{'execute': 'query-chardev'}"))
+	if err != nil {
+		return nil // Status command failed.
+	}
+
+	var respDecoded struct {
+		Return []struct {
+			Label    string `json:"label"`
+			Filename string `json:"filename"`
+		} `json:"return"`
+	}
+
+	err = json.Unmarshal(respRaw, &respDecoded)
+	if err != nil {
+		return nil // JSON decode failed.
+	}
+
+	// Find the chardev labelled "console" and strip the "pty:" prefix to get
+	// the PTY device path.
+	var ptsPath string
+
+	for _, v := range respDecoded.Return {
+		if v.Label == "console" {
+			ptsPath = strings.TrimPrefix(v.Filename, "pty:")
+		}
+	}
+
+	if ptsPath == "" {
+		return nil
+	}
+
+	// Attach to the PTY with screen, wired to the caller's terminal.
+	args := []string{
+		"screen",
+		ptsPath,
+	}
+
+	cmd := exec.Cmd{}
+	cmd.Path = "/usr/bin/screen" // TODO dont rely on screen.
+	cmd.Args = args
+	cmd.Stdin = terminal
+	cmd.Stdout = terminal
+	cmd.Stderr = terminal
+	return &cmd
+}
+
+// Exec is not implemented for VMs yet.
+func (vm *vmQemu) Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool, cwd string, uid uint32, gid uint32) (*exec.Cmd, int, int, error) {
+	return nil, 0, 0, fmt.Errorf("Exec Not implemented")
+
+}
+
+// Render returns the API representation of the instance together with a
+// matching ETag: an *api.InstanceSnapshot for snapshots, otherwise an
+// *api.Instance.
+func (vm *vmQemu) Render() (interface{}, interface{}, error) {
+	// Ignore err as the arch string on error is correct (unknown)
+	architectureName, _ := osarch.ArchitectureName(vm.architecture)
+
+	if vm.IsSnapshot() {
+		// Prepare the ETag
+		etag := []interface{}{vm.expiryDate}
+
+		vmSnap := api.InstanceSnapshot{
+			CreatedAt:       vm.creationDate,
+			ExpandedConfig:  vm.expandedConfig,
+			ExpandedDevices: vm.expandedDevices.CloneNative(),
+			LastUsedAt:      vm.lastUsedDate,
+			Name:            strings.SplitN(vm.name, "/", 2)[1],
+			Stateful:        vm.stateful,
+		}
+		vmSnap.Architecture = architectureName
+		vmSnap.Config = vm.localConfig
+		vmSnap.Devices = vm.localDevices.CloneNative()
+		vmSnap.Ephemeral = vm.ephemeral
+		vmSnap.Profiles = vm.profiles
+		vmSnap.ExpiresAt = vm.expiryDate
+
+		return &vmSnap, etag, nil
+	}
+
+	// Prepare the ETag
+	etag := []interface{}{vm.architecture, vm.localConfig, vm.localDevices, vm.ephemeral, vm.profiles}
+
+	vmState := api.Instance{
+		ExpandedConfig:  vm.expandedConfig,
+		ExpandedDevices: vm.expandedDevices.CloneNative(),
+		Name:            vm.name,
+		Status:          vm.statusCode().String(),
+		StatusCode:      vm.statusCode(),
+		Location:        vm.node,
+		Type:            vm.Type().String(),
+	}
+
+	vmState.Description = vm.description
+	vmState.Architecture = architectureName
+	vmState.Config = vm.localConfig
+	vmState.CreatedAt = vm.creationDate
+	vmState.Devices = vm.localDevices.CloneNative()
+	vmState.Ephemeral = vm.ephemeral
+	vmState.LastUsedAt = vm.lastUsedDate
+	vmState.Profiles = vm.profiles
+	vmState.Stateful = vm.stateful
+
+	return &vmState, etag, nil
+}
+
+// RenderFull returns the full API representation of the instance, including
+// its live state, snapshots and backups. It cannot be used on snapshots.
+func (vm *vmQemu) RenderFull() (*api.InstanceFull, interface{}, error) {
+	if vm.IsSnapshot() {
+		return nil, nil, fmt.Errorf("RenderFull doesn't work with snapshots")
+	}
+
+	// Get the Instance struct.
+	base, etag, err := vm.Render()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Convert to InstanceFull.
+	vmState := api.InstanceFull{Instance: *base.(*api.Instance)}
+
+	// Add the InstanceState.
+	vmState.State, err = vm.RenderState()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Add the InstanceSnapshots.
+	snaps, err := vm.Snapshots()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, snap := range snaps {
+		render, _, err := snap.Render()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if vmState.Snapshots == nil {
+			vmState.Snapshots = []api.InstanceSnapshot{}
+		}
+
+		vmState.Snapshots = append(vmState.Snapshots, *render.(*api.InstanceSnapshot))
+	}
+
+	// Add the InstanceBackups.
+	backups, err := vm.Backups()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, backup := range backups {
+		render := backup.Render()
+
+		if vmState.Backups == nil {
+			vmState.Backups = []api.InstanceBackup{}
+		}
+
+		vmState.Backups = append(vmState.Backups, *render)
+	}
+
+	return &vmState, etag, nil
+}
+
+// RenderState returns the live state of the instance. It queries the guest
+// agent first and falls back to just PID and status when the agent cannot
+// be reached.
+func (vm *vmQemu) RenderState() (*api.InstanceState, error) {
+	statusCode := vm.statusCode()
+	pid, _ := vm.pid()
+
+	status, err := vm.agentGetState()
+	if err == nil {
+		status.Pid = int64(pid)
+		status.Status = statusCode.String()
+		status.StatusCode = statusCode
+
+		return status, nil
+	}
+
+	// At least return the Status and StatusCode if we couldn't get any
+	// information for the VM agent.
+	return &api.InstanceState{
+		Pid:        int64(pid),
+		Status:     statusCode.String(),
+		StatusCode: statusCode,
+	}, nil
+}
+
+// agentGetState connects to the agent inside of the VM and does
+// an API call to get the current state.
+func (vm *vmQemu) agentGetState() (*api.InstanceState, error) {
+	var status api.InstanceState
+
+	// Ensure the correct vhost_vsock kernel module is loaded before establishing the vsock.
+	err := util.LoadModule("vhost_vsock")
+	if err != nil {
+		return nil, err
+	}
+
+	// Dial the agent over vsock on port 8443; the host part of the URL below
+	// is a placeholder consumed by the custom dialer.
+	// NOTE(review): no client timeout is set, so a hung agent can block the
+	// caller indefinitely.
+	client := http.Client{
+		Transport: &http.Transport{
+			Dial: func(network, addr string) (net.Conn, error) {
+				return vsock.Dial(uint32(vm.vsockID()), 8443)
+			},
+		},
+	}
+
+	resp, err := client.Get("http://vm.socket/state")
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	err = json.NewDecoder(resp.Body).Decode(&status)
+	if err != nil {
+		return nil, err
+	}
+
+	return &status, nil
+}
+
+// IsRunning reports whether the VM is in any state other than stopped or
+// broken.
+func (vm *vmQemu) IsRunning() bool {
+	state := vm.State()
+	return state != "BROKEN" && state != "STOPPED"
+}
+
+// IsFrozen reports whether the VM state is FROZEN.
+func (vm *vmQemu) IsFrozen() bool {
+	return vm.State() == "FROZEN"
+}
+
+// IsEphemeral returns the instance's ephemeral flag.
+func (vm *vmQemu) IsEphemeral() bool {
+	return vm.ephemeral
+}
+
+// IsSnapshot reports whether this record represents a snapshot.
+func (vm *vmQemu) IsSnapshot() bool {
+	return vm.snapshot
+}
+
+// IsStateful returns the instance's stateful flag.
+func (vm *vmQemu) IsStateful() bool {
+	return vm.stateful
+}
+
+// DeviceEventHandler is not implemented for VMs yet.
+func (vm *vmQemu) DeviceEventHandler(runConf *device.RunConfig) error {
+	return fmt.Errorf("DeviceEventHandler Not implemented")
+}
+
+// ID returns the instance's database ID.
+func (vm *vmQemu) ID() int {
+	return vm.id
+}
+
+// vsockID returns the vsock context ID, 3 being the first ID that
+// can be used.
+func (vm *vmQemu) vsockID() int {
+	return vm.id + 3
+}
+
+// Location returns the cluster member the instance is located on.
+func (vm *vmQemu) Location() string {
+	return vm.node
+}
+
+// Project returns the instance's project name.
+func (vm *vmQemu) Project() string {
+	return vm.project
+}
+
+// Name returns the instance's name.
+func (vm *vmQemu) Name() string {
+	return vm.name
+}
+
+// Type returns the instance type (virtual machine).
+func (vm *vmQemu) Type() instancetype.Type {
+	return vm.dbType
+}
+
+// Description returns the instance's description.
+func (vm *vmQemu) Description() string {
+	return vm.description
+}
+
+// Architecture returns the instance's architecture ID.
+func (vm *vmQemu) Architecture() int {
+	return vm.architecture
+}
+
+// CreationDate returns the time the instance was created.
+func (vm *vmQemu) CreationDate() time.Time {
+	return vm.creationDate
+}
+
+// LastUsedDate returns the time the instance was last used.
+func (vm *vmQemu) LastUsedDate() time.Time {
+	return vm.lastUsedDate
+}
+
+// expandConfig computes expandedConfig by applying the given profiles on top
+// of localConfig. When profiles is nil, the instance's own profiles are
+// fetched from the database.
+func (vm *vmQemu) expandConfig(profiles []api.Profile) error {
+	if profiles == nil && len(vm.profiles) > 0 {
+		var err error
+		profiles, err = vm.state.Cluster.ProfilesGet(vm.project, vm.profiles)
+		if err != nil {
+			return err
+		}
+	}
+
+	vm.expandedConfig = db.ProfilesExpandConfig(vm.localConfig, profiles)
+
+	return nil
+}
+
+// expandDevices computes expandedDevices by applying the given profiles on
+// top of localDevices. When profiles is nil, the instance's own profiles are
+// fetched from the database.
+func (vm *vmQemu) expandDevices(profiles []api.Profile) error {
+	if profiles == nil && len(vm.profiles) > 0 {
+		var err error
+		profiles, err = vm.state.Cluster.ProfilesGet(vm.project, vm.profiles)
+		if err != nil {
+			return err
+		}
+	}
+
+	vm.expandedDevices = db.ProfilesExpandDevices(vm.localDevices, profiles)
+
+	return nil
+}
+
+// ExpandedConfig returns the profile-expanded config map.
+func (vm *vmQemu) ExpandedConfig() map[string]string {
+	return vm.expandedConfig
+}
+
+// ExpandedDevices returns the profile-expanded device list.
+func (vm *vmQemu) ExpandedDevices() deviceConfig.Devices {
+	return vm.expandedDevices
+}
+
+// LocalConfig returns the instance's own (non-expanded) config map.
+func (vm *vmQemu) LocalConfig() map[string]string {
+	return vm.localConfig
+}
+
+// LocalDevices returns the instance's own (non-expanded) device list.
+func (vm *vmQemu) LocalDevices() deviceConfig.Devices {
+	return vm.localDevices
+}
+
+// Profiles returns the list of profile names applied to the instance.
+func (vm *vmQemu) Profiles() []string {
+	return vm.profiles
+}
+
+// InitPID returns the VM process PID, ignoring any lookup error.
+func (vm *vmQemu) InitPID() int {
+	pid, _ := vm.pid()
+	return pid
+}
+
+// statusCode derives the instance status from the QEMU monitor: Stopped when
+// no monitor socket is available, Error on monitor failures, otherwise
+// Running/Stopped according to the QMP "query-status" response.
+func (vm *vmQemu) statusCode() api.StatusCode {
+	// Connect to the monitor.
+	monitor, err := qmp.NewSocketMonitor("unix", vm.getMonitorPath(), vmVsockTimeout)
+	if err != nil {
+		return api.Stopped // The VM isn't running as no monitor socket available.
+	}
+
+	err = monitor.Connect()
+	if err != nil {
+		return api.Error // The capabilities handshake failed.
+	}
+	defer monitor.Disconnect()
+
+	// Send the status command.
+	// NOTE(review): this payload uses single quotes, which is not valid JSON;
+	// confirm the QMP monitor actually accepts it.
+	respRaw, err := monitor.Run([]byte("{'execute': 'query-status'}"))
+	if err != nil {
+		return api.Error // Status command failed.
+	}
+
+	var respDecoded struct {
+		ID     string `json:"id"`
+		Return struct {
+			Running    bool   `json:"running"`
+			Singlestep bool   `json:"singlestep"`
+			Status     string `json:"status"`
+		} `json:"return"`
+	}
+
+	err = json.Unmarshal(respRaw, &respDecoded)
+	if err != nil {
+		return api.Error // JSON decode failed.
+	}
+
+	if respDecoded.Return.Status == "running" {
+		return api.Running
+	}
+
+	return api.Stopped
+}
+
+// State returns the current status as an upper-case string.
+func (vm *vmQemu) State() string {
+	return strings.ToUpper(vm.statusCode().String())
+}
+
+// ExpiryDate returns the snapshot's expiry time, or the zero time for
+// non-snapshot instances.
+func (vm *vmQemu) ExpiryDate() time.Time {
+	if vm.IsSnapshot() {
+		return vm.expiryDate
+	}
+
+	// Return zero time if the container is not a snapshot.
+	return time.Time{}
+}
+
+// Path returns the on-disk path of the instance (or snapshot).
+func (vm *vmQemu) Path() string {
+	name := project.Prefix(vm.Project(), vm.Name())
+	return storagePools.ContainerPath(name, vm.IsSnapshot())
+}
+
+// DevicesPath returns the instance's devices directory.
+func (vm *vmQemu) DevicesPath() string {
+	name := project.Prefix(vm.Project(), vm.Name())
+	return shared.VarPath("devices", name)
+}
+
+// ShmountsPath returns the instance's shmounts directory.
+func (vm *vmQemu) ShmountsPath() string {
+	name := project.Prefix(vm.Project(), vm.Name())
+	return shared.VarPath("shmounts", name)
+}
+
+// LogPath returns the instance's log directory.
+func (vm *vmQemu) LogPath() string {
+	name := project.Prefix(vm.Project(), vm.Name())
+	return shared.LogPath(name)
+}
+
+// LogFilePath returns the path of the VM log file.
+func (vm *vmQemu) LogFilePath() string {
+	return filepath.Join(vm.LogPath(), "lxvm.log")
+}
+
+// ConsoleBufferLogPath returns the path of the console log file.
+func (vm *vmQemu) ConsoleBufferLogPath() string {
+	return filepath.Join(vm.LogPath(), "console.log")
+}
+
+// RootfsPath returns the instance's rootfs directory.
+func (vm *vmQemu) RootfsPath() string {
+	return filepath.Join(vm.Path(), "rootfs")
+}
+
+// TemplatesPath returns the instance's templates directory.
+func (vm *vmQemu) TemplatesPath() string {
+	return filepath.Join(vm.Path(), "templates")
+}
+
+// StatePath returns the instance's state directory.
+func (vm *vmQemu) StatePath() string {
+	return filepath.Join(vm.Path(), "state")
+}
+
+// StoragePool returns the name of the storage pool the instance belongs to,
+// as recorded in the database.
+func (vm *vmQemu) StoragePool() (string, error) {
+	poolName, err := vm.state.Cluster.InstancePool(vm.Project(), vm.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return poolName, nil
+}
+
+// SetOperation associates an ongoing operation with the instance.
+func (vm *vmQemu) SetOperation(op *operations.Operation) {
+	vm.op = op
+}
+
+// StorageStart initializes the storage interface. The returned bool is
+// always false for VMs.
+func (vm *vmQemu) StorageStart() (bool, error) {
+	// Initialize storage interface for the container.
+	err := vm.initStorage()
+	if err != nil {
+		return false, err
+	}
+
+	return false, nil
+}
+
+// StorageStop is a no-op for VMs.
+func (vm *vmQemu) StorageStop() (bool, error) {
+	return false, nil
+}
+
+// Storage returns the storage interface, lazily initializing it if needed.
+func (vm *vmQemu) Storage() storage {
+	if vm.storage == nil {
+		// NOTE(review): the initStorage error is discarded here, so callers
+		// can receive nil on failure.
+		vm.initStorage()
+	}
+
+	return vm.storage
+}
+
+// DeferTemplateApply is a no-op for VMs.
+func (vm *vmQemu) DeferTemplateApply(trigger string) error {
+	return nil
+}
+
+// DaemonState returns the daemon state handle.
+func (vm *vmQemu) DaemonState() *state.State {
+	// FIXME: This function should go away, since the abstract container
+	//        interface should not be coupled with internal state details.
+	//        However this is not currently possible, because many
+	//        higher-level APIs use container variables as "implicit
+	//        handles" to database/OS state and then need a way to get a
+	//        reference to it.
+	return vm.state
+}
+
+// fillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
+// generated name and hwaddr properties if these are missing from the device.
+func (vm *vmQemu) fillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
+	newDevice := m.Clone()
+	updateKey := func(key string, value string) error {
+		tx, err := vm.state.Cluster.Begin()
+		if err != nil {
+			return err
+		}
+
+		err = db.ContainerConfigInsert(tx, vm.id, map[string]string{key: value})
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+
+		err = db.TxCommit(tx)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// Fill in the MAC address
+	if !shared.StringInSlice(m["nictype"], []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
+		configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
+		volatileHwaddr := vm.localConfig[configKey]
+		if volatileHwaddr == "" {
+			// Generate a new MAC address
+			volatileHwaddr, err := deviceNextInterfaceHWAddr()
+			if err != nil {
+				return nil, err
+			}
+
+			// Update the database
+			err = query.Retry(func() error {
+				err := updateKey(configKey, volatileHwaddr)
+				if err != nil {
+					// Check if something else filled it in behind our back
+					value, err1 := vm.state.Cluster.ContainerConfigGet(vm.id, configKey)
+					if err1 != nil || value == "" {
+						return err
+					}
+
+					vm.localConfig[configKey] = value
+					vm.expandedConfig[configKey] = value
+					return nil
+				}
+
+				vm.localConfig[configKey] = volatileHwaddr
+				vm.expandedConfig[configKey] = volatileHwaddr
+				return nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		newDevice["hwaddr"] = volatileHwaddr
+	}
+
+	return newDevice, nil
+}
+
+// Internal MAAS handling.
+// Internal MAAS handling.
+
+// maasInterfaces builds the MAAS interface description for every nic device
+// that has a maas.subnet.ipv4 or maas.subnet.ipv6 property; other devices
+// are skipped.
+func (vm *vmQemu) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
+	interfaces := []maas.ContainerInterface{}
+	for k, m := range devices {
+		if m["type"] != "nic" {
+			continue
+		}
+
+		if m["maas.subnet.ipv4"] == "" && m["maas.subnet.ipv6"] == "" {
+			continue
+		}
+
+		// Ensure the device has name/hwaddr filled in before describing it.
+		m, err := vm.fillNetworkDevice(k, m)
+		if err != nil {
+			return nil, err
+		}
+
+		subnets := []maas.ContainerInterfaceSubnet{}
+
+		// IPv4
+		if m["maas.subnet.ipv4"] != "" {
+			subnet := maas.ContainerInterfaceSubnet{
+				Name:    m["maas.subnet.ipv4"],
+				Address: m["ipv4.address"],
+			}
+
+			subnets = append(subnets, subnet)
+		}
+
+		// IPv6
+		if m["maas.subnet.ipv6"] != "" {
+			subnet := maas.ContainerInterfaceSubnet{
+				Name:    m["maas.subnet.ipv6"],
+				Address: m["ipv6.address"],
+			}
+
+			subnets = append(subnets, subnet)
+		}
+
+		iface := maas.ContainerInterface{
+			Name:       m["name"],
+			MACAddress: m["hwaddr"],
+			Subnets:    subnets,
+		}
+
+		interfaces = append(interfaces, iface)
+	}
+
+	return interfaces, nil
+}
+
+// maasDelete removes the instance's MAAS record if MAAS is configured, the
+// instance has MAAS-managed interfaces and the record exists. It is a no-op
+// otherwise.
+func (vm *vmQemu) maasDelete() error {
+	maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
+	if err != nil {
+		return err
+	}
+
+	if maasURL == "" {
+		return nil
+	}
+
+	interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
+	if err != nil {
+		return err
+	}
+
+	if len(interfaces) == 0 {
+		return nil
+	}
+
+	if vm.state.MAAS == nil {
+		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
+	}
+
+	exists, err := vm.state.MAAS.DefinedContainer(project.Prefix(vm.project, vm.name))
+	if err != nil {
+		return err
+	}
+
+	if !exists {
+		return nil
+	}
+
+	return vm.state.MAAS.DeleteContainer(project.Prefix(vm.project, vm.name))
+}
+
+// maasUpdate creates, updates or deletes the instance's MAAS record so it
+// matches the current MAAS-managed interfaces. oldDevices (may be nil) is the
+// previous device set, used to detect interfaces that were removed.
+func (vm *vmQemu) maasUpdate(oldDevices map[string]map[string]string) error {
+	// Check if MAAS is configured
+	maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
+	if err != nil {
+		return err
+	}
+
+	if maasURL == "" {
+		return nil
+	}
+
+	// Check if there's something that uses MAAS
+	interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
+	if err != nil {
+		return err
+	}
+
+	var oldInterfaces []maas.ContainerInterface
+	if oldDevices != nil {
+		oldInterfaces, err = vm.maasInterfaces(oldDevices)
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(interfaces) == 0 && len(oldInterfaces) == 0 {
+		return nil
+	}
+
+	// See if we're connected to MAAS
+	if vm.state.MAAS == nil {
+		return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
+	}
+
+	exists, err := vm.state.MAAS.DefinedContainer(project.Prefix(vm.project, vm.name))
+	if err != nil {
+		return err
+	}
+
+	if exists {
+		// Record exists: delete it when no interfaces remain, else update it.
+		if len(interfaces) == 0 && len(oldInterfaces) > 0 {
+			return vm.state.MAAS.DeleteContainer(project.Prefix(vm.project, vm.name))
+		}
+
+		return vm.state.MAAS.UpdateContainer(project.Prefix(vm.project, vm.name), interfaces)
+	}
+
+	return vm.state.MAAS.CreateContainer(project.Prefix(vm.project, vm.name), interfaces)
+}

From f419315e33e1740702025e97c036bf9f79c81d7a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 10:56:01 +0000
Subject: [PATCH 03/30] lxd/api/internal: Updates use of renamed functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/api_internal.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lxd/api_internal.go b/lxd/api_internal.go
index 46393aba2c..11098e84fb 100644
--- a/lxd/api_internal.go
+++ b/lxd/api_internal.go
@@ -871,7 +871,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	if containerErr == nil {
 		// Remove the storage volume db entry for the container since
 		// force was specified.
-		err := d.cluster.ContainerRemove(projectName, req.Name)
+		err := d.cluster.InstanceRemove(projectName, req.Name)
 		if err != nil {
 			return response.SmartError(err)
 		}
@@ -916,7 +916,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 	if err != nil {
 		return response.SmartError(err)
 	}
-	_, err = containerCreateInternal(d.State(), db.InstanceArgs{
+	_, err = instanceCreateInternal(d.State(), db.InstanceArgs{
 		Project:      projectName,
 		Architecture: arch,
 		BaseImage:    baseImage,
@@ -982,7 +982,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 		}
 
 		if snapErr == nil {
-			err := d.cluster.ContainerRemove(projectName, snap.Name)
+			err := d.cluster.InstanceRemove(projectName, snap.Name)
 			if err != nil {
 				return response.SmartError(err)
 			}
@@ -1022,7 +1022,7 @@ func internalImport(d *Daemon, r *http.Request) response.Response {
 			snap.Devices[rootDevName] = rootDev
 		}
 
-		_, err = containerCreateInternal(d.State(), db.InstanceArgs{
+		_, err = instanceCreateInternal(d.State(), db.InstanceArgs{
 			Project:      projectName,
 			Architecture: arch,
 			BaseImage:    baseImage,

From 5c966c68a34f5d727cd7a5181f14ef9de8296250 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 10:58:31 +0000
Subject: [PATCH 04/30] lxd/container: Updates return values of instance create
 and load functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 0fcb2fdea6..d3ecf525ef 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -255,9 +255,9 @@ type container interface {
 }
 
 // containerCreateAsEmpty creates an empty instance.
-func containerCreateAsEmpty(d *Daemon, args db.InstanceArgs) (container, error) {
+func containerCreateAsEmpty(d *Daemon, args db.InstanceArgs) (Instance, error) {
 	// Create the container.
-	c, err := containerCreateInternal(d.State(), args)
+	c, err := instanceCreateInternal(d.State(), args)
 	if err != nil {
 		return nil, err
 	}
@@ -402,9 +402,9 @@ func containerCreateFromBackup(s *state.State, info backup.Info, data io.ReadSee
 	return pool, nil
 }
 
-func containerCreateEmptySnapshot(s *state.State, args db.InstanceArgs) (container, error) {
+func containerCreateEmptySnapshot(s *state.State, args db.InstanceArgs) (Instance, error) {
 	// Create the snapshot
-	c, err := containerCreateInternal(s, args)
+	c, err := instanceCreateInternal(s, args)
 	if err != nil {
 		return nil, err
 	}
@@ -544,7 +544,7 @@ func containerCreateAsCopy(s *state.State, args db.InstanceArgs, sourceContainer
 
 	if !refresh {
 		// Create the container.
-		ct, err = containerCreateInternal(s, args)
+		ct, err = instanceCreateInternal(s, args)
 		if err != nil {
 			return nil, err
 		}
@@ -564,7 +564,7 @@ func containerCreateAsCopy(s *state.State, args db.InstanceArgs, sourceContainer
 		parentStoragePool = parentLocalRootDiskDevice["pool"]
 	}
 
-	csList := []*container{}
+	csList := []*Instance{}
 	var snapshots []Instance
 
 	if !containerOnly {
@@ -632,7 +632,7 @@ func containerCreateAsCopy(s *state.State, args db.InstanceArgs, sourceContainer
 			}
 
 			// Create the snapshots.
-			cs, err := containerCreateInternal(s, csArgs)
+			cs, err := instanceCreateInternal(s, csArgs)
 			if err != nil {
 				if !refresh {
 					ct.Delete()
@@ -750,7 +750,7 @@ func containerCreateAsSnapshot(s *state.State, args db.InstanceArgs, sourceInsta
 	}
 
 	// Create the snapshot
-	c, err := containerCreateInternal(s, args)
+	c, err := instanceCreateInternal(s, args)
 	if err != nil {
 		return nil, err
 	}

From e5bc5ad9bb78474438cbbe7fd6590a24c5ba67ef Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 10:59:11 +0000
Subject: [PATCH 05/30] lxd/container: Renames containerCreateFromImage to
 instanceCreateFromImage

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index d3ecf525ef..537d524186 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -419,8 +419,8 @@ func containerCreateEmptySnapshot(s *state.State, args db.InstanceArgs) (Instanc
 	return c, nil
 }
 
-// containerCreateFromImage creates an instance from a rootfs image.
-func containerCreateFromImage(d *Daemon, args db.InstanceArgs, hash string, op *operations.Operation) (container, error) {
+// instanceCreateFromImage creates an instance from a rootfs image.
+func instanceCreateFromImage(d *Daemon, args db.InstanceArgs, hash string, op *operations.Operation) (Instance, error) {
 	s := d.State()
 
 	// Get the image properties.
@@ -475,59 +475,66 @@ func containerCreateFromImage(d *Daemon, args db.InstanceArgs, hash string, op *
 	// Set the BaseImage field (regardless of previous value).
 	args.BaseImage = hash
 
-	// Create the container
-	c, err := containerCreateInternal(s, args)
+	// Create the instance.
+	inst, err := instanceCreateInternal(s, args)
 	if err != nil {
-		return nil, errors.Wrap(err, "Create container")
+		return nil, errors.Wrap(err, "Create instance")
 	}
 
+	revert := true
+	defer func() {
+		if !revert {
+			return
+		}
+
+		inst.Delete()
+	}()
+
 	err = s.Cluster.ImageLastAccessUpdate(hash, time.Now().UTC())
 	if err != nil {
-		c.Delete()
 		return nil, fmt.Errorf("Error updating image last use date: %s", err)
 	}
 
 	// Check if we can load new storage layer for pool driver type.
-	pool, err := storagePools.GetPoolByInstance(d.State(), c)
+	pool, err := storagePools.GetPoolByInstance(d.State(), inst)
 	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
 		if err != nil {
 			return nil, errors.Wrap(err, "Load instance storage pool")
 		}
 
-		err = pool.CreateInstanceFromImage(c, hash, op)
+		err = pool.CreateInstanceFromImage(inst, hash, op)
 		if err != nil {
-			c.Delete()
 			return nil, errors.Wrap(err, "Create instance from image")
 		}
-	} else if c.Type() == instancetype.Container {
+	} else if inst.Type() == instancetype.Container {
 		metadata := make(map[string]interface{})
 		var tracker *ioprogress.ProgressTracker
 		if op != nil {
 			tracker = &ioprogress.ProgressTracker{
 				Handler: func(percent, speed int64) {
+					// tomp TODO should the container reference here be removed?
 					shared.SetProgressMetadata(metadata, "create_container_from_image_unpack", "Unpack", percent, 0, speed)
 					op.UpdateMetadata(metadata)
 				}}
 		}
 
 		// Now create the storage from an image.
-		err = c.Storage().ContainerCreateFromImage(c, hash, tracker)
+		err = inst.Storage().ContainerCreateFromImage(inst, hash, tracker)
 		if err != nil {
-			c.Delete()
-			return nil, errors.Wrap(err, "Create container from image")
+			return nil, errors.Wrap(err, "Create instance from image")
 		}
 	} else {
 		return nil, fmt.Errorf("Instance type not supported")
 	}
 
 	// Apply any post-storage configuration.
-	err = containerConfigureInternal(d.State(), c)
+	err = containerConfigureInternal(d.State(), inst)
 	if err != nil {
-		c.Delete()
-		return nil, errors.Wrap(err, "Configure container")
+		return nil, errors.Wrap(err, "Configure instance")
 	}
 
-	return c, nil
+	revert = false
+	return inst, nil
 }
 
 func containerCreateAsCopy(s *state.State, args db.InstanceArgs, sourceContainer Instance, containerOnly bool, refresh bool) (Instance, error) {

From cbea788ed28e62ead462ff47b88bb7e6ba1ce166 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 10:59:50 +0000
Subject: [PATCH 06/30] lxd/container: Renames containerCreateInternal to
 instanceCreateInternal

- Adds support for VM creation

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 73 +++++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 28 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 537d524186..627694b557 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -799,8 +799,8 @@ func containerCreateAsSnapshot(s *state.State, args db.InstanceArgs, sourceInsta
 	return c, nil
 }
 
-func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, error) {
-	// Set default values
+func instanceCreateInternal(s *state.State, args db.InstanceArgs) (Instance, error) {
+	// Set default values.
 	if args.Project == "" {
 		args.Project = "default"
 	}
@@ -832,11 +832,11 @@ func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, e
 			return nil, err
 		}
 
-		// Unset expiry date since containers don't expire
+		// Unset expiry date since containers don't expire.
 		args.ExpiryDate = time.Time{}
 	}
 
-	// Validate container config
+	// Validate container config.
 	err := containerValidConfig(s.OS, args.Config, false, false)
 	if err != nil {
 		return nil, err
@@ -848,7 +848,7 @@ func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, e
 		return nil, errors.Wrap(err, "Invalid devices")
 	}
 
-	// Validate architecture
+	// Validate architecture.
 	_, err = osarch.ArchitectureName(args.Architecture)
 	if err != nil {
 		return nil, err
@@ -858,7 +858,7 @@ func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, e
 		return nil, fmt.Errorf("Requested architecture isn't supported by this host")
 	}
 
-	// Validate profiles
+	// Validate profiles.
 	profiles, err := s.Cluster.Profiles(args.Project)
 	if err != nil {
 		return nil, err
@@ -885,15 +885,15 @@ func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, e
 		args.LastUsedDate = time.Unix(0, 0).UTC()
 	}
 
-	var container db.Instance
+	var dbInst db.Instance
+
 	err = s.Cluster.Transaction(func(tx *db.ClusterTx) error {
 		node, err := tx.NodeName()
 		if err != nil {
 			return err
 		}
 
-		// TODO: this check should probably be performed by the db
-		// package itself.
+		// TODO: this check should probably be performed by the db package itself.
 		exists, err := tx.ProjectExists(args.Project)
 		if err != nil {
 			return errors.Wrapf(err, "Check if project %q exists", args.Project)
@@ -932,13 +932,13 @@ func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, e
 				return errors.Wrap(err, "Fetch created snapshot from the database")
 			}
 
-			container = db.InstanceSnapshotToInstance(instance, s)
+			dbInst = db.InstanceSnapshotToInstance(instance, s)
 
 			return nil
 		}
 
-		// Create the container entry
-		container = db.Instance{
+		// Create the instance entry.
+		dbInst = db.Instance{
 			Project:      args.Project,
 			Name:         args.Name,
 			Node:         node,
@@ -956,28 +956,28 @@ func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, e
 			ExpiryDate:   args.ExpiryDate,
 		}
 
-		_, err = tx.InstanceCreate(container)
+		_, err = tx.InstanceCreate(dbInst)
 		if err != nil {
-			return errors.Wrap(err, "Add container info to the database")
+			return errors.Wrap(err, "Add instance info to the database")
 		}
 
-		// Read back the container, to get ID and creation time.
-		c, err := tx.InstanceGet(args.Project, args.Name)
+		// Read back the instance, to get ID and creation time.
+		dbRow, err := tx.InstanceGet(args.Project, args.Name)
 		if err != nil {
-			return errors.Wrap(err, "Fetch created container from the database")
+			return errors.Wrap(err, "Fetch created instance from the database")
 		}
 
-		container = *c
+		dbInst = *dbRow
 
-		if container.ID < 1 {
-			return errors.Wrapf(err, "Unexpected container database ID %d", container.ID)
+		if dbInst.ID < 1 {
+			return errors.Wrapf(err, "Unexpected instance database ID %d", dbInst.ID)
 		}
 
 		return nil
 	})
 	if err != nil {
 		if err == db.ErrAlreadyDefined {
-			thing := "Container"
+			thing := "Instance"
 			if shared.IsSnapshot(args.Name) {
 				thing = "Snapshot"
 			}
@@ -986,19 +986,36 @@ func containerCreateInternal(s *state.State, args db.InstanceArgs) (container, e
 		return nil, err
 	}
 
-	// Wipe any existing log for this container name
+	revert := true
+	defer func() {
+		if !revert {
+			return
+		}
+
+		s.Cluster.InstanceRemove(dbInst.Project, dbInst.Name)
+	}()
+
+	// Wipe any existing log for this instance name.
 	os.RemoveAll(shared.LogPath(args.Name))
 
-	args = db.ContainerToArgs(&container)
+	args = db.ContainerToArgs(&dbInst)
+
+	var inst Instance
+
+	if args.Type == instancetype.Container {
+		inst, err = containerLXCCreate(s, args)
+	} else if args.Type == instancetype.VM {
+		inst, err = vmQemuCreate(s, args)
+	} else {
+		return nil, fmt.Errorf("Instance type invalid")
+	}
 
-	// Setup the container struct and finish creation (storage and idmap)
-	c, err := containerLXCCreate(s, args)
 	if err != nil {
-		s.Cluster.ContainerRemove(args.Project, args.Name)
-		return nil, errors.Wrap(err, "Create LXC container")
+		return nil, err
 	}
 
-	return c, nil
+	revert = false
+	return inst, nil
 }
 
 func containerConfigureInternal(state *state.State, c Instance) error {

From fdc7c0fbd6ee8c7fabaa31e715a9a3d559f0ca04 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:00:19 +0000
Subject: [PATCH 07/30] lxd/container: Adds VM support to instanceLoad

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 627694b557..9bbe6e13bf 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -1287,12 +1287,14 @@ func instanceLoadAllInternal(dbInstances []db.Instance, s *state.State) ([]Insta
 }
 
 // instanceLoad creates the underlying instance type struct and returns it as an Instance.
-func instanceLoad(s *state.State, args db.InstanceArgs, cProfiles []api.Profile) (Instance, error) {
+func instanceLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (Instance, error) {
 	var inst Instance
 	var err error
 
 	if args.Type == instancetype.Container {
-		inst, err = containerLXCLoad(s, args, cProfiles)
+		inst, err = containerLXCLoad(s, args, profiles)
+	} else if args.Type == instancetype.VM {
+		inst, err = vmQemuLoad(s, args, profiles)
 	} else {
 		return nil, fmt.Errorf("Invalid instance type for instance %s", args.Name)
 	}

From 316dfdee8ec4a257c2a4943671a5ea2a77ebd495 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:00:39 +0000
Subject: [PATCH 08/30] lxd/container/lxc: Updates use of renamed
 c.state.Cluster.InstanceRemove

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_lxc.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index b52b732806..2071b242bb 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -3614,7 +3614,7 @@ func (c *containerLXC) Delete() error {
 		}
 
 		// Remove the database record of the instance or snapshot instance.
-		if err := c.state.Cluster.ContainerRemove(c.Project(), c.Name()); err != nil {
+		if err := c.state.Cluster.InstanceRemove(c.Project(), c.Name()); err != nil {
 			logger.Error("Failed deleting instance entry", log.Ctx{"project": c.Project(), "instance": c.Name(), "err": err})
 			return err
 		}
@@ -3689,7 +3689,7 @@ func (c *containerLXC) Delete() error {
 		}
 
 		// Remove the database record
-		if err := c.state.Cluster.ContainerRemove(c.project, c.Name()); err != nil {
+		if err := c.state.Cluster.InstanceRemove(c.project, c.Name()); err != nil {
 			logger.Error("Failed deleting container entry", log.Ctx{"name": c.Name(), "err": err})
 			return err
 		}

From be392c3a606f7fe7c6457bc38f4cc5e97d2dff3c Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:01:04 +0000
Subject: [PATCH 09/30] lxd/containers/post: Adds VM support to createFromImage

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/containers_post.go | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index e784bb3563..d501b67d9a 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -118,9 +118,16 @@ func createFromImage(d *Daemon, project string, req *api.InstancesPost) response
 			if err != nil {
 				return err
 			}
+
+			// Detect image type based on instance type requested.
+			imgType := "container"
+			if req.Type == "virtual-machine" {
+				imgType = "virtual-machine"
+			}
+
 			info, err = d.ImageDownload(
 				op, req.Source.Server, req.Source.Protocol, req.Source.Certificate,
-				req.Source.Secret, hash, "container", true, autoUpdate, "", true, project)
+				req.Source.Secret, hash, imgType, true, autoUpdate, "", true, project)
 			if err != nil {
 				return err
 			}
@@ -136,11 +143,12 @@ func createFromImage(d *Daemon, project string, req *api.InstancesPost) response
 			return err
 		}
 
-		_, err = containerCreateFromImage(d, args, info.Fingerprint, op)
+		_, err = instanceCreateFromImage(d, args, info.Fingerprint, op)
 		return err
 	}
 
 	resources := map[string][]string{}
+	// tomp TODO should this be renamed/added to?
 	resources["containers"] = []string{req.Name}
 
 	op, err := operations.OperationCreate(d.State(), project, operations.OperationClassTask, db.OperationContainerCreate, resources, nil, run, nil, nil)
@@ -198,7 +206,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
 		return response.NotImplemented(fmt.Errorf("Mode '%s' not implemented", req.Source.Mode))
 	}
 
-	var c container
+	var c Instance
 
 	// Parse the architecture name
 	architecture, err := osarch.ArchitectureId(req.Architecture)
@@ -293,7 +301,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
 			return response.BadRequest(fmt.Errorf("Instance type not container"))
 		}
 
-		c = inst.(container)
+		c = inst
 	}
 
 	if !req.Source.Refresh {
@@ -340,7 +348,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
 			}
 
 			if ps.MigrationType() == migration.MigrationFSType_RSYNC {
-				c, err = containerCreateFromImage(d, args, req.Source.BaseImage, nil)
+				c, err = instanceCreateFromImage(d, args, req.Source.BaseImage, nil)
 				if err != nil {
 					return response.InternalError(err)
 				}

From 0abf8ff0c205d6e457c1de6222f67b3a93262b55 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:01:19 +0000
Subject: [PATCH 10/30] lxd/db/containers: Renames ContainerRemove to
 InstanceRemove

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/db/containers.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index b2b4cf9d3d..58ff234f32 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -620,8 +620,8 @@ func (c *ClusterTx) configUpdate(id int, values map[string]string, insertSQL, de
 	return nil
 }
 
-// ContainerRemove removes the container with the given name from the database.
-func (c *Cluster) ContainerRemove(project, name string) error {
+// InstanceRemove removes the instance with the given name from the database.
+func (c *Cluster) InstanceRemove(project, name string) error {
 	if strings.Contains(name, shared.SnapshotDelimiter) {
 		parts := strings.SplitN(name, shared.SnapshotDelimiter, 2)
 		return c.Transaction(func(tx *ClusterTx) error {

From 6b21846f91520015a185292f44c09fccc9677dd9 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:04:06 +0000
Subject: [PATCH 11/30] lxd/container: Renames containerCreateAsEmpty to
 instanceCreateAsEmpty

- Adds revert for safety

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container.go | 33 ++++++++++++++++++++-------------
 1 file changed, 20 insertions(+), 13 deletions(-)

diff --git a/lxd/container.go b/lxd/container.go
index 9bbe6e13bf..d9b0838a14 100644
--- a/lxd/container.go
+++ b/lxd/container.go
@@ -254,31 +254,38 @@ type container interface {
 	NextIdmap() (*idmap.IdmapSet, error)
 }
 
-// containerCreateAsEmpty creates an empty instance.
-func containerCreateAsEmpty(d *Daemon, args db.InstanceArgs) (Instance, error) {
-	// Create the container.
-	c, err := instanceCreateInternal(d.State(), args)
+// instanceCreateAsEmpty creates an empty instance.
+func instanceCreateAsEmpty(d *Daemon, args db.InstanceArgs) (Instance, error) {
+	// Create the instance record.
+	inst, err := instanceCreateInternal(d.State(), args)
 	if err != nil {
 		return nil, err
 	}
 
+	revert := true
+	defer func() {
+		if !revert {
+			return
+		}
+
+		inst.Delete()
+	}()
+
 	// Check if we can load new storage layer for pool driver type.
-	pool, err := storagePools.GetPoolByInstance(d.State(), c)
+	pool, err := storagePools.GetPoolByInstance(d.State(), inst)
 	if err != storageDrivers.ErrUnknownDriver && err != storageDrivers.ErrNotImplemented {
 		if err != nil {
 			return nil, errors.Wrap(err, "Load instance storage pool")
 		}
 
-		err = pool.CreateInstance(c, nil)
+		err = pool.CreateInstance(inst, nil)
 		if err != nil {
-			c.Delete()
 			return nil, errors.Wrap(err, "Create instance")
 		}
-	} else if c.Type() == instancetype.Container {
+	} else if inst.Type() == instancetype.Container {
 		// Now create the empty storage.
-		err = c.Storage().ContainerCreate(c)
+		err = inst.Storage().ContainerCreate(inst)
 		if err != nil {
-			c.Delete()
 			return nil, err
 		}
 	} else {
@@ -286,13 +293,13 @@ func containerCreateAsEmpty(d *Daemon, args db.InstanceArgs) (Instance, error) {
 	}
 
 	// Apply any post-storage configuration.
-	err = containerConfigureInternal(d.State(), c)
+	err = containerConfigureInternal(d.State(), inst)
 	if err != nil {
-		c.Delete()
 		return nil, err
 	}
 
-	return c, nil
+	revert = false
+	return inst, nil
 }
 
 func containerCreateFromBackup(s *state.State, info backup.Info, data io.ReadSeeker,

From 24242b3937d0574b7537706ecc7d1caa7b30e642 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:09:46 +0000
Subject: [PATCH 12/30] lxd/containers/post: Updates use of
 instanceCreateAsEmpty

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/containers_post.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lxd/containers_post.go b/lxd/containers_post.go
index d501b67d9a..eced2a1c89 100644
--- a/lxd/containers_post.go
+++ b/lxd/containers_post.go
@@ -185,7 +185,7 @@ func createFromNone(d *Daemon, project string, req *api.InstancesPost) response.
 	}
 
 	run := func(op *operations.Operation) error {
-		_, err := containerCreateAsEmpty(d, args)
+		_, err := instanceCreateAsEmpty(d, args)
 		return err
 	}
 
@@ -320,7 +320,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
 		 */
 		_, _, err = d.cluster.ImageGet(args.Project, req.Source.BaseImage, false, true)
 		if err != nil {
-			c, err = containerCreateAsEmpty(d, args)
+			c, err = instanceCreateAsEmpty(d, args)
 			if err != nil {
 				return response.InternalError(err)
 			}
@@ -353,7 +353,7 @@ func createFromMigration(d *Daemon, project string, req *api.InstancesPost) resp
 					return response.InternalError(err)
 				}
 			} else {
-				c, err = containerCreateAsEmpty(d, args)
+				c, err = instanceCreateAsEmpty(d, args)
 				if err != nil {
 					return response.InternalError(err)
 				}

From 0a00433fa2f67e803ba3a94c9daf9ef60c2acf84 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:36:17 +0000
Subject: [PATCH 13/30] lxd/storage/backend/lxd: Pass correct content type to
 storage drivers for VMs

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/backend_lxd.go | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index 1e8b81b2a8..b8c5483075 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -285,7 +285,12 @@ func (b *lxdBackend) CreateInstance(inst Instance, op *operations.Operation) err
 		b.DeleteInstance(inst, op)
 	}()
 
-	vol := b.newVolume(volType, drivers.ContentTypeFS, project.Prefix(inst.Project(), inst.Name()), nil)
+	contentType := drivers.ContentTypeFS
+	if inst.Type() == instancetype.VM {
+		contentType = drivers.ContentTypeBlock
+	}
+
+	vol := b.newVolume(volType, contentType, project.Prefix(inst.Project(), inst.Name()), nil)
 	err = b.driver.CreateVolume(vol, nil, op)
 	if err != nil {
 		return err
@@ -352,7 +357,12 @@ func (b *lxdBackend) CreateInstanceFromImage(inst Instance, fingerprint string,
 		b.DeleteInstance(inst, op)
 	}()
 
-	vol := b.newVolume(volType, drivers.ContentTypeFS, project.Prefix(inst.Project(), inst.Name()), nil)
+	contentType := drivers.ContentTypeFS
+	if inst.Type() == instancetype.VM {
+		contentType = drivers.ContentTypeBlock
+	}
+
+	vol := b.newVolume(volType, contentType, project.Prefix(inst.Project(), inst.Name()), nil)
 
 	// If the driver doesn't support optimized image volumes then create a new empty volume and
 	// populate it with the contents of the image archive.
@@ -370,7 +380,7 @@ func (b *lxdBackend) CreateInstanceFromImage(inst Instance, fingerprint string,
 			return err
 		}
 
-		imgVol := b.newVolume(drivers.VolumeTypeImage, drivers.ContentTypeFS, fingerprint, nil)
+		imgVol := b.newVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)
 		err = b.driver.CreateVolumeFromCopy(vol, imgVol, false, op)
 		if err != nil {
 			return err
@@ -825,9 +835,20 @@ func (b *lxdBackend) EnsureImage(fingerprint string, op *operations.Operation) e
 		return nil
 	}
 
+	// Load image info from database.
+	_, image, err := b.state.Cluster.ImageGetFromAnyProject(fingerprint)
+	if err != nil {
+		return err
+	}
+
+	contentType := drivers.ContentTypeFS
+	if api.InstanceType(image.Type) == api.InstanceTypeVM {
+		contentType = drivers.ContentTypeBlock
+	}
+
 	// Create the new image volume.
-	vol := b.newVolume(drivers.VolumeTypeImage, drivers.ContentTypeFS, fingerprint, nil)
-	err := b.driver.CreateVolume(vol, b.imageFiller(fingerprint, op), op)
+	vol := b.newVolume(drivers.VolumeTypeImage, contentType, fingerprint, nil)
+	err = b.driver.CreateVolume(vol, b.imageFiller(fingerprint, op), op)
 	if err != nil {
 		return err
 	}

From 91df6957f9ef32f698fd4c4a6c340a2da61c8265 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:57:18 +0000
Subject: [PATCH 14/30] lxd/storage/drivers/utils: Unexports
 deleteParentSnapshotDirIfEmpty

Unexported because it is not used outside of the package.

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/utils.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go
index ffc09539ed..655c2ac58a 100644
--- a/lxd/storage/drivers/utils.go
+++ b/lxd/storage/drivers/utils.go
@@ -192,9 +192,9 @@ func GetSnapshotVolumeName(parentName, snapshotName string) string {
 	return fmt.Sprintf("%s%s%s", parentName, shared.SnapshotDelimiter, snapshotName)
 }
 
-// DeleteParentSnapshotDirIfEmpty removes the parent snapshot directory if it is empty.
+// deleteParentSnapshotDirIfEmpty removes the parent snapshot directory if it is empty.
 // It accepts the pool name, volume type and parent volume name.
-func DeleteParentSnapshotDirIfEmpty(poolName string, volType VolumeType, volName string) error {
+func deleteParentSnapshotDirIfEmpty(poolName string, volType VolumeType, volName string) error {
 	snapshotsPath, err := GetVolumeSnapshotDir(poolName, volType, volName)
 	if err != nil {
 		return err

From 00c09f3646753e3b22c149a158d6c0ab668debb4 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:57:50 +0000
Subject: [PATCH 15/30] lxd/storage/drivers/driver/dir: Updates use of
 deleteParentSnapshotDirIfEmpty

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/driver_dir.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/driver_dir.go b/lxd/storage/drivers/driver_dir.go
index a80c2ccb12..d84a307774 100644
--- a/lxd/storage/drivers/driver_dir.go
+++ b/lxd/storage/drivers/driver_dir.go
@@ -632,7 +632,7 @@ func (d *dir) DeleteVolume(volType VolumeType, volName string, op *operations.Op
 
 	// Although the volume snapshot directory should already be removed, lets remove it here
 	// to just in case the top-level directory is left.
-	err = DeleteParentSnapshotDirIfEmpty(d.name, volType, volName)
+	err = deleteParentSnapshotDirIfEmpty(d.name, volType, volName)
 	if err != nil {
 		return err
 	}
@@ -827,7 +827,7 @@ func (d *dir) DeleteVolumeSnapshot(volType VolumeType, volName string, snapshotN
 	}
 
 	// Remove the parent snapshot directory if this is the last snapshot being removed.
-	err = DeleteParentSnapshotDirIfEmpty(d.name, volType, volName)
+	err = deleteParentSnapshotDirIfEmpty(d.name, volType, volName)
 	if err != nil {
 		return err
 	}

From ad30884ee9f216a70b8f96c78659f2e94c597fc5 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 11:58:09 +0000
Subject: [PATCH 16/30] lxd/storage/drivers/utils: Adds createSparseFile

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/utils.go | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/lxd/storage/drivers/utils.go b/lxd/storage/drivers/utils.go
index 655c2ac58a..1b16c2de5d 100644
--- a/lxd/storage/drivers/utils.go
+++ b/lxd/storage/drivers/utils.go
@@ -217,3 +217,24 @@ func deleteParentSnapshotDirIfEmpty(poolName string, volType VolumeType, volName
 
 	return nil
 }
+
+// createSparseFile creates a sparse empty file at specified location with specified size.
+func createSparseFile(filePath string, sizeBytes int64) error {
+	f, err := os.Create(filePath)
+	if err != nil {
+		return fmt.Errorf("Failed to open %s: %s", filePath, err)
+	}
+	defer f.Close()
+
+	err = f.Chmod(0600)
+	if err != nil {
+		return fmt.Errorf("Failed to chmod %s: %s", filePath, err)
+	}
+
+	err = f.Truncate(sizeBytes)
+	if err != nil {
+		return fmt.Errorf("Failed to create sparse file %s: %s", filePath, err)
+	}
+
+	return nil
+}

From d83dcaee77871a20a261af0805dd3aef10af999b Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 16:54:27 +0000
Subject: [PATCH 17/30] lxd/container/lxc: Updating DB usage to be instance
 type agnostic

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_lxc.go | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/lxd/container_lxc.go b/lxd/container_lxc.go
index 2071b242bb..a6e5a13c96 100644
--- a/lxd/container_lxc.go
+++ b/lxd/container_lxc.go
@@ -3544,7 +3544,7 @@ func (c *containerLXC) Delete() error {
 	}
 
 	// Get the storage pool name of the instance.
-	poolName, err := c.state.Cluster.ContainerPool(c.Project(), c.Name())
+	poolName, err := c.state.Cluster.InstancePool(c.Project(), c.Name())
 	if err != nil {
 		return err
 	}
@@ -4055,7 +4055,12 @@ func writeBackupFile(c Instance) error {
 		return err
 	}
 
-	_, volume, err := s.Cluster.StoragePoolNodeVolumeGetTypeByProject(c.Project(), c.Name(), storagePoolVolumeTypeContainer, poolID)
+	dbType := db.StoragePoolVolumeTypeContainer
+	if c.Type() == instancetype.VM {
+		dbType = db.StoragePoolVolumeTypeVM
+	}
+
+	_, volume, err := s.Cluster.StoragePoolNodeVolumeGetTypeByProject(c.Project(), c.Name(), dbType, poolID)
 	if err != nil {
 		return err
 	}
@@ -6897,7 +6902,7 @@ func (c *containerLXC) StatePath() string {
 }
 
 func (c *containerLXC) StoragePool() (string, error) {
-	poolName, err := c.state.Cluster.ContainerPool(c.Project(), c.Name())
+	poolName, err := c.state.Cluster.InstancePool(c.Project(), c.Name())
 	if err != nil {
 		return "", err
 	}

From 2791da45dd6a5fd4672d570220297008039c7aa3 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 16:54:59 +0000
Subject: [PATCH 18/30] lxd/container/post: Updates usage of InstancePool

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/container_post.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/container_post.go b/lxd/container_post.go
index d9027961ce..3371b332b9 100644
--- a/lxd/container_post.go
+++ b/lxd/container_post.go
@@ -178,7 +178,7 @@ func containerPost(d *Daemon, r *http.Request) response.Response {
 			}
 
 			// Check if we are migrating a ceph-based container.
-			poolName, err := d.cluster.ContainerPool(project, name)
+			poolName, err := d.cluster.InstancePool(project, name)
 			if err != nil {
 				err = errors.Wrap(err, "Failed to fetch container's pool name")
 				return response.SmartError(err)
@@ -449,7 +449,7 @@ func containerPostClusteringMigrateWithCeph(d *Daemon, c Instance, project, oldN
 				return errors.Wrapf(
 					err, "Move container %s to %s with new name %s", oldName, newNode, newName)
 			}
-			poolName, err = tx.ContainerPool(project, newName)
+			poolName, err = tx.InstancePool(project, newName)
 			if err != nil {
 				return errors.Wrapf(err, "Get the container's storage pool name for %s", newName)
 			}

From 9686b4eb4b575ae16be0cda48e41a4b73660b532 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 16:55:27 +0000
Subject: [PATCH 19/30] lxd/db/containers: Updates pool lookup functions to be
 instance type agnostic

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/db/containers.go | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/lxd/db/containers.go b/lxd/db/containers.go
index 58ff234f32..c6c3fcfd00 100644
--- a/lxd/db/containers.go
+++ b/lxd/db/containers.go
@@ -444,7 +444,7 @@ func (c *ClusterTx) snapshotIDsAndNames(project, name string) (map[int]string, e
 func (c *ClusterTx) ContainerNodeMove(project, oldName, newName, newNode string) error {
 	// First check that the container to be moved is backed by a ceph
 	// volume.
-	poolName, err := c.ContainerPool(project, oldName)
+	poolName, err := c.InstancePool(project, oldName)
 	if err != nil {
 		return errors.Wrap(err, "failed to get container's storage pool name")
 	}
@@ -1085,23 +1085,23 @@ WHERE projects.name=? AND instances.name=?`
 	return max
 }
 
-// ContainerPool returns the storage pool of a given container.
+// InstancePool returns the storage pool of a given instance.
 //
 // This is a non-transactional variant of ClusterTx.ContainerPool().
-func (c *Cluster) ContainerPool(project, containerName string) (string, error) {
+func (c *Cluster) InstancePool(project, instanceName string) (string, error) {
 	var poolName string
 	err := c.Transaction(func(tx *ClusterTx) error {
 		var err error
-		poolName, err = tx.ContainerPool(project, containerName)
+		poolName, err = tx.InstancePool(project, instanceName)
 		return err
 	})
 	return poolName, err
 }
 
-// ContainerPool returns the storage pool of a given container.
-func (c *ClusterTx) ContainerPool(project, containerName string) (string, error) {
-	if strings.Contains(containerName, shared.SnapshotDelimiter) {
-		return c.containerPoolSnapshot(project, containerName)
+// InstancePool returns the storage pool of a given instance.
+func (c *ClusterTx) InstancePool(project, instanceName string) (string, error) {
+	if strings.Contains(instanceName, shared.SnapshotDelimiter) {
+		return c.instancePoolSnapshot(project, instanceName)
 	}
 
 	// Get container storage volume. Since container names are globally
@@ -1113,9 +1113,9 @@ SELECT storage_pools.name FROM storage_pools
   JOIN storage_volumes ON storage_pools.id=storage_volumes.storage_pool_id
   JOIN instances ON instances.name=storage_volumes.name
   JOIN projects ON projects.id=instances.project_id
- WHERE projects.name=? AND storage_volumes.node_id=? AND storage_volumes.name=? AND storage_volumes.type=?
+ WHERE projects.name=? AND storage_volumes.node_id=? AND storage_volumes.name=? AND storage_volumes.type IN(?,?)
 `
-	inargs := []interface{}{project, c.nodeID, containerName, StoragePoolVolumeTypeContainer}
+	inargs := []interface{}{project, c.nodeID, instanceName, StoragePoolVolumeTypeContainer, StoragePoolVolumeTypeVM}
 	outargs := []interface{}{&poolName}
 
 	err := c.tx.QueryRow(query, inargs...).Scan(outargs...)
@@ -1130,15 +1130,15 @@ SELECT storage_pools.name FROM storage_pools
 	return poolName, nil
 }
 
-func (c *ClusterTx) containerPoolSnapshot(project, fullName string) (string, error) {
+func (c *ClusterTx) instancePoolSnapshot(project, fullName string) (string, error) {
 	poolName := ""
 	query := `
 SELECT storage_pools.name FROM storage_pools
   JOIN storage_volumes ON storage_pools.id=storage_volumes.storage_pool_id
   JOIN projects ON projects.id=storage_volumes.project_id
- WHERE projects.name=? AND storage_volumes.node_id=? AND storage_volumes.name=? AND storage_volumes.type=?
+ WHERE projects.name=? AND storage_volumes.node_id=? AND storage_volumes.name=? AND storage_volumes.type IN(?,?)
 `
-	inargs := []interface{}{project, c.nodeID, fullName, StoragePoolVolumeTypeContainer}
+	inargs := []interface{}{project, c.nodeID, fullName, StoragePoolVolumeTypeContainer, StoragePoolVolumeTypeVM}
 	outargs := []interface{}{&poolName}
 
 	err := c.tx.QueryRow(query, inargs...).Scan(outargs...)

From 441001b2778189ae229cfecfb3e5e8f66367805a Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 16:56:21 +0000
Subject: [PATCH 20/30] lxd/db/storage/pools: Adds VM instance type constant
 and conversion codes

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/db/storage_pools.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/db/storage_pools.go b/lxd/db/storage_pools.go
index b2995b23e0..1f9f30c8f4 100644
--- a/lxd/db/storage_pools.go
+++ b/lxd/db/storage_pools.go
@@ -1053,6 +1053,7 @@ const (
 	StoragePoolVolumeTypeContainer = iota
 	StoragePoolVolumeTypeImage
 	StoragePoolVolumeTypeCustom
+	StoragePoolVolumeTypeVM
 )
 
 // Leave the string type in here! This guarantees that go treats this is as a
@@ -1060,6 +1061,7 @@ const (
 // constants which is not what we want.
 const (
 	StoragePoolVolumeTypeNameContainer string = "container"
+	StoragePoolVolumeTypeNameVM        string = "virtual-machine"
 	StoragePoolVolumeTypeNameImage     string = "image"
 	StoragePoolVolumeTypeNameCustom    string = "custom"
 )
@@ -1081,6 +1083,8 @@ func StoragePoolVolumeTypeToName(volumeType int) (string, error) {
 	switch volumeType {
 	case StoragePoolVolumeTypeContainer:
 		return StoragePoolVolumeTypeNameContainer, nil
+	case StoragePoolVolumeTypeVM:
+		return StoragePoolVolumeTypeNameVM, nil
 	case StoragePoolVolumeTypeImage:
 		return StoragePoolVolumeTypeNameImage, nil
 	case StoragePoolVolumeTypeCustom:

From 9da7d01264220cb2f4e63f49f34da1a76c291591 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 16:56:56 +0000
Subject: [PATCH 21/30] lxd/storage: Updates InstancePool usage

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/storage.go b/lxd/storage.go
index 6a3aa7cf7d..11654a9153 100644
--- a/lxd/storage.go
+++ b/lxd/storage.go
@@ -620,7 +620,7 @@ func storagePoolVolumeContainerCreateInit(s *state.State, project string, poolNa
 
 func storagePoolVolumeContainerLoadInit(s *state.State, project, containerName string) (storage, error) {
 	// Get the storage pool of a given container.
-	poolName, err := s.Cluster.ContainerPool(project, containerName)
+	poolName, err := s.Cluster.InstancePool(project, containerName)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Load storage pool for container %q in project %q", containerName, project)
 	}

From 5a1fa4ea77a8e93555e8a0bc78cdaaa96c1cf720 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:01:26 +0000
Subject: [PATCH 22/30] lxd/storage/backend/lxd: Comment tweaks

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/backend_lxd.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/lxd/storage/backend_lxd.go b/lxd/storage/backend_lxd.go
index b8c5483075..6e2fc9578a 100644
--- a/lxd/storage/backend_lxd.go
+++ b/lxd/storage/backend_lxd.go
@@ -318,10 +318,11 @@ func (b *lxdBackend) CreateInstanceFromCopy(inst Instance, src Instance, snapsho
 	return ErrNotImplemented
 }
 
-// imageFiller returns a function that can be used as a filler function with CreateVolume(). This
-// function will unpack the specified image archive into the specified mount path of the volume.
-func (b *lxdBackend) imageFiller(fingerprint string, op *operations.Operation) func(mountPath string) error {
-	return func(instanceMountPath string) error {
+// imageFiller returns a function that can be used as a filler function with CreateVolume().
+// The function returned will unpack the specified image archive into the specified mount path
+// provided, and for VM images, a raw root block path is required to unpack the qcow image into.
+func (b *lxdBackend) imageFiller(fingerprint string, op *operations.Operation) func(mountPath, rootBlockPath string) error {
+	return func(mountPath, rootBlockPath string) error {
 		var tracker *ioprogress.ProgressTracker
 		if op != nil { // Not passed when being done as part of pre-migration setup.
 			metadata := make(map[string]interface{})
@@ -331,10 +332,8 @@ func (b *lxdBackend) imageFiller(fingerprint string, op *operations.Operation) f
 					op.UpdateMetadata(metadata)
 				}}
 		}
-
-		imagePath := shared.VarPath("images", fingerprint)
-
-		return ImageUnpack(imagePath, instanceMountPath, b.driver.Info().BlockBacking, b.state.OS.RunningInUserNS, tracker)
+		imageFile := shared.VarPath("images", fingerprint)
+		return ImageUnpack(imageFile, mountPath, rootBlockPath, b.driver.Info().BlockBacking, b.state.OS.RunningInUserNS, tracker)
 	}
 }
 
@@ -660,6 +659,7 @@ func (b *lxdBackend) UnmountInstance(inst Instance, op *operations.Operation) (b
 	return b.driver.UnmountVolume(volType, volStorageName, op)
 }
 
+// GetInstanceDisk returns the type of disk for an instance and its location.
 func (b *lxdBackend) GetInstanceDisk(inst Instance) (string, string, error) {
 	return "", "", ErrNotImplemented
 }

From 52e7e91552633089b928068abf7ea0e0aa7c49eb Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:02:17 +0000
Subject: [PATCH 23/30] lxd/storage/drivers/driver/cephfs: filler usage update

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/driver_cephfs.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lxd/storage/drivers/driver_cephfs.go b/lxd/storage/drivers/driver_cephfs.go
index 3806eb9558..4fdd0b041d 100644
--- a/lxd/storage/drivers/driver_cephfs.go
+++ b/lxd/storage/drivers/driver_cephfs.go
@@ -286,7 +286,7 @@ func (d *cephfs) ValidateVolume(vol Volume, removeUnknownKeys bool) error {
 	return d.validateVolume(vol, nil, removeUnknownKeys)
 }
 
-func (d *cephfs) CreateVolume(vol Volume, filler func(path string) error, op *operations.Operation) error {
+func (d *cephfs) CreateVolume(vol Volume, filler func(mountPath, rootBlockPath string) error, op *operations.Operation) error {
 	if vol.volType != VolumeTypeCustom {
 		return fmt.Errorf("Volume type not supported")
 	}
@@ -310,7 +310,7 @@ func (d *cephfs) CreateVolume(vol Volume, filler func(path string) error, op *op
 	}()
 
 	if filler != nil {
-		err = filler(volPath)
+		err = filler(volPath, "")
 		if err != nil {
 			return err
 		}

From 0b123cd31c19c14e8951e11413decf1d0e2a4d44 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:03:01 +0000
Subject: [PATCH 24/30] lxd/storage/drivers/driver/dir: Adds VM support to
 CreateVolume

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/driver_dir.go | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/lxd/storage/drivers/driver_dir.go b/lxd/storage/drivers/driver_dir.go
index d84a307774..a2bdda79f8 100644
--- a/lxd/storage/drivers/driver_dir.go
+++ b/lxd/storage/drivers/driver_dir.go
@@ -33,7 +33,7 @@ func (d *dir) Info() Info {
 		PreservesInodes:    false,
 		Usable:             true,
 		Remote:             false,
-		VolumeTypes:        []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer},
+		VolumeTypes:        []VolumeType{VolumeTypeCustom, VolumeTypeImage, VolumeTypeContainer, VolumeTypeVM},
 		BlockBacking:       false,
 		RunningQuotaResize: true,
 	}
@@ -163,11 +163,7 @@ func (d *dir) HasVolume(volType VolumeType, volName string) bool {
 
 // CreateVolume creates an empty volume and can optionally fill it by executing the supplied
 // filler function.
-func (d *dir) CreateVolume(vol Volume, filler func(path string) error, op *operations.Operation) error {
-	if vol.contentType != ContentTypeFS {
-		return fmt.Errorf("Content type not supported")
-	}
-
+func (d *dir) CreateVolume(vol Volume, filler func(mountPath, rootBlockPath string) error, op *operations.Operation) error {
 	volPath := vol.MountPath()
 
 	// Get the volume ID for the new volume, which is used to set project quota.
@@ -201,8 +197,23 @@ func (d *dir) CreateVolume(vol Volume, filler func(path string) error, op *opera
 		return err
 	}
 
+	// Create sparse loopback file if volume is block.
+	rootBlockPath := ""
+	if vol.contentType == ContentTypeBlock {
+		rootBlockPath = filepath.Join(volPath, "root.img")
+		sizeBytes, err := units.ParseByteSizeString("10GB")
+		if err != nil {
+			return err
+		}
+
+		err = createSparseFile(rootBlockPath, sizeBytes)
+		if err != nil {
+			return err
+		}
+	}
+
 	if filler != nil {
-		err = filler(volPath)
+		err = filler(volPath, rootBlockPath)
 		if err != nil {
 			return err
 		}

From afe939ba29d1761d0e9572ba76b3a7280db9ed15 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:03:21 +0000
Subject: [PATCH 25/30] lxd/storage/drivers/driver/dir: Adds content type
 checking to some functions

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/driver_dir.go | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/lxd/storage/drivers/driver_dir.go b/lxd/storage/drivers/driver_dir.go
index a2bdda79f8..8b1ce1881c 100644
--- a/lxd/storage/drivers/driver_dir.go
+++ b/lxd/storage/drivers/driver_dir.go
@@ -225,6 +225,10 @@ func (d *dir) CreateVolume(vol Volume, filler func(mountPath, rootBlockPath stri
 
 // MigrateVolume sends a volume for migration.
 func (d *dir) MigrateVolume(vol Volume, conn io.ReadWriteCloser, volSrcArgs migration.VolumeSourceArgs, op *operations.Operation) error {
+	if vol.contentType != ContentTypeFS {
+		return fmt.Errorf("Content type not supported")
+	}
+
 	if volSrcArgs.MigrationType.FSType != migration.MigrationFSType_RSYNC {
 		return fmt.Errorf("Migration type not supported")
 	}
@@ -487,6 +491,10 @@ func (d *dir) VolumeSnapshots(volType VolumeType, volName string, op *operations
 
 // UpdateVolume applies config changes to the volume.
 func (d *dir) UpdateVolume(vol Volume, changedConfig map[string]string) error {
+	if vol.contentType != ContentTypeFS {
+		return fmt.Errorf("Content type not supported")
+	}
+
 	if _, changed := changedConfig["size"]; changed {
 		volID, err := d.getVolID(vol.volType, vol.name)
 		if err != nil {

From 4117f8b522a33caa7604f8333f6bde93118625c6 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:05:58 +0000
Subject: [PATCH 26/30] lxd/storage/drivers/interface: CreateVolume signature
 update for filler change

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/drivers/interface.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lxd/storage/drivers/interface.go b/lxd/storage/drivers/interface.go
index ed5a066a74..f224c9e335 100644
--- a/lxd/storage/drivers/interface.go
+++ b/lxd/storage/drivers/interface.go
@@ -32,7 +32,7 @@ type Driver interface {
 
 	// Volumes.
 	ValidateVolume(vol Volume, removeUnknownKeys bool) error
-	CreateVolume(vol Volume, filler func(path string) error, op *operations.Operation) error
+	CreateVolume(vol Volume, filler func(mountPath, rootBlockPath string) error, op *operations.Operation) error
 	CreateVolumeFromCopy(vol Volume, srcVol Volume, copySnapshots bool, op *operations.Operation) error
 	DeleteVolume(volType VolumeType, volName string, op *operations.Operation) error
 	RenameVolume(volType VolumeType, volName string, newName string, op *operations.Operation) error

From c1e867f899224b864df9f415001d0f5ea20dda28 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:06:24 +0000
Subject: [PATCH 27/30] lxd/storage/load: InstancePool usage

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/load.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lxd/storage/load.go b/lxd/storage/load.go
index 5249f9a32e..14e058f896 100644
--- a/lxd/storage/load.go
+++ b/lxd/storage/load.go
@@ -152,12 +152,13 @@ func GetPoolByName(state *state.State, name string) (Pool, error) {
 // If the pool's driver is not recognised then drivers.ErrUnknownDriver is returned. If the pool's
 // driver does not support the instance's type then drivers.ErrNotImplemented is returned.
 func GetPoolByInstance(s *state.State, inst Instance) (Pool, error) {
-	poolName, err := s.Cluster.ContainerPool(inst.Project(), inst.Name())
+	poolName, err := s.Cluster.InstancePool(inst.Project(), inst.Name())
 	if err != nil {
 		return nil, err
 	}
 
 	pool, err := GetPoolByName(s, poolName)
+
 	if err != nil {
 		return nil, err
 	}

From 10ccf6799d137f4d8647393cb7156194e4bc1bac Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:06:48 +0000
Subject: [PATCH 28/30] lxd/storage/utils: Adds VM type conversion

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/utils.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index c71f69f980..aa2527ed6b 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -396,6 +396,8 @@ func VolumeTypeNameToType(volumeTypeName string) (int, error) {
 	switch volumeTypeName {
 	case db.StoragePoolVolumeTypeNameContainer:
 		return db.StoragePoolVolumeTypeContainer, nil
+	case db.StoragePoolVolumeTypeNameVM:
+		return db.StoragePoolVolumeTypeVM, nil
 	case db.StoragePoolVolumeTypeNameImage:
 		return db.StoragePoolVolumeTypeImage, nil
 	case db.StoragePoolVolumeTypeNameCustom:
@@ -410,6 +412,8 @@ func VolumeTypeToDBType(volType drivers.VolumeType) (int, error) {
 	switch volType {
 	case drivers.VolumeTypeContainer:
 		return db.StoragePoolVolumeTypeContainer, nil
+	case drivers.VolumeTypeVM:
+		return db.StoragePoolVolumeTypeVM, nil
 	case drivers.VolumeTypeImage:
 		return db.StoragePoolVolumeTypeImage, nil
 	case drivers.VolumeTypeCustom:

From 2c8a37481e22f44e3c9d8ba5356bbe972dabed3d Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:07:02 +0000
Subject: [PATCH 29/30] lxd/storage/utils: Updates ImageUnpack to support VM
 images

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage/utils.go | 61 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 51 insertions(+), 10 deletions(-)

diff --git a/lxd/storage/utils.go b/lxd/storage/utils.go
index aa2527ed6b..c9a97989dd 100644
--- a/lxd/storage/utils.go
+++ b/lxd/storage/utils.go
@@ -703,27 +703,68 @@ func validateVolumeCommonRules() map[string]func(string) error {
 }
 
 // ImageUnpack unpacks a filesystem image into the destination path.
-func ImageUnpack(imageFile string, destPath string, blockBackend bool, runningInUserns bool, tracker *ioprogress.ProgressTracker) error {
+// There are several formats that images can come in:
+// Container Format A: Separate metadata tarball and root squashfs file.
+// 	- Unpack metadata tarball into mountPath.
+//	- Unpack root squashfs file into mountPath/rootfs.
+// Container Format B: Combined tarball containing metadata files and root squashfs.
+//	- Unpack combined tarball into mountPath.
+// VM Format A: Separate metadata tarball and root qcow2 file.
+// 	- Unpack metadata tarball into mountPath.
+//	- Check rootBlockPath is a file and convert qcow2 file into raw format in rootBlockPath.
+func ImageUnpack(imageFile, destPath, destBlockFile string, blockBackend, runningInUserns bool, tracker *ioprogress.ProgressTracker) error {
+	// For all formats, first unpack the metadata (or combined) tarball into destPath.
 	err := shared.Unpack(imageFile, destPath, blockBackend, runningInUserns, tracker)
 	if err != nil {
 		return err
 	}
 
-	rootfsPath := fmt.Sprintf("%s/rootfs", destPath)
-	if shared.PathExists(imageFile + ".rootfs") {
-		err = os.MkdirAll(rootfsPath, 0755)
-		if err != nil {
-			return fmt.Errorf("Error creating rootfs directory")
+	imageRootfsFile := imageFile + ".rootfs"
+
+	// If no destBlockFile supplied then this is a container image unpack.
+	if destBlockFile == "" {
+		rootfsPath := filepath.Join(destPath, "rootfs")
+
+		// Check for separate root file.
+		if shared.PathExists(imageRootfsFile) {
+			err = os.MkdirAll(rootfsPath, 0755)
+			if err != nil {
+				return fmt.Errorf("Error creating rootfs directory")
+			}
+
+			err = shared.Unpack(imageRootfsFile, rootfsPath, blockBackend, runningInUserns, tracker)
+			if err != nil {
+				return err
+			}
 		}
 
-		err = shared.Unpack(imageFile+".rootfs", rootfsPath, blockBackend, runningInUserns, tracker)
+		// Check that the container image unpack has resulted in a rootfs dir.
+		if !shared.PathExists(rootfsPath) {
+			return fmt.Errorf("Image is missing a rootfs: %s", imageFile)
+		}
+	} else {
+		// If a rootBlockPath is supplied then this is a VM image unpack.
+
+		// VM images require a separate rootfs file.
+		if !shared.PathExists(imageRootfsFile) {
+			return fmt.Errorf("Image is missing a rootfs file: %s", imageRootfsFile)
+		}
+
+		// Check that the rootBlockPath exists and is a file.
+		fileInfo, err := os.Stat(destBlockFile)
 		if err != nil {
 			return err
 		}
-	}
 
-	if !shared.PathExists(rootfsPath) {
-		return fmt.Errorf("Image is missing a rootfs: %s", imageFile)
+		if fileInfo.IsDir() {
+			return fmt.Errorf("Root block path isn't a file: %s", destBlockFile)
+		}
+
+		// Convert the qcow2 format to a raw block device.
+		_, err = shared.RunCommand("qemu-img", "convert", "-O", "raw", imageRootfsFile, destBlockFile)
+		if err != nil {
+			return err
+		}
 	}
 
 	return nil

From a33b4419ad403d38dcdd8d9b33015162c37c4793 Mon Sep 17 00:00:00 2001
From: Thomas Parrott <thomas.parrott at canonical.com>
Date: Wed, 6 Nov 2019 17:07:44 +0000
Subject: [PATCH 30/30] lxd/storage: Updates ImageUnpack usage

Signed-off-by: Thomas Parrott <thomas.parrott at canonical.com>
---
 lxd/storage_btrfs.go     | 2 +-
 lxd/storage_ceph.go      | 2 +-
 lxd/storage_dir.go       | 2 +-
 lxd/storage_lvm.go       | 2 +-
 lxd/storage_lvm_utils.go | 2 +-
 lxd/storage_zfs.go       | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/lxd/storage_btrfs.go b/lxd/storage_btrfs.go
index 23deb6119c..1d31793083 100644
--- a/lxd/storage_btrfs.go
+++ b/lxd/storage_btrfs.go
@@ -2008,7 +2008,7 @@ func (s *storageBtrfs) ImageCreate(fingerprint string, tracker *ioprogress.Progr
 
 	// Unpack the image in imageMntPoint.
 	imagePath := shared.VarPath("images", fingerprint)
-	err = driver.ImageUnpack(imagePath, tmpImageSubvolumeName, false, s.s.OS.RunningInUserNS, tracker)
+	err = driver.ImageUnpack(imagePath, tmpImageSubvolumeName, "", false, s.s.OS.RunningInUserNS, tracker)
 	if err != nil {
 		return err
 	}
diff --git a/lxd/storage_ceph.go b/lxd/storage_ceph.go
index 0cfcd64db3..20d619fe22 100644
--- a/lxd/storage_ceph.go
+++ b/lxd/storage_ceph.go
@@ -2102,7 +2102,7 @@ func (s *storageCeph) ImageCreate(fingerprint string, tracker *ioprogress.Progre
 
 		// rsync contents into image
 		imagePath := shared.VarPath("images", fingerprint)
-		err = driver.ImageUnpack(imagePath, imageMntPoint, true, s.s.OS.RunningInUserNS, nil)
+		err = driver.ImageUnpack(imagePath, imageMntPoint, "", true, s.s.OS.RunningInUserNS, nil)
 		if err != nil {
 			logger.Errorf(`Failed to unpack image for RBD storage volume for image "%s" on storage pool "%s": %s`, fingerprint, s.pool.Name, err)
 
diff --git a/lxd/storage_dir.go b/lxd/storage_dir.go
index 15044043d2..1d4833ae45 100644
--- a/lxd/storage_dir.go
+++ b/lxd/storage_dir.go
@@ -572,7 +572,7 @@ func (s *storageDir) ContainerCreateFromImage(container Instance, imageFingerpri
 	}
 
 	imagePath := shared.VarPath("images", imageFingerprint)
-	err = driver.ImageUnpack(imagePath, containerMntPoint, false, s.s.OS.RunningInUserNS, nil)
+	err = driver.ImageUnpack(imagePath, containerMntPoint, "", false, s.s.OS.RunningInUserNS, nil)
 	if err != nil {
 		return errors.Wrap(err, "Unpack image")
 	}
diff --git a/lxd/storage_lvm.go b/lxd/storage_lvm.go
index d8f47ffe0d..ce7aa01137 100644
--- a/lxd/storage_lvm.go
+++ b/lxd/storage_lvm.go
@@ -1940,7 +1940,7 @@ func (s *storageLvm) ImageCreate(fingerprint string, tracker *ioprogress.Progres
 		}
 
 		imagePath := shared.VarPath("images", fingerprint)
-		err = driver.ImageUnpack(imagePath, imageMntPoint, true, s.s.OS.RunningInUserNS, nil)
+		err = driver.ImageUnpack(imagePath, imageMntPoint, "", true, s.s.OS.RunningInUserNS, nil)
 		if err != nil {
 			return err
 		}
diff --git a/lxd/storage_lvm_utils.go b/lxd/storage_lvm_utils.go
index 31d63fb23a..35b6333f7c 100644
--- a/lxd/storage_lvm_utils.go
+++ b/lxd/storage_lvm_utils.go
@@ -504,7 +504,7 @@ func (s *storageLvm) containerCreateFromImageLv(c Instance, fp string) error {
 
 	imagePath := shared.VarPath("images", fp)
 	containerMntPoint := driver.GetContainerMountPoint(c.Project(), s.pool.Name, containerName)
-	err = driver.ImageUnpack(imagePath, containerMntPoint, true, s.s.OS.RunningInUserNS, nil)
+	err = driver.ImageUnpack(imagePath, containerMntPoint, "", true, s.s.OS.RunningInUserNS, nil)
 	if err != nil {
 		logger.Errorf(`Failed to unpack image "%s" into non-thinpool LVM storage volume "%s" for container "%s" on storage pool "%s": %s`, imagePath, containerMntPoint, containerName, s.pool.Name, err)
 		return err
diff --git a/lxd/storage_zfs.go b/lxd/storage_zfs.go
index ca7cc6cbc2..0b1b9efd69 100644
--- a/lxd/storage_zfs.go
+++ b/lxd/storage_zfs.go
@@ -2406,7 +2406,7 @@ func (s *storageZfs) ImageCreate(fingerprint string, tracker *ioprogress.Progres
 	}
 
 	// Unpack the image into the temporary mountpoint.
-	err = driver.ImageUnpack(imagePath, tmpImageDir, false, s.s.OS.RunningInUserNS, nil)
+	err = driver.ImageUnpack(imagePath, tmpImageDir, "", false, s.s.OS.RunningInUserNS, nil)
 	if err != nil {
 		return err
 	}


More information about the lxc-devel mailing list